id | content | max_stars_repo_path |
|---|---|---|
crossvul-cpp_data_bad_295_1 | /* $OpenBSD: auth2-hostbased.c,v 1.35 2018/07/09 21:35:50 markus Exp $ */
/*
* Copyright (c) 2000 Markus Friedl. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/types.h>
#include <pwd.h>
#include <string.h>
#include <stdarg.h>
#include "xmalloc.h"
#include "ssh2.h"
#include "packet.h"
#include "sshbuf.h"
#include "log.h"
#include "misc.h"
#include "servconf.h"
#include "compat.h"
#include "sshkey.h"
#include "hostfile.h"
#include "auth.h"
#include "canohost.h"
#ifdef GSSAPI
#include "ssh-gss.h"
#endif
#include "monitor_wrap.h"
#include "pathnames.h"
#include "ssherr.h"
#include "match.h"
/* import */
extern ServerOptions options;
extern u_char *session_id2;
extern u_int session_id2_len;
static int
userauth_hostbased(struct ssh *ssh)
{
Authctxt *authctxt = ssh->authctxt;
struct sshbuf *b;
struct sshkey *key = NULL;
char *pkalg, *cuser, *chost;
u_char *pkblob, *sig;
size_t alen, blen, slen;
int r, pktype, authenticated = 0;
if (!authctxt->valid) {
debug2("%s: disabled because of invalid user", __func__);
return 0;
}
/* XXX use sshkey_froms() */
if ((r = sshpkt_get_cstring(ssh, &pkalg, &alen)) != 0 ||
(r = sshpkt_get_string(ssh, &pkblob, &blen)) != 0 ||
(r = sshpkt_get_cstring(ssh, &chost, NULL)) != 0 ||
(r = sshpkt_get_cstring(ssh, &cuser, NULL)) != 0 ||
(r = sshpkt_get_string(ssh, &sig, &slen)) != 0)
fatal("%s: packet parsing: %s", __func__, ssh_err(r));
debug("%s: cuser %s chost %s pkalg %s slen %zu", __func__,
cuser, chost, pkalg, slen);
#ifdef DEBUG_PK
debug("signature:");
sshbuf_dump_data(sig, slen, stderr);
#endif
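/*
 * Added note: the fields parsed above are the tail of the "hostbased"
 * request defined in RFC 4252 section 9:
 *
 *	byte	SSH_MSG_USERAUTH_REQUEST
 *	string	user name
 *	string	service name
 *	string	"hostbased"
 *	string	public key algorithm for host key
 *	string	public host key and certificates for client host
 *	string	client host name expressed as the FQDN
 *	string	user name on the client host
 *	string	signature
 *
 * The signature covers the session identifier followed by every field
 * except the signature itself, which is why that data is reconstructed
 * below before calling sshkey_verify().
 */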
pktype = sshkey_type_from_name(pkalg);
if (pktype == KEY_UNSPEC) {
/* this is perfectly legal */
logit("%s: unsupported public key algorithm: %s",
__func__, pkalg);
goto done;
}
if ((r = sshkey_from_blob(pkblob, blen, &key)) != 0) {
error("%s: key_from_blob: %s", __func__, ssh_err(r));
goto done;
}
if (key == NULL) {
error("%s: cannot decode key: %s", __func__, pkalg);
goto done;
}
if (key->type != pktype) {
error("%s: type mismatch for decoded key "
"(received %d, expected %d)", __func__, key->type, pktype);
goto done;
}
if (sshkey_type_plain(key->type) == KEY_RSA &&
(ssh->compat & SSH_BUG_RSASIGMD5) != 0) {
error("Refusing RSA key because peer uses unsafe "
"signature format");
goto done;
}
if (match_pattern_list(pkalg, options.hostbased_key_types, 0) != 1) {
logit("%s: key type %s not in HostbasedAcceptedKeyTypes",
__func__, sshkey_type(key));
goto done;
}
if ((b = sshbuf_new()) == NULL)
fatal("%s: sshbuf_new failed", __func__);
/* reconstruct packet */
if ((r = sshbuf_put_string(b, session_id2, session_id2_len)) != 0 ||
(r = sshbuf_put_u8(b, SSH2_MSG_USERAUTH_REQUEST)) != 0 ||
(r = sshbuf_put_cstring(b, authctxt->user)) != 0 ||
(r = sshbuf_put_cstring(b, authctxt->service)) != 0 ||
(r = sshbuf_put_cstring(b, "hostbased")) != 0 ||
(r = sshbuf_put_string(b, pkalg, alen)) != 0 ||
(r = sshbuf_put_string(b, pkblob, blen)) != 0 ||
(r = sshbuf_put_cstring(b, chost)) != 0 ||
(r = sshbuf_put_cstring(b, cuser)) != 0)
fatal("%s: buffer error: %s", __func__, ssh_err(r));
#ifdef DEBUG_PK
sshbuf_dump(b, stderr);
#endif
auth2_record_info(authctxt,
"client user \"%.100s\", client host \"%.100s\"", cuser, chost);
/* test for allowed key and correct signature */
authenticated = 0;
if (PRIVSEP(hostbased_key_allowed(authctxt->pw, cuser, chost, key)) &&
PRIVSEP(sshkey_verify(key, sig, slen,
sshbuf_ptr(b), sshbuf_len(b), pkalg, ssh->compat)) == 0)
authenticated = 1;
auth2_record_key(authctxt, authenticated, key);
sshbuf_free(b);
done:
debug2("%s: authenticated %d", __func__, authenticated);
sshkey_free(key);
free(pkalg);
free(pkblob);
free(cuser);
free(chost);
free(sig);
return authenticated;
}
/* return 1 if given hostkey is allowed */
int
hostbased_key_allowed(struct passwd *pw, const char *cuser, char *chost,
struct sshkey *key)
{
struct ssh *ssh = active_state; /* XXX */
const char *resolvedname, *ipaddr, *lookup, *reason;
HostStatus host_status;
int len;
char *fp;
if (auth_key_is_revoked(key))
return 0;
resolvedname = auth_get_canonical_hostname(ssh, options.use_dns);
ipaddr = ssh_remote_ipaddr(ssh);
debug2("%s: chost %s resolvedname %s ipaddr %s", __func__,
chost, resolvedname, ipaddr);
if (((len = strlen(chost)) > 0) && chost[len - 1] == '.') {
debug2("stripping trailing dot from chost %s", chost);
chost[len - 1] = '\0';
}
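/*
 * Added note: the branch below implements the sshd_config option
 * HostbasedUsesNameFromPacketOnly. When it is enabled, the
 * client-supplied name (chost) is trusted for the .shosts/.rhosts
 * lookup and no reverse DNS result is consulted; otherwise the
 * resolved hostname and IP address are used, and a mismatch with
 * chost is merely logged.
 */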
if (options.hostbased_uses_name_from_packet_only) {
if (auth_rhosts2(pw, cuser, chost, chost) == 0) {
debug2("%s: auth_rhosts2 refused "
"user \"%.100s\" host \"%.100s\" (from packet)",
__func__, cuser, chost);
return 0;
}
lookup = chost;
} else {
if (strcasecmp(resolvedname, chost) != 0)
logit("userauth_hostbased mismatch: "
"client sends %s, but we resolve %s to %s",
chost, ipaddr, resolvedname);
if (auth_rhosts2(pw, cuser, resolvedname, ipaddr) == 0) {
debug2("%s: auth_rhosts2 refused "
"user \"%.100s\" host \"%.100s\" addr \"%.100s\"",
__func__, cuser, resolvedname, ipaddr);
return 0;
}
lookup = resolvedname;
}
debug2("%s: access allowed by auth_rhosts2", __func__);
if (sshkey_is_cert(key) &&
sshkey_cert_check_authority(key, 1, 0, lookup, &reason)) {
error("%s", reason);
auth_debug_add("%s", reason);
return 0;
}
host_status = check_key_in_hostfiles(pw, key, lookup,
_PATH_SSH_SYSTEM_HOSTFILE,
options.ignore_user_known_hosts ? NULL : _PATH_SSH_USER_HOSTFILE);
/* backward compat if no key has been found. */
if (host_status == HOST_NEW) {
host_status = check_key_in_hostfiles(pw, key, lookup,
_PATH_SSH_SYSTEM_HOSTFILE2,
options.ignore_user_known_hosts ? NULL :
_PATH_SSH_USER_HOSTFILE2);
}
if (host_status == HOST_OK) {
if (sshkey_is_cert(key)) {
if ((fp = sshkey_fingerprint(key->cert->signature_key,
options.fingerprint_hash, SSH_FP_DEFAULT)) == NULL)
fatal("%s: sshkey_fingerprint fail", __func__);
verbose("Accepted certificate ID \"%s\" signed by "
"%s CA %s from %s@%s", key->cert->key_id,
sshkey_type(key->cert->signature_key), fp,
cuser, lookup);
} else {
if ((fp = sshkey_fingerprint(key,
options.fingerprint_hash, SSH_FP_DEFAULT)) == NULL)
fatal("%s: sshkey_fingerprint fail", __func__);
verbose("Accepted %s public key %s from %s@%s",
sshkey_type(key), fp, cuser, lookup);
}
free(fp);
}
return (host_status == HOST_OK);
}
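/*
 * Summary (added commentary): a host key is accepted only if it is not
 * revoked, auth_rhosts2() authorizes the client user/host pair, any
 * certificate passes sshkey_cert_check_authority(), and the key appears
 * in the system ssh_known_hosts file (or, unless IgnoreUserKnownHosts
 * is set, the per-user known_hosts file).
 */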
Authmethod method_hostbased = {
"hostbased",
userauth_hostbased,
&options.hostbased_authentication
};
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_295_1 |
crossvul-cpp_data_good_5684_0 | /*
RFCOMM implementation for Linux Bluetooth stack (BlueZ).
Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com>
Copyright (C) 2002 Marcel Holtmann <marcel@holtmann.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
/*
* RFCOMM sockets.
*/
#include <linux/export.h>
#include <linux/debugfs.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/rfcomm.h>
static const struct proto_ops rfcomm_sock_ops;
static struct bt_sock_list rfcomm_sk_list = {
.lock = __RW_LOCK_UNLOCKED(rfcomm_sk_list.lock)
};
static void rfcomm_sock_close(struct sock *sk);
static void rfcomm_sock_kill(struct sock *sk);
/* ---- DLC callbacks ----
*
* called under rfcomm_dlc_lock()
*/
static void rfcomm_sk_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb)
{
struct sock *sk = d->owner;
if (!sk)
return;
atomic_add(skb->len, &sk->sk_rmem_alloc);
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_data_ready(sk, skb->len);
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
rfcomm_dlc_throttle(d);
}
static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
{
struct sock *sk = d->owner, *parent;
unsigned long flags;
if (!sk)
return;
BT_DBG("dlc %p state %ld err %d", d, d->state, err);
local_irq_save(flags);
bh_lock_sock(sk);
if (err)
sk->sk_err = err;
sk->sk_state = d->state;
parent = bt_sk(sk)->parent;
if (parent) {
if (d->state == BT_CLOSED) {
sock_set_flag(sk, SOCK_ZAPPED);
bt_accept_unlink(sk);
}
parent->sk_data_ready(parent, 0);
} else {
if (d->state == BT_CONNECTED)
rfcomm_session_getaddr(d->session, &bt_sk(sk)->src, NULL);
sk->sk_state_change(sk);
}
bh_unlock_sock(sk);
local_irq_restore(flags);
if (parent && sock_flag(sk, SOCK_ZAPPED)) {
/* We have to drop DLC lock here, otherwise
* rfcomm_sock_destruct() will deadlock. */
rfcomm_dlc_unlock(d);
rfcomm_sock_kill(sk);
rfcomm_dlc_lock(d);
}
}
/* ---- Socket functions ---- */
static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src)
{
struct sock *sk = NULL;
sk_for_each(sk, &rfcomm_sk_list.head) {
if (rfcomm_pi(sk)->channel == channel &&
!bacmp(&bt_sk(sk)->src, src))
break;
}
return sk;
}
/* Find socket with channel and source bdaddr.
* Returns closest match.
*/
static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *src)
{
struct sock *sk = NULL, *sk1 = NULL;
read_lock(&rfcomm_sk_list.lock);
sk_for_each(sk, &rfcomm_sk_list.head) {
if (state && sk->sk_state != state)
continue;
if (rfcomm_pi(sk)->channel == channel) {
/* Exact match. */
if (!bacmp(&bt_sk(sk)->src, src))
break;
/* Closest match */
if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
sk1 = sk;
}
}
read_unlock(&rfcomm_sk_list.lock);
return sk ? sk : sk1;
}
static void rfcomm_sock_destruct(struct sock *sk)
{
struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
BT_DBG("sk %p dlc %p", sk, d);
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_write_queue);
rfcomm_dlc_lock(d);
rfcomm_pi(sk)->dlc = NULL;
/* Detach DLC if it's owned by this socket */
if (d->owner == sk)
d->owner = NULL;
rfcomm_dlc_unlock(d);
rfcomm_dlc_put(d);
}
static void rfcomm_sock_cleanup_listen(struct sock *parent)
{
struct sock *sk;
BT_DBG("parent %p", parent);
/* Close not yet accepted dlcs */
while ((sk = bt_accept_dequeue(parent, NULL))) {
rfcomm_sock_close(sk);
rfcomm_sock_kill(sk);
}
parent->sk_state = BT_CLOSED;
sock_set_flag(parent, SOCK_ZAPPED);
}
/* Kill socket (only if zapped and orphan)
* Must be called on unlocked socket.
*/
static void rfcomm_sock_kill(struct sock *sk)
{
if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
return;
BT_DBG("sk %p state %d refcnt %d", sk, sk->sk_state, atomic_read(&sk->sk_refcnt));
/* Kill poor orphan */
bt_sock_unlink(&rfcomm_sk_list, sk);
sock_set_flag(sk, SOCK_DEAD);
sock_put(sk);
}
static void __rfcomm_sock_close(struct sock *sk)
{
struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
switch (sk->sk_state) {
case BT_LISTEN:
rfcomm_sock_cleanup_listen(sk);
break;
case BT_CONNECT:
case BT_CONNECT2:
case BT_CONFIG:
case BT_CONNECTED:
rfcomm_dlc_close(d, 0);
default:
sock_set_flag(sk, SOCK_ZAPPED);
break;
}
}
/* Close socket.
* Must be called on unlocked socket.
*/
static void rfcomm_sock_close(struct sock *sk)
{
lock_sock(sk);
__rfcomm_sock_close(sk);
release_sock(sk);
}
static void rfcomm_sock_init(struct sock *sk, struct sock *parent)
{
struct rfcomm_pinfo *pi = rfcomm_pi(sk);
BT_DBG("sk %p", sk);
if (parent) {
sk->sk_type = parent->sk_type;
pi->dlc->defer_setup = test_bit(BT_SK_DEFER_SETUP,
&bt_sk(parent)->flags);
pi->sec_level = rfcomm_pi(parent)->sec_level;
pi->role_switch = rfcomm_pi(parent)->role_switch;
security_sk_clone(parent, sk);
} else {
pi->dlc->defer_setup = 0;
pi->sec_level = BT_SECURITY_LOW;
pi->role_switch = 0;
}
pi->dlc->sec_level = pi->sec_level;
pi->dlc->role_switch = pi->role_switch;
}
static struct proto rfcomm_proto = {
.name = "RFCOMM",
.owner = THIS_MODULE,
.obj_size = sizeof(struct rfcomm_pinfo)
};
static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
struct rfcomm_dlc *d;
struct sock *sk;
sk = sk_alloc(net, PF_BLUETOOTH, prio, &rfcomm_proto);
if (!sk)
return NULL;
sock_init_data(sock, sk);
INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
d = rfcomm_dlc_alloc(prio);
if (!d) {
sk_free(sk);
return NULL;
}
d->data_ready = rfcomm_sk_data_ready;
d->state_change = rfcomm_sk_state_change;
rfcomm_pi(sk)->dlc = d;
d->owner = sk;
sk->sk_destruct = rfcomm_sock_destruct;
sk->sk_sndtimeo = RFCOMM_CONN_TIMEOUT;
sk->sk_sndbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10;
sk->sk_rcvbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10;
sock_reset_flag(sk, SOCK_ZAPPED);
sk->sk_protocol = proto;
sk->sk_state = BT_OPEN;
bt_sock_link(&rfcomm_sk_list, sk);
BT_DBG("sk %p", sk);
return sk;
}
static int rfcomm_sock_create(struct net *net, struct socket *sock,
int protocol, int kern)
{
struct sock *sk;
BT_DBG("sock %p", sock);
sock->state = SS_UNCONNECTED;
if (sock->type != SOCK_STREAM && sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
sock->ops = &rfcomm_sock_ops;
sk = rfcomm_sock_alloc(net, sock, protocol, GFP_ATOMIC);
if (!sk)
return -ENOMEM;
rfcomm_sock_init(sk, NULL);
return 0;
}
static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
struct sockaddr_rc *sa = (struct sockaddr_rc *) addr;
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sk %p %pMR", sk, &sa->rc_bdaddr);
if (!addr || addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
lock_sock(sk);
if (sk->sk_state != BT_OPEN) {
err = -EBADFD;
goto done;
}
if (sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
goto done;
}
write_lock(&rfcomm_sk_list.lock);
if (sa->rc_channel && __rfcomm_get_sock_by_addr(sa->rc_channel, &sa->rc_bdaddr)) {
err = -EADDRINUSE;
} else {
/* Save source address */
bacpy(&bt_sk(sk)->src, &sa->rc_bdaddr);
rfcomm_pi(sk)->channel = sa->rc_channel;
sk->sk_state = BT_BOUND;
}
write_unlock(&rfcomm_sk_list.lock);
done:
release_sock(sk);
return err;
}
static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
struct sockaddr_rc *sa = (struct sockaddr_rc *) addr;
struct sock *sk = sock->sk;
struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
int err = 0;
BT_DBG("sk %p", sk);
if (alen < sizeof(struct sockaddr_rc) ||
addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
lock_sock(sk);
if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
err = -EBADFD;
goto done;
}
if (sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
goto done;
}
sk->sk_state = BT_CONNECT;
bacpy(&bt_sk(sk)->dst, &sa->rc_bdaddr);
rfcomm_pi(sk)->channel = sa->rc_channel;
d->sec_level = rfcomm_pi(sk)->sec_level;
d->role_switch = rfcomm_pi(sk)->role_switch;
err = rfcomm_dlc_open(d, &bt_sk(sk)->src, &sa->rc_bdaddr, sa->rc_channel);
if (!err)
err = bt_sock_wait_state(sk, BT_CONNECTED,
sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
release_sock(sk);
return err;
}
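/*
 * Illustrative user-space sketch (added commentary, not part of this
 * file): how the bind/connect paths above are typically driven through
 * the BlueZ headers. The peer address "00:11:22:33:44:55" and channel 1
 * are arbitrary examples; error handling is minimal.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/rfcomm.h>

static int rfcomm_connect_example(void)
{
	struct sockaddr_rc addr;
	int s;

	/* Creating the socket ends up in rfcomm_sock_create() above. */
	s = socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM);
	if (s < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.rc_family = AF_BLUETOOTH;
	addr.rc_channel = 1;	/* server channels are 1..30 */
	str2ba("00:11:22:33:44:55", &addr.rc_bdaddr);

	/* connect(2) reaches rfcomm_sock_connect() above. */
	if (connect(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(s);
		return -1;
	}
	return s;
}
#endif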
static int rfcomm_sock_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sk %p backlog %d", sk, backlog);
lock_sock(sk);
if (sk->sk_state != BT_BOUND) {
err = -EBADFD;
goto done;
}
if (sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
goto done;
}
if (!rfcomm_pi(sk)->channel) {
bdaddr_t *src = &bt_sk(sk)->src;
u8 channel;
err = -EINVAL;
write_lock(&rfcomm_sk_list.lock);
for (channel = 1; channel < 31; channel++)
if (!__rfcomm_get_sock_by_addr(channel, src)) {
rfcomm_pi(sk)->channel = channel;
err = 0;
break;
}
write_unlock(&rfcomm_sk_list.lock);
if (err < 0)
goto done;
}
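/*
 * Added note: RFCOMM server channels are limited to 1..30 by the
 * protocol (they map onto DLCIs 2..61), which is why the automatic
 * allocation above scans exactly that range.
 */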
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
sk->sk_state = BT_LISTEN;
done:
release_sock(sk);
return err;
}
static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
DECLARE_WAITQUEUE(wait, current);
struct sock *sk = sock->sk, *nsk;
long timeo;
int err = 0;
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
if (sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
goto done;
}
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
break;
}
nsk = bt_accept_dequeue(sk, newsock);
if (nsk)
break;
if (!timeo) {
err = -EAGAIN;
break;
}
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
break;
}
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
if (err)
goto done;
newsock->state = SS_CONNECTED;
BT_DBG("new socket %p", nsk);
done:
release_sock(sk);
return err;
}
static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
{
struct sockaddr_rc *sa = (struct sockaddr_rc *) addr;
struct sock *sk = sock->sk;
BT_DBG("sock %p, sk %p", sock, sk);
memset(sa, 0, sizeof(*sa));
sa->rc_family = AF_BLUETOOTH;
sa->rc_channel = rfcomm_pi(sk)->channel;
if (peer)
bacpy(&sa->rc_bdaddr, &bt_sk(sk)->dst);
else
bacpy(&sa->rc_bdaddr, &bt_sk(sk)->src);
*len = sizeof(struct sockaddr_rc);
return 0;
}
static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
struct sk_buff *skb;
int sent = 0;
if (test_bit(RFCOMM_DEFER_SETUP, &d->flags))
return -ENOTCONN;
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
if (sk->sk_shutdown & SEND_SHUTDOWN)
return -EPIPE;
BT_DBG("sock %p, sk %p", sock, sk);
lock_sock(sk);
while (len) {
size_t size = min_t(size_t, len, d->mtu);
int err;
skb = sock_alloc_send_skb(sk, size + RFCOMM_SKB_RESERVE,
msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb) {
if (sent == 0)
sent = err;
break;
}
skb_reserve(skb, RFCOMM_SKB_HEAD_RESERVE);
err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
if (err) {
kfree_skb(skb);
if (sent == 0)
sent = err;
break;
}
skb->priority = sk->sk_priority;
err = rfcomm_dlc_send(d, skb);
if (err < 0) {
kfree_skb(skb);
if (sent == 0)
sent = err;
break;
}
sent += size;
len -= size;
}
release_sock(sk);
return sent;
}
static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
int len;
if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
rfcomm_dlc_accept(d);
msg->msg_namelen = 0;
return 0;
}
len = bt_sock_stream_recvmsg(iocb, sock, msg, size, flags);
lock_sock(sk);
if (!(flags & MSG_PEEK) && len > 0)
atomic_sub(len, &sk->sk_rmem_alloc);
if (atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2))
rfcomm_dlc_unthrottle(rfcomm_pi(sk)->dlc);
release_sock(sk);
return len;
}
static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
int err = 0;
u32 opt;
BT_DBG("sk %p", sk);
lock_sock(sk);
switch (optname) {
case RFCOMM_LM:
if (get_user(opt, (u32 __user *) optval)) {
err = -EFAULT;
break;
}
if (opt & RFCOMM_LM_AUTH)
rfcomm_pi(sk)->sec_level = BT_SECURITY_LOW;
if (opt & RFCOMM_LM_ENCRYPT)
rfcomm_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
if (opt & RFCOMM_LM_SECURE)
rfcomm_pi(sk)->sec_level = BT_SECURITY_HIGH;
rfcomm_pi(sk)->role_switch = (opt & RFCOMM_LM_MASTER);
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct bt_security sec;
int err = 0;
size_t len;
u32 opt;
BT_DBG("sk %p", sk);
if (level == SOL_RFCOMM)
return rfcomm_sock_setsockopt_old(sock, optname, optval, optlen);
if (level != SOL_BLUETOOTH)
return -ENOPROTOOPT;
lock_sock(sk);
switch (optname) {
case BT_SECURITY:
if (sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
break;
}
sec.level = BT_SECURITY_LOW;
len = min_t(unsigned int, sizeof(sec), optlen);
if (copy_from_user((char *) &sec, optval, len)) {
err = -EFAULT;
break;
}
if (sec.level > BT_SECURITY_HIGH) {
err = -EINVAL;
break;
}
rfcomm_pi(sk)->sec_level = sec.level;
break;
case BT_DEFER_SETUP:
if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
err = -EINVAL;
break;
}
if (get_user(opt, (u32 __user *) optval)) {
err = -EFAULT;
break;
}
if (opt)
set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
else
clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct rfcomm_conninfo cinfo;
struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
int len, err = 0;
u32 opt;
BT_DBG("sk %p", sk);
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
switch (optname) {
case RFCOMM_LM:
switch (rfcomm_pi(sk)->sec_level) {
case BT_SECURITY_LOW:
opt = RFCOMM_LM_AUTH;
break;
case BT_SECURITY_MEDIUM:
opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT;
break;
case BT_SECURITY_HIGH:
opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT |
RFCOMM_LM_SECURE;
break;
default:
opt = 0;
break;
}
if (rfcomm_pi(sk)->role_switch)
opt |= RFCOMM_LM_MASTER;
if (put_user(opt, (u32 __user *) optval))
err = -EFAULT;
break;
case RFCOMM_CONNINFO:
if (sk->sk_state != BT_CONNECTED &&
!rfcomm_pi(sk)->dlc->defer_setup) {
err = -ENOTCONN;
break;
}
memset(&cinfo, 0, sizeof(cinfo));
cinfo.hci_handle = conn->hcon->handle;
memcpy(cinfo.dev_class, conn->hcon->dev_class, 3);
len = min_t(unsigned int, len, sizeof(cinfo));
if (copy_to_user(optval, (char *) &cinfo, len))
err = -EFAULT;
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct bt_security sec;
int len, err = 0;
BT_DBG("sk %p", sk);
if (level == SOL_RFCOMM)
return rfcomm_sock_getsockopt_old(sock, optname, optval, optlen);
if (level != SOL_BLUETOOTH)
return -ENOPROTOOPT;
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
switch (optname) {
case BT_SECURITY:
if (sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
break;
}
sec.level = rfcomm_pi(sk)->sec_level;
sec.key_size = 0;
len = min_t(unsigned int, len, sizeof(sec));
if (copy_to_user(optval, (char *) &sec, len))
err = -EFAULT;
break;
case BT_DEFER_SETUP:
if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
err = -EINVAL;
break;
}
if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
(u32 __user *) optval))
err = -EFAULT;
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int rfcomm_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk __maybe_unused = sock->sk;
int err;
BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg);
err = bt_sock_ioctl(sock, cmd, arg);
if (err == -ENOIOCTLCMD) {
#ifdef CONFIG_BT_RFCOMM_TTY
lock_sock(sk);
err = rfcomm_dev_ioctl(sk, cmd, (void __user *) arg);
release_sock(sk);
#else
err = -EOPNOTSUPP;
#endif
}
return err;
}
static int rfcomm_sock_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sock %p, sk %p", sock, sk);
if (!sk)
return 0;
lock_sock(sk);
if (!sk->sk_shutdown) {
sk->sk_shutdown = SHUTDOWN_MASK;
__rfcomm_sock_close(sk);
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
}
release_sock(sk);
return err;
}
static int rfcomm_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
int err;
BT_DBG("sock %p, sk %p", sock, sk);
if (!sk)
return 0;
err = rfcomm_sock_shutdown(sock, 2);
sock_orphan(sk);
rfcomm_sock_kill(sk);
return err;
}
/* ---- RFCOMM core layer callbacks ----
*
* called under rfcomm_lock()
*/
int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc **d)
{
struct sock *sk, *parent;
bdaddr_t src, dst;
int result = 0;
BT_DBG("session %p channel %d", s, channel);
rfcomm_session_getaddr(s, &src, &dst);
/* Check if we have socket listening on channel */
parent = rfcomm_get_sock_by_channel(BT_LISTEN, channel, &src);
if (!parent)
return 0;
bh_lock_sock(parent);
/* Check for backlog size */
if (sk_acceptq_is_full(parent)) {
BT_DBG("backlog full %d", parent->sk_ack_backlog);
goto done;
}
sk = rfcomm_sock_alloc(sock_net(parent), NULL, BTPROTO_RFCOMM, GFP_ATOMIC);
if (!sk)
goto done;
bt_sock_reclassify_lock(sk, BTPROTO_RFCOMM);
rfcomm_sock_init(sk, parent);
bacpy(&bt_sk(sk)->src, &src);
bacpy(&bt_sk(sk)->dst, &dst);
rfcomm_pi(sk)->channel = channel;
sk->sk_state = BT_CONFIG;
bt_accept_enqueue(parent, sk);
/* Accept connection and return socket DLC */
*d = rfcomm_pi(sk)->dlc;
result = 1;
done:
bh_unlock_sock(parent);
if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
parent->sk_state_change(parent);
return result;
}
static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p)
{
struct sock *sk;
read_lock(&rfcomm_sk_list.lock);
sk_for_each(sk, &rfcomm_sk_list.head) {
seq_printf(f, "%pMR %pMR %d %d\n",
&bt_sk(sk)->src, &bt_sk(sk)->dst,
sk->sk_state, rfcomm_pi(sk)->channel);
}
read_unlock(&rfcomm_sk_list.lock);
return 0;
}
static int rfcomm_sock_debugfs_open(struct inode *inode, struct file *file)
{
return single_open(file, rfcomm_sock_debugfs_show, inode->i_private);
}
static const struct file_operations rfcomm_sock_debugfs_fops = {
.open = rfcomm_sock_debugfs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static struct dentry *rfcomm_sock_debugfs;
static const struct proto_ops rfcomm_sock_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.release = rfcomm_sock_release,
.bind = rfcomm_sock_bind,
.connect = rfcomm_sock_connect,
.listen = rfcomm_sock_listen,
.accept = rfcomm_sock_accept,
.getname = rfcomm_sock_getname,
.sendmsg = rfcomm_sock_sendmsg,
.recvmsg = rfcomm_sock_recvmsg,
.shutdown = rfcomm_sock_shutdown,
.setsockopt = rfcomm_sock_setsockopt,
.getsockopt = rfcomm_sock_getsockopt,
.ioctl = rfcomm_sock_ioctl,
.poll = bt_sock_poll,
.socketpair = sock_no_socketpair,
.mmap = sock_no_mmap
};
static const struct net_proto_family rfcomm_sock_family_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.create = rfcomm_sock_create
};
int __init rfcomm_init_sockets(void)
{
int err;
err = proto_register(&rfcomm_proto, 0);
if (err < 0)
return err;
err = bt_sock_register(BTPROTO_RFCOMM, &rfcomm_sock_family_ops);
if (err < 0) {
BT_ERR("RFCOMM socket layer registration failed");
goto error;
}
err = bt_procfs_init(THIS_MODULE, &init_net, "rfcomm", &rfcomm_sk_list, NULL);
if (err < 0) {
BT_ERR("Failed to create RFCOMM proc file");
bt_sock_unregister(BTPROTO_RFCOMM);
goto error;
}
if (bt_debugfs) {
rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444,
bt_debugfs, NULL, &rfcomm_sock_debugfs_fops);
if (!rfcomm_sock_debugfs)
BT_ERR("Failed to create RFCOMM debug file");
}
BT_INFO("RFCOMM socket layer initialized");
return 0;
error:
proto_unregister(&rfcomm_proto);
return err;
}
void __exit rfcomm_cleanup_sockets(void)
{
bt_procfs_cleanup(&init_net, "rfcomm");
debugfs_remove(rfcomm_sock_debugfs);
if (bt_sock_unregister(BTPROTO_RFCOMM) < 0)
BT_ERR("RFCOMM socket layer unregistration failed");
proto_unregister(&rfcomm_proto);
}
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_5684_0 |
crossvul-cpp_data_bad_5697_0 | /*
* VMware vSockets Driver
*
* Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
/* Implementation notes:
*
* - There are two kinds of sockets: those created by user action (such as
* calling socket(2)) and those created by incoming connection request packets.
*
* - There are two "global" tables, one for bound sockets (sockets that have
* specified an address that they are responsible for) and one for connected
* sockets (sockets that have established a connection with another socket).
* These tables are "global" in that all sockets on the system are placed
* within them. - Note, though, that the bound table contains an extra entry
* for a list of unbound sockets and SOCK_DGRAM sockets will always remain in
* that list. The bound table is used solely for lookup of sockets when packets
* are received and that's not necessary for SOCK_DGRAM sockets since we create
* a datagram handle for each and need not perform a lookup. Keeping SOCK_DGRAM
* sockets out of the bound hash buckets will reduce the chance of collisions
* when looking for SOCK_STREAM sockets and prevent us from having to check the
* socket type in the hash table lookups.
*
* - Sockets created by user action will either be "client" sockets that
* initiate a connection or "server" sockets that listen for connections; we do
* not support simultaneous connects (two "client" sockets connecting).
*
* - "Server" sockets are referred to as listener sockets throughout this
* implementation because they are in the SS_LISTEN state. When a connection
* request is received (the second kind of socket mentioned above), we create a
* new socket and refer to it as a pending socket. These pending sockets are
* placed on the pending connection list of the listener socket. When future
* packets are received for the address the listener socket is bound to, we
* check if the source of the packet is from one that has an existing pending
* connection. If it does, we process the packet for the pending socket. When
* that socket reaches the connected state, it is removed from the listener
* socket's pending list and enqueued in the listener socket's accept queue.
* Callers of accept(2) will accept connected sockets from the listener socket's
* accept queue. If the socket cannot be accepted for some reason then it is
* marked rejected. Once the connection is accepted, it is owned by the user
* process and the responsibility for cleanup falls with that user process.
*
* - It is possible that these pending sockets will never reach the connected
* state; in fact, we may never receive another packet after the connection
* request. Because of this, we must schedule a cleanup function to run in the
* future, after some amount of time passes where a connection should have been
* established. This function ensures that the socket is off all lists so it
* cannot be retrieved, then drops all references to the socket so it is cleaned
* up (sock_put() -> sk_free() -> our sk_destruct implementation). Note this
* function will also cleanup rejected sockets, those that reach the connected
* state but leave it before they have been accepted.
*
* - Sockets created by user action will be cleaned up when the user process
* calls close(2), causing our release implementation to be called. Our release
* implementation will perform some cleanup then drop the last reference so our
* sk_destruct implementation is invoked. Our sk_destruct implementation will
* perform additional cleanup that's common for both types of sockets.
*
* - A socket's reference count is what ensures that the structure won't be
* freed. Each entry in a list (such as the "global" bound and connected tables
* and the listener socket's pending list and connected queue) ensures a
* reference. When we defer work until process context and pass a socket as our
* argument, we must ensure the reference count is increased to ensure the
* socket isn't freed before the function is run; the deferred function will
* then drop the reference.
*/
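/*
 * Illustrative user-space sketch (added commentary, not part of this
 * file): a minimal stream client for the address family implemented
 * below, assuming the UAPI header <linux/vm_sockets.h> is available.
 * CID 2 (the host) and port 1234 are arbitrary examples.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/vm_sockets.h>

static int vsock_connect_example(void)
{
	struct sockaddr_vm addr;
	int s;

	/* Creating the socket ends up in __vsock_create() below. */
	s = socket(AF_VSOCK, SOCK_STREAM, 0);
	if (s < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.svm_family = AF_VSOCK;
	addr.svm_cid = 2;	/* VMADDR_CID_HOST */
	addr.svm_port = 1234;

	/* connect(2) reaches vsock_stream_connect() below. */
	if (connect(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(s);
		return -1;
	}
	return s;
}
#endif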
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/cred.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <linux/socket.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/sock.h>
#include "af_vsock.h"
static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
static void vsock_sk_destruct(struct sock *sk);
static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
/* Protocol family. */
static struct proto vsock_proto = {
.name = "AF_VSOCK",
.owner = THIS_MODULE,
.obj_size = sizeof(struct vsock_sock),
};
/* The default peer timeout indicates how long we will wait for a peer response
* to a control message.
*/
#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)
#define SS_LISTEN 255
static const struct vsock_transport *transport;
static DEFINE_MUTEX(vsock_register_mutex);
/**** EXPORTS ****/
/* Get the ID of the local context. This is transport dependent. */
int vm_sockets_get_local_cid(void)
{
return transport->get_local_cid();
}
EXPORT_SYMBOL_GPL(vm_sockets_get_local_cid);
/**** UTILS ****/
/* Each bound VSocket is stored in the bind hash table and each connected
* VSocket is stored in the connected hash table.
*
* Unbound sockets are all put on the same list attached to the end of the hash
* table (vsock_unbound_sockets). Bound sockets are added to the hash table in
* the bucket that their local address hashes to (vsock_bound_sockets(addr)
* represents the list that addr hashes to).
*
* Specifically, we initialize the vsock_bind_table array to a size of
* VSOCK_HASH_SIZE + 1 so that vsock_bind_table[0] through
* vsock_bind_table[VSOCK_HASH_SIZE - 1] are for bound sockets and
* vsock_bind_table[VSOCK_HASH_SIZE] is for unbound sockets. The hash function
* mods with VSOCK_HASH_SIZE - 1 to ensure this.
*/
#define VSOCK_HASH_SIZE 251
#define MAX_PORT_RETRIES 24
#define VSOCK_HASH(addr) ((addr)->svm_port % (VSOCK_HASH_SIZE - 1))
#define vsock_bound_sockets(addr) (&vsock_bind_table[VSOCK_HASH(addr)])
#define vsock_unbound_sockets (&vsock_bind_table[VSOCK_HASH_SIZE])
/* XXX This can probably be implemented in a better way. */
#define VSOCK_CONN_HASH(src, dst) \
(((src)->svm_cid ^ (dst)->svm_port) % (VSOCK_HASH_SIZE - 1))
#define vsock_connected_sockets(src, dst) \
(&vsock_connected_table[VSOCK_CONN_HASH(src, dst)])
#define vsock_connected_sockets_vsk(vsk) \
vsock_connected_sockets(&(vsk)->remote_addr, &(vsk)->local_addr)
static struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1];
static struct list_head vsock_connected_table[VSOCK_HASH_SIZE];
static DEFINE_SPINLOCK(vsock_table_lock);
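/*
 * Worked example (added commentary): with VSOCK_HASH_SIZE == 251 the
 * hash mods the port by 250, so a socket bound to port 1234 lands in
 * vsock_bind_table[1234 % 250] == vsock_bind_table[234], while every
 * unbound socket sits on the extra list at vsock_bind_table[251].
 */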
static __init void vsock_init_tables(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(vsock_bind_table); i++)
INIT_LIST_HEAD(&vsock_bind_table[i]);
for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++)
INIT_LIST_HEAD(&vsock_connected_table[i]);
}
static void __vsock_insert_bound(struct list_head *list,
struct vsock_sock *vsk)
{
sock_hold(&vsk->sk);
list_add(&vsk->bound_table, list);
}
static void __vsock_insert_connected(struct list_head *list,
struct vsock_sock *vsk)
{
sock_hold(&vsk->sk);
list_add(&vsk->connected_table, list);
}
static void __vsock_remove_bound(struct vsock_sock *vsk)
{
list_del_init(&vsk->bound_table);
sock_put(&vsk->sk);
}
static void __vsock_remove_connected(struct vsock_sock *vsk)
{
list_del_init(&vsk->connected_table);
sock_put(&vsk->sk);
}
static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr)
{
struct vsock_sock *vsk;
list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table)
if (addr->svm_port == vsk->local_addr.svm_port)
return sk_vsock(vsk);
return NULL;
}
static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src,
struct sockaddr_vm *dst)
{
struct vsock_sock *vsk;
list_for_each_entry(vsk, vsock_connected_sockets(src, dst),
connected_table) {
if (vsock_addr_equals_addr(src, &vsk->remote_addr) &&
dst->svm_port == vsk->local_addr.svm_port) {
return sk_vsock(vsk);
}
}
return NULL;
}
static bool __vsock_in_bound_table(struct vsock_sock *vsk)
{
return !list_empty(&vsk->bound_table);
}
static bool __vsock_in_connected_table(struct vsock_sock *vsk)
{
return !list_empty(&vsk->connected_table);
}
static void vsock_insert_unbound(struct vsock_sock *vsk)
{
spin_lock_bh(&vsock_table_lock);
__vsock_insert_bound(vsock_unbound_sockets, vsk);
spin_unlock_bh(&vsock_table_lock);
}
void vsock_insert_connected(struct vsock_sock *vsk)
{
struct list_head *list = vsock_connected_sockets(
&vsk->remote_addr, &vsk->local_addr);
spin_lock_bh(&vsock_table_lock);
__vsock_insert_connected(list, vsk);
spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_insert_connected);
void vsock_remove_bound(struct vsock_sock *vsk)
{
spin_lock_bh(&vsock_table_lock);
__vsock_remove_bound(vsk);
spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_bound);
void vsock_remove_connected(struct vsock_sock *vsk)
{
spin_lock_bh(&vsock_table_lock);
__vsock_remove_connected(vsk);
spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_connected);
struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr)
{
struct sock *sk;
spin_lock_bh(&vsock_table_lock);
sk = __vsock_find_bound_socket(addr);
if (sk)
sock_hold(sk);
spin_unlock_bh(&vsock_table_lock);
return sk;
}
EXPORT_SYMBOL_GPL(vsock_find_bound_socket);
struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
struct sockaddr_vm *dst)
{
struct sock *sk;
spin_lock_bh(&vsock_table_lock);
sk = __vsock_find_connected_socket(src, dst);
if (sk)
sock_hold(sk);
spin_unlock_bh(&vsock_table_lock);
return sk;
}
EXPORT_SYMBOL_GPL(vsock_find_connected_socket);
static bool vsock_in_bound_table(struct vsock_sock *vsk)
{
bool ret;
spin_lock_bh(&vsock_table_lock);
ret = __vsock_in_bound_table(vsk);
spin_unlock_bh(&vsock_table_lock);
return ret;
}
static bool vsock_in_connected_table(struct vsock_sock *vsk)
{
bool ret;
spin_lock_bh(&vsock_table_lock);
ret = __vsock_in_connected_table(vsk);
spin_unlock_bh(&vsock_table_lock);
return ret;
}
void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
{
int i;
spin_lock_bh(&vsock_table_lock);
for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
struct vsock_sock *vsk;
list_for_each_entry(vsk, &vsock_connected_table[i],
connected_table)
fn(sk_vsock(vsk));
}
spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_for_each_connected_socket);
void vsock_add_pending(struct sock *listener, struct sock *pending)
{
struct vsock_sock *vlistener;
struct vsock_sock *vpending;
vlistener = vsock_sk(listener);
vpending = vsock_sk(pending);
sock_hold(pending);
sock_hold(listener);
list_add_tail(&vpending->pending_links, &vlistener->pending_links);
}
EXPORT_SYMBOL_GPL(vsock_add_pending);
void vsock_remove_pending(struct sock *listener, struct sock *pending)
{
struct vsock_sock *vpending = vsock_sk(pending);
list_del_init(&vpending->pending_links);
sock_put(listener);
sock_put(pending);
}
EXPORT_SYMBOL_GPL(vsock_remove_pending);
void vsock_enqueue_accept(struct sock *listener, struct sock *connected)
{
struct vsock_sock *vlistener;
struct vsock_sock *vconnected;
vlistener = vsock_sk(listener);
vconnected = vsock_sk(connected);
sock_hold(connected);
sock_hold(listener);
list_add_tail(&vconnected->accept_queue, &vlistener->accept_queue);
}
EXPORT_SYMBOL_GPL(vsock_enqueue_accept);
static struct sock *vsock_dequeue_accept(struct sock *listener)
{
struct vsock_sock *vlistener;
struct vsock_sock *vconnected;
vlistener = vsock_sk(listener);
if (list_empty(&vlistener->accept_queue))
return NULL;
vconnected = list_entry(vlistener->accept_queue.next,
struct vsock_sock, accept_queue);
list_del_init(&vconnected->accept_queue);
sock_put(listener);
/* The caller will need a reference on the connected socket so we let
* it call sock_put().
*/
return sk_vsock(vconnected);
}
static bool vsock_is_accept_queue_empty(struct sock *sk)
{
struct vsock_sock *vsk = vsock_sk(sk);
return list_empty(&vsk->accept_queue);
}
static bool vsock_is_pending(struct sock *sk)
{
struct vsock_sock *vsk = vsock_sk(sk);
return !list_empty(&vsk->pending_links);
}
static int vsock_send_shutdown(struct sock *sk, int mode)
{
return transport->shutdown(vsock_sk(sk), mode);
}
void vsock_pending_work(struct work_struct *work)
{
struct sock *sk;
struct sock *listener;
struct vsock_sock *vsk;
bool cleanup;
vsk = container_of(work, struct vsock_sock, dwork.work);
sk = sk_vsock(vsk);
listener = vsk->listener;
cleanup = true;
lock_sock(listener);
lock_sock(sk);
if (vsock_is_pending(sk)) {
vsock_remove_pending(listener, sk);
} else if (!vsk->rejected) {
/* We are not on the pending list and accept() did not reject
* us, so we must have been accepted by our user process. We
* just need to drop our references to the sockets and be on
* our way.
*/
cleanup = false;
goto out;
}
listener->sk_ack_backlog--;
/* We need to remove ourselves from the global connected sockets list so
* incoming packets can't find this socket, and to reduce the reference
* count.
*/
if (vsock_in_connected_table(vsk))
vsock_remove_connected(vsk);
sk->sk_state = SS_FREE;
out:
release_sock(sk);
release_sock(listener);
if (cleanup)
sock_put(sk);
sock_put(sk);
sock_put(listener);
}
EXPORT_SYMBOL_GPL(vsock_pending_work);
/**** SOCKET OPERATIONS ****/
static int __vsock_bind_stream(struct vsock_sock *vsk,
struct sockaddr_vm *addr)
{
static u32 port = LAST_RESERVED_PORT + 1;
struct sockaddr_vm new_addr;
vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port);
if (addr->svm_port == VMADDR_PORT_ANY) {
bool found = false;
unsigned int i;
for (i = 0; i < MAX_PORT_RETRIES; i++) {
if (port <= LAST_RESERVED_PORT)
port = LAST_RESERVED_PORT + 1;
new_addr.svm_port = port++;
if (!__vsock_find_bound_socket(&new_addr)) {
found = true;
break;
}
}
if (!found)
return -EADDRNOTAVAIL;
} else {
/* If port is in reserved range, ensure caller
* has necessary privileges.
*/
if (addr->svm_port <= LAST_RESERVED_PORT &&
!capable(CAP_NET_BIND_SERVICE)) {
return -EACCES;
}
if (__vsock_find_bound_socket(&new_addr))
return -EADDRINUSE;
}
vsock_addr_init(&vsk->local_addr, new_addr.svm_cid, new_addr.svm_port);
/* Remove stream sockets from the unbound list and add them to the hash
* table for easy lookup by its address. The unbound list is simply an
* extra entry at the end of the hash table, a trick used by AF_UNIX.
*/
__vsock_remove_bound(vsk);
__vsock_insert_bound(vsock_bound_sockets(&vsk->local_addr), vsk);
return 0;
}
static int __vsock_bind_dgram(struct vsock_sock *vsk,
struct sockaddr_vm *addr)
{
return transport->dgram_bind(vsk, addr);
}
static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
{
struct vsock_sock *vsk = vsock_sk(sk);
u32 cid;
int retval;
/* First ensure this socket isn't already bound. */
if (vsock_addr_bound(&vsk->local_addr))
return -EINVAL;
/* Now bind to the provided address or select appropriate values if
* none are provided (VMADDR_CID_ANY and VMADDR_PORT_ANY). Note that
* like AF_INET prevents binding to a non-local IP address (in most
* cases), we only allow binding to the local CID.
*/
cid = transport->get_local_cid();
if (addr->svm_cid != cid && addr->svm_cid != VMADDR_CID_ANY)
return -EADDRNOTAVAIL;
switch (sk->sk_socket->type) {
case SOCK_STREAM:
spin_lock_bh(&vsock_table_lock);
retval = __vsock_bind_stream(vsk, addr);
spin_unlock_bh(&vsock_table_lock);
break;
case SOCK_DGRAM:
retval = __vsock_bind_dgram(vsk, addr);
break;
default:
retval = -EINVAL;
break;
}
return retval;
}
struct sock *__vsock_create(struct net *net,
struct socket *sock,
struct sock *parent,
gfp_t priority,
unsigned short type)
{
struct sock *sk;
struct vsock_sock *psk;
struct vsock_sock *vsk;
sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto);
if (!sk)
return NULL;
sock_init_data(sock, sk);
/* sk->sk_type is normally set in sock_init_data, but only if sock is
* non-NULL. We make sure that our sockets always have a type by
* setting it here if needed.
*/
if (!sock)
sk->sk_type = type;
vsk = vsock_sk(sk);
vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
sk->sk_destruct = vsock_sk_destruct;
sk->sk_backlog_rcv = vsock_queue_rcv_skb;
sk->sk_state = 0;
sock_reset_flag(sk, SOCK_DONE);
INIT_LIST_HEAD(&vsk->bound_table);
INIT_LIST_HEAD(&vsk->connected_table);
vsk->listener = NULL;
INIT_LIST_HEAD(&vsk->pending_links);
INIT_LIST_HEAD(&vsk->accept_queue);
vsk->rejected = false;
vsk->sent_request = false;
vsk->ignore_connecting_rst = false;
vsk->peer_shutdown = 0;
psk = parent ? vsock_sk(parent) : NULL;
if (parent) {
vsk->trusted = psk->trusted;
vsk->owner = get_cred(psk->owner);
vsk->connect_timeout = psk->connect_timeout;
} else {
vsk->trusted = capable(CAP_NET_ADMIN);
vsk->owner = get_current_cred();
vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT;
}
if (transport->init(vsk, psk) < 0) {
sk_free(sk);
return NULL;
}
if (sock)
vsock_insert_unbound(vsk);
return sk;
}
EXPORT_SYMBOL_GPL(__vsock_create);
static void __vsock_release(struct sock *sk)
{
if (sk) {
struct sk_buff *skb;
struct sock *pending;
struct vsock_sock *vsk;
vsk = vsock_sk(sk);
pending = NULL; /* Compiler warning. */
if (vsock_in_bound_table(vsk))
vsock_remove_bound(vsk);
if (vsock_in_connected_table(vsk))
vsock_remove_connected(vsk);
transport->release(vsk);
lock_sock(sk);
sock_orphan(sk);
sk->sk_shutdown = SHUTDOWN_MASK;
while ((skb = skb_dequeue(&sk->sk_receive_queue)))
kfree_skb(skb);
/* Clean up any sockets that never were accepted. */
while ((pending = vsock_dequeue_accept(sk)) != NULL) {
__vsock_release(pending);
sock_put(pending);
}
release_sock(sk);
sock_put(sk);
}
}
static void vsock_sk_destruct(struct sock *sk)
{
struct vsock_sock *vsk = vsock_sk(sk);
transport->destruct(vsk);
/* When clearing these addresses, there's no need to set the family and
* possibly register the address family with the kernel.
*/
vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
put_cred(vsk->owner);
}
static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
int err;
err = sock_queue_rcv_skb(sk, skb);
if (err)
kfree_skb(skb);
return err;
}
s64 vsock_stream_has_data(struct vsock_sock *vsk)
{
return transport->stream_has_data(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_data);
s64 vsock_stream_has_space(struct vsock_sock *vsk)
{
return transport->stream_has_space(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_space);
static int vsock_release(struct socket *sock)
{
__vsock_release(sock->sk);
sock->sk = NULL;
sock->state = SS_FREE;
return 0;
}
static int
vsock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
int err;
struct sock *sk;
struct sockaddr_vm *vm_addr;
sk = sock->sk;
if (vsock_addr_cast(addr, addr_len, &vm_addr) != 0)
return -EINVAL;
lock_sock(sk);
err = __vsock_bind(sk, vm_addr);
release_sock(sk);
return err;
}
static int vsock_getname(struct socket *sock,
struct sockaddr *addr, int *addr_len, int peer)
{
int err;
struct sock *sk;
struct vsock_sock *vsk;
struct sockaddr_vm *vm_addr;
sk = sock->sk;
vsk = vsock_sk(sk);
err = 0;
lock_sock(sk);
if (peer) {
if (sock->state != SS_CONNECTED) {
err = -ENOTCONN;
goto out;
}
vm_addr = &vsk->remote_addr;
} else {
vm_addr = &vsk->local_addr;
}
if (!vm_addr) {
err = -EINVAL;
goto out;
}
/* sys_getsockname() and sys_getpeername() pass us a
* MAX_SOCK_ADDR-sized buffer and don't set addr_len. Unfortunately
* that macro is defined in socket.c instead of .h, so we hardcode its
* value here.
*/
BUILD_BUG_ON(sizeof(*vm_addr) > 128);
memcpy(addr, vm_addr, sizeof(*vm_addr));
*addr_len = sizeof(*vm_addr);
out:
release_sock(sk);
return err;
}
static int vsock_shutdown(struct socket *sock, int mode)
{
int err;
struct sock *sk;
/* User level uses SHUT_RD (0) and SHUT_WR (1), but the kernel uses
* RCV_SHUTDOWN (1) and SEND_SHUTDOWN (2), so we must increment mode
* here like the other address families do. Note also that the
* increment makes SHUT_RDWR (2) into RCV_SHUTDOWN | SEND_SHUTDOWN (3),
* which is what we want.
*/
mode++;
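/*
 * Worked example (added commentary): SHUT_RD (0) becomes
 * RCV_SHUTDOWN (1), SHUT_WR (1) becomes SEND_SHUTDOWN (2), and
 * SHUT_RDWR (2) becomes RCV_SHUTDOWN | SEND_SHUTDOWN (3).
 */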
if ((mode & ~SHUTDOWN_MASK) || !mode)
return -EINVAL;
/* If this is a STREAM socket and it is not connected then bail out
* immediately. If it is a DGRAM socket then we must first kick the
* socket so that it wakes up from any sleeping calls, for example
* recv(), and then afterwards return the error.
*/
sk = sock->sk;
if (sock->state == SS_UNCONNECTED) {
err = -ENOTCONN;
if (sk->sk_type == SOCK_STREAM)
return err;
} else {
sock->state = SS_DISCONNECTING;
err = 0;
}
/* Receive and send shutdowns are treated alike. */
mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
if (mode) {
lock_sock(sk);
sk->sk_shutdown |= mode;
sk->sk_state_change(sk);
release_sock(sk);
if (sk->sk_type == SOCK_STREAM) {
sock_reset_flag(sk, SOCK_DONE);
vsock_send_shutdown(sk, mode);
}
}
return err;
}
static unsigned int vsock_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk;
unsigned int mask;
struct vsock_sock *vsk;
sk = sock->sk;
vsk = vsock_sk(sk);
poll_wait(file, sk_sleep(sk), wait);
mask = 0;
if (sk->sk_err)
/* Signify that there has been an error on this socket. */
mask |= POLLERR;
/* INET sockets treat local write shutdown and peer write shutdown as a
* case of POLLHUP set.
*/
if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
((sk->sk_shutdown & SEND_SHUTDOWN) &&
(vsk->peer_shutdown & SEND_SHUTDOWN))) {
mask |= POLLHUP;
}
if (sk->sk_shutdown & RCV_SHUTDOWN ||
vsk->peer_shutdown & SEND_SHUTDOWN) {
mask |= POLLRDHUP;
}
if (sock->type == SOCK_DGRAM) {
/* For datagram sockets we can read if there is something in
* the queue and write as long as the socket isn't shutdown for
* sending.
*/
if (!skb_queue_empty(&sk->sk_receive_queue) ||
(sk->sk_shutdown & RCV_SHUTDOWN)) {
mask |= POLLIN | POLLRDNORM;
}
if (!(sk->sk_shutdown & SEND_SHUTDOWN))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
} else if (sock->type == SOCK_STREAM) {
lock_sock(sk);
/* Listening sockets that have connections in their accept
* queue can be read.
*/
if (sk->sk_state == SS_LISTEN
&& !vsock_is_accept_queue_empty(sk))
mask |= POLLIN | POLLRDNORM;
/* If there is something in the queue then we can read. */
if (transport->stream_is_active(vsk) &&
!(sk->sk_shutdown & RCV_SHUTDOWN)) {
bool data_ready_now = false;
int ret = transport->notify_poll_in(
vsk, 1, &data_ready_now);
if (ret < 0) {
mask |= POLLERR;
} else {
if (data_ready_now)
mask |= POLLIN | POLLRDNORM;
}
}
/* Sockets whose connections have been closed, reset, or
* terminated should also be considered read, and we check the
* shutdown flag for that.
*/
if (sk->sk_shutdown & RCV_SHUTDOWN ||
vsk->peer_shutdown & SEND_SHUTDOWN) {
mask |= POLLIN | POLLRDNORM;
}
/* Connected sockets that can produce data can be written. */
if (sk->sk_state == SS_CONNECTED) {
if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
bool space_avail_now = false;
int ret = transport->notify_poll_out(
vsk, 1, &space_avail_now);
if (ret < 0) {
mask |= POLLERR;
} else {
if (space_avail_now)
/* Remove POLLWRBAND since INET
* sockets are not setting it.
*/
mask |= POLLOUT | POLLWRNORM;
}
}
}
/* Simulate INET socket poll behaviors, which sets
* POLLOUT|POLLWRNORM when peer is closed and nothing to read,
* but local send is not shutdown.
*/
if (sk->sk_state == SS_UNCONNECTED) {
if (!(sk->sk_shutdown & SEND_SHUTDOWN))
mask |= POLLOUT | POLLWRNORM;
}
release_sock(sk);
}
return mask;
}
static int vsock_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
int err;
struct sock *sk;
struct vsock_sock *vsk;
struct sockaddr_vm *remote_addr;
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
/* For now, MSG_DONTWAIT is always assumed... */
err = 0;
sk = sock->sk;
vsk = vsock_sk(sk);
lock_sock(sk);
if (!vsock_addr_bound(&vsk->local_addr)) {
struct sockaddr_vm local_addr;
vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
err = __vsock_bind(sk, &local_addr);
if (err != 0)
goto out;
}
/* If the provided message contains an address, use that. Otherwise
* fall back on the socket's remote handle (if it has been connected).
*/
if (msg->msg_name &&
vsock_addr_cast(msg->msg_name, msg->msg_namelen,
&remote_addr) == 0) {
/* Ensure this address is of the right type and is a valid
* destination.
*/
if (remote_addr->svm_cid == VMADDR_CID_ANY)
remote_addr->svm_cid = transport->get_local_cid();
if (!vsock_addr_bound(remote_addr)) {
err = -EINVAL;
goto out;
}
} else if (sock->state == SS_CONNECTED) {
remote_addr = &vsk->remote_addr;
if (remote_addr->svm_cid == VMADDR_CID_ANY)
remote_addr->svm_cid = transport->get_local_cid();
/* XXX Should connect() or this function ensure remote_addr is
* bound?
*/
if (!vsock_addr_bound(&vsk->remote_addr)) {
err = -EINVAL;
goto out;
}
} else {
err = -EINVAL;
goto out;
}
if (!transport->dgram_allow(remote_addr->svm_cid,
remote_addr->svm_port)) {
err = -EINVAL;
goto out;
}
err = transport->dgram_enqueue(vsk, remote_addr, msg->msg_iov, len);
out:
release_sock(sk);
return err;
}
static int vsock_dgram_connect(struct socket *sock,
struct sockaddr *addr, int addr_len, int flags)
{
int err;
struct sock *sk;
struct vsock_sock *vsk;
struct sockaddr_vm *remote_addr;
sk = sock->sk;
vsk = vsock_sk(sk);
err = vsock_addr_cast(addr, addr_len, &remote_addr);
if (err == -EAFNOSUPPORT && remote_addr->svm_family == AF_UNSPEC) {
lock_sock(sk);
vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY,
VMADDR_PORT_ANY);
sock->state = SS_UNCONNECTED;
release_sock(sk);
return 0;
} else if (err != 0)
return -EINVAL;
lock_sock(sk);
if (!vsock_addr_bound(&vsk->local_addr)) {
struct sockaddr_vm local_addr;
vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
err = __vsock_bind(sk, &local_addr);
if (err != 0)
goto out;
}
if (!transport->dgram_allow(remote_addr->svm_cid,
remote_addr->svm_port)) {
err = -EINVAL;
goto out;
}
memcpy(&vsk->remote_addr, remote_addr, sizeof(vsk->remote_addr));
sock->state = SS_CONNECTED;
out:
release_sock(sk);
return err;
}
static int vsock_dgram_recvmsg(struct kiocb *kiocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
return transport->dgram_dequeue(kiocb, vsock_sk(sock->sk), msg, len,
flags);
}
static const struct proto_ops vsock_dgram_ops = {
.family = PF_VSOCK,
.owner = THIS_MODULE,
.release = vsock_release,
.bind = vsock_bind,
.connect = vsock_dgram_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = vsock_getname,
.poll = vsock_poll,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = vsock_shutdown,
.setsockopt = sock_no_setsockopt,
.getsockopt = sock_no_getsockopt,
.sendmsg = vsock_dgram_sendmsg,
.recvmsg = vsock_dgram_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
};
static void vsock_connect_timeout(struct work_struct *work)
{
struct sock *sk;
struct vsock_sock *vsk;
vsk = container_of(work, struct vsock_sock, dwork.work);
sk = sk_vsock(vsk);
lock_sock(sk);
if (sk->sk_state == SS_CONNECTING &&
(sk->sk_shutdown != SHUTDOWN_MASK)) {
sk->sk_state = SS_UNCONNECTED;
sk->sk_err = ETIMEDOUT;
sk->sk_error_report(sk);
}
release_sock(sk);
sock_put(sk);
}
static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
int addr_len, int flags)
{
int err;
struct sock *sk;
struct vsock_sock *vsk;
struct sockaddr_vm *remote_addr;
long timeout;
DEFINE_WAIT(wait);
err = 0;
sk = sock->sk;
vsk = vsock_sk(sk);
lock_sock(sk);
/* XXX AF_UNSPEC should make us disconnect like AF_INET. */
switch (sock->state) {
case SS_CONNECTED:
err = -EISCONN;
goto out;
case SS_DISCONNECTING:
err = -EINVAL;
goto out;
case SS_CONNECTING:
/* This continues on so we can move sock into the SS_CONNECTED
* state once the connection has completed (at which point err
* will be set to zero also). Otherwise, we will either wait
* for the connection or return -EALREADY should this be a
* non-blocking call.
*/
err = -EALREADY;
break;
default:
if ((sk->sk_state == SS_LISTEN) ||
vsock_addr_cast(addr, addr_len, &remote_addr) != 0) {
err = -EINVAL;
goto out;
}
/* The hypervisor and well-known contexts do not have socket
* endpoints.
*/
if (!transport->stream_allow(remote_addr->svm_cid,
remote_addr->svm_port)) {
err = -ENETUNREACH;
goto out;
}
/* Set the remote address that we are connecting to. */
memcpy(&vsk->remote_addr, remote_addr,
sizeof(vsk->remote_addr));
/* Autobind this socket to the local address if necessary. */
if (!vsock_addr_bound(&vsk->local_addr)) {
struct sockaddr_vm local_addr;
vsock_addr_init(&local_addr, VMADDR_CID_ANY,
VMADDR_PORT_ANY);
err = __vsock_bind(sk, &local_addr);
if (err != 0)
goto out;
}
sk->sk_state = SS_CONNECTING;
err = transport->connect(vsk);
if (err < 0)
goto out;
/* Mark sock as connecting and set the error code to in
* progress in case this is a non-blocking connect.
*/
sock->state = SS_CONNECTING;
err = -EINPROGRESS;
}
/* The receive path will handle all communication until we are able to
* enter the connected state. Here we wait for the connection to be
* completed or a notification of an error.
*/
timeout = vsk->connect_timeout;
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
while (sk->sk_state != SS_CONNECTED && sk->sk_err == 0) {
if (flags & O_NONBLOCK) {
/* If we're not going to block, we schedule a timeout
* function to generate a timeout on the connection
* attempt, in case the peer doesn't respond in a
* timely manner. We hold on to the socket until the
* timeout fires.
*/
sock_hold(sk);
INIT_DELAYED_WORK(&vsk->dwork,
vsock_connect_timeout);
schedule_delayed_work(&vsk->dwork, timeout);
/* Skip ahead to preserve error code set above. */
goto out_wait;
}
release_sock(sk);
timeout = schedule_timeout(timeout);
lock_sock(sk);
if (signal_pending(current)) {
err = sock_intr_errno(timeout);
goto out_wait_error;
} else if (timeout == 0) {
err = -ETIMEDOUT;
goto out_wait_error;
}
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
}
if (sk->sk_err) {
err = -sk->sk_err;
goto out_wait_error;
} else
err = 0;
out_wait:
finish_wait(sk_sleep(sk), &wait);
out:
release_sock(sk);
return err;
out_wait_error:
sk->sk_state = SS_UNCONNECTED;
sock->state = SS_UNCONNECTED;
goto out_wait;
}
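/* Accept on a listening stream socket: sleep until the receive path has
 * queued an established child socket, then graft it onto newsock. A
 * child dequeued after the listener hit an error is marked rejected and
 * released rather than handed to the caller.
 */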
static int vsock_accept(struct socket *sock, struct socket *newsock, int flags)
{
struct sock *listener;
int err;
struct sock *connected;
struct vsock_sock *vconnected;
long timeout;
DEFINE_WAIT(wait);
err = 0;
listener = sock->sk;
lock_sock(listener);
if (sock->type != SOCK_STREAM) {
err = -EOPNOTSUPP;
goto out;
}
if (listener->sk_state != SS_LISTEN) {
err = -EINVAL;
goto out;
}
	/* Wait for child sockets to appear; these are the new sockets
* created upon connection establishment.
*/
timeout = sock_sndtimeo(listener, flags & O_NONBLOCK);
prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
while ((connected = vsock_dequeue_accept(listener)) == NULL &&
listener->sk_err == 0) {
release_sock(listener);
timeout = schedule_timeout(timeout);
lock_sock(listener);
if (signal_pending(current)) {
err = sock_intr_errno(timeout);
goto out_wait;
} else if (timeout == 0) {
err = -EAGAIN;
goto out_wait;
}
prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
}
if (listener->sk_err)
err = -listener->sk_err;
if (connected) {
listener->sk_ack_backlog--;
lock_sock(connected);
vconnected = vsock_sk(connected);
/* If the listener socket has received an error, then we should
* reject this socket and return. Note that we simply mark the
* socket rejected, drop our reference, and let the cleanup
* function handle the cleanup; the fact that we found it in
* the listener's accept queue guarantees that the cleanup
* function hasn't run yet.
*/
if (err) {
vconnected->rejected = true;
release_sock(connected);
sock_put(connected);
goto out_wait;
}
newsock->state = SS_CONNECTED;
sock_graft(connected, newsock);
release_sock(connected);
sock_put(connected);
}
out_wait:
finish_wait(sk_sleep(listener), &wait);
out:
release_sock(listener);
return err;
}
static int vsock_listen(struct socket *sock, int backlog)
{
int err;
struct sock *sk;
struct vsock_sock *vsk;
sk = sock->sk;
lock_sock(sk);
if (sock->type != SOCK_STREAM) {
err = -EOPNOTSUPP;
goto out;
}
if (sock->state != SS_UNCONNECTED) {
err = -EINVAL;
goto out;
}
vsk = vsock_sk(sk);
if (!vsock_addr_bound(&vsk->local_addr)) {
err = -EINVAL;
goto out;
}
sk->sk_max_ack_backlog = backlog;
sk->sk_state = SS_LISTEN;
err = 0;
out:
release_sock(sk);
return err;
}
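/* Socket options live at level AF_VSOCK. As a rough userspace sketch
 * (not part of this file), a two-second connect timeout would be set
 * with something like:
 *
 *	struct timeval tv = { .tv_sec = 2, .tv_usec = 0 };
 *	setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_CONNECT_TIMEOUT,
 *		   &tv, sizeof(tv));
 *
 * The timeval is converted to jiffies below; a value that rounds to
 * zero falls back to VSOCK_DEFAULT_CONNECT_TIMEOUT.
 */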
static int vsock_stream_setsockopt(struct socket *sock,
int level,
int optname,
char __user *optval,
unsigned int optlen)
{
int err;
struct sock *sk;
struct vsock_sock *vsk;
u64 val;
if (level != AF_VSOCK)
return -ENOPROTOOPT;
#define COPY_IN(_v) \
do { \
if (optlen < sizeof(_v)) { \
err = -EINVAL; \
goto exit; \
} \
if (copy_from_user(&_v, optval, sizeof(_v)) != 0) { \
err = -EFAULT; \
goto exit; \
} \
} while (0)
err = 0;
sk = sock->sk;
vsk = vsock_sk(sk);
lock_sock(sk);
switch (optname) {
case SO_VM_SOCKETS_BUFFER_SIZE:
COPY_IN(val);
transport->set_buffer_size(vsk, val);
break;
case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
COPY_IN(val);
transport->set_max_buffer_size(vsk, val);
break;
case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
COPY_IN(val);
transport->set_min_buffer_size(vsk, val);
break;
case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
struct timeval tv;
COPY_IN(tv);
		if (tv.tv_sec >= 0 && tv.tv_usec >= 0 &&
		    tv.tv_usec < USEC_PER_SEC &&
		    tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)) {
vsk->connect_timeout = tv.tv_sec * HZ +
DIV_ROUND_UP(tv.tv_usec, (1000000 / HZ));
if (vsk->connect_timeout == 0)
vsk->connect_timeout =
VSOCK_DEFAULT_CONNECT_TIMEOUT;
} else {
err = -ERANGE;
}
break;
}
default:
err = -ENOPROTOOPT;
break;
}
#undef COPY_IN
exit:
release_sock(sk);
return err;
}
static int vsock_stream_getsockopt(struct socket *sock,
int level, int optname,
char __user *optval,
int __user *optlen)
{
int err;
int len;
struct sock *sk;
struct vsock_sock *vsk;
u64 val;
if (level != AF_VSOCK)
return -ENOPROTOOPT;
err = get_user(len, optlen);
if (err != 0)
return err;
#define COPY_OUT(_v) \
do { \
if (len < sizeof(_v)) \
return -EINVAL; \
\
len = sizeof(_v); \
if (copy_to_user(optval, &_v, len) != 0) \
return -EFAULT; \
\
} while (0)
err = 0;
sk = sock->sk;
vsk = vsock_sk(sk);
switch (optname) {
case SO_VM_SOCKETS_BUFFER_SIZE:
val = transport->get_buffer_size(vsk);
COPY_OUT(val);
break;
case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
val = transport->get_max_buffer_size(vsk);
COPY_OUT(val);
break;
case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
val = transport->get_min_buffer_size(vsk);
COPY_OUT(val);
break;
case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
struct timeval tv;
tv.tv_sec = vsk->connect_timeout / HZ;
tv.tv_usec =
(vsk->connect_timeout -
tv.tv_sec * HZ) * (1000000 / HZ);
COPY_OUT(tv);
break;
}
default:
return -ENOPROTOOPT;
}
err = put_user(len, optlen);
if (err != 0)
return -EFAULT;
#undef COPY_OUT
return 0;
}
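/* Stream send path: after the shutdown and connection-state checks, loop
 * enqueueing into the transport's produce queue, sleeping whenever the
 * queue is full. A partial write still returns the number of bytes
 * written (see out_wait), so callers must handle short writes.
 */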
static int vsock_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk;
struct vsock_sock *vsk;
ssize_t total_written;
long timeout;
int err;
struct vsock_transport_send_notify_data send_data;
DEFINE_WAIT(wait);
sk = sock->sk;
vsk = vsock_sk(sk);
total_written = 0;
err = 0;
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
lock_sock(sk);
/* Callers should not provide a destination with stream sockets. */
if (msg->msg_namelen) {
err = sk->sk_state == SS_CONNECTED ? -EISCONN : -EOPNOTSUPP;
goto out;
}
	/* Send data only if neither side has shut down in this direction. */
if (sk->sk_shutdown & SEND_SHUTDOWN ||
vsk->peer_shutdown & RCV_SHUTDOWN) {
err = -EPIPE;
goto out;
}
if (sk->sk_state != SS_CONNECTED ||
!vsock_addr_bound(&vsk->local_addr)) {
err = -ENOTCONN;
goto out;
}
if (!vsock_addr_bound(&vsk->remote_addr)) {
err = -EDESTADDRREQ;
goto out;
}
/* Wait for room in the produce queue to enqueue our user's data. */
timeout = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
err = transport->notify_send_init(vsk, &send_data);
if (err < 0)
goto out;
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
while (total_written < len) {
ssize_t written;
while (vsock_stream_has_space(vsk) == 0 &&
sk->sk_err == 0 &&
!(sk->sk_shutdown & SEND_SHUTDOWN) &&
!(vsk->peer_shutdown & RCV_SHUTDOWN)) {
/* Don't wait for non-blocking sockets. */
if (timeout == 0) {
err = -EAGAIN;
goto out_wait;
}
err = transport->notify_send_pre_block(vsk, &send_data);
if (err < 0)
goto out_wait;
release_sock(sk);
timeout = schedule_timeout(timeout);
lock_sock(sk);
if (signal_pending(current)) {
err = sock_intr_errno(timeout);
goto out_wait;
} else if (timeout == 0) {
err = -EAGAIN;
goto out_wait;
}
prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
}
/* These checks occur both as part of and after the loop
* conditional since we need to check before and after
* sleeping.
*/
if (sk->sk_err) {
err = -sk->sk_err;
goto out_wait;
} else if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
(vsk->peer_shutdown & RCV_SHUTDOWN)) {
err = -EPIPE;
goto out_wait;
}
err = transport->notify_send_pre_enqueue(vsk, &send_data);
if (err < 0)
goto out_wait;
/* Note that enqueue will only write as many bytes as are free
* in the produce queue, so we don't need to ensure len is
* smaller than the queue size. It is the caller's
* responsibility to check how many bytes we were able to send.
*/
written = transport->stream_enqueue(
vsk, msg->msg_iov,
len - total_written);
if (written < 0) {
err = -ENOMEM;
goto out_wait;
}
total_written += written;
err = transport->notify_send_post_enqueue(
vsk, written, &send_data);
if (err < 0)
goto out_wait;
}
out_wait:
if (total_written > 0)
err = total_written;
finish_wait(sk_sleep(sk), &wait);
out:
release_sock(sk);
return err;
}
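/* Stream receive path. 'target' (len for MSG_WAITALL, otherwise
 * SO_RCVLOWAT capped at len) is the minimum number of bytes to copy
 * before returning successfully; the loop dequeues from the transport
 * until target is met, the peer shuts down, an error occurs, or the
 * timeout expires. MSG_PEEK returns after a single dequeue without
 * consuming data.
 */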
static int
vsock_stream_recvmsg(struct kiocb *kiocb,
struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
struct sock *sk;
struct vsock_sock *vsk;
int err;
size_t target;
ssize_t copied;
long timeout;
struct vsock_transport_recv_notify_data recv_data;
DEFINE_WAIT(wait);
sk = sock->sk;
vsk = vsock_sk(sk);
err = 0;
lock_sock(sk);
if (sk->sk_state != SS_CONNECTED) {
/* Recvmsg is supposed to return 0 if a peer performs an
* orderly shutdown. Differentiate between that case and when a
	 * peer has not connected or a local shutdown occurred with the
* SOCK_DONE flag.
*/
if (sock_flag(sk, SOCK_DONE))
err = 0;
else
err = -ENOTCONN;
goto out;
}
if (flags & MSG_OOB) {
err = -EOPNOTSUPP;
goto out;
}
/* We don't check peer_shutdown flag here since peer may actually shut
* down, but there can be data in the queue that a local socket can
* receive.
*/
if (sk->sk_shutdown & RCV_SHUTDOWN) {
err = 0;
goto out;
}
/* It is valid on Linux to pass in a zero-length receive buffer. This
* is not an error. We may as well bail out now.
*/
if (!len) {
err = 0;
goto out;
}
/* We must not copy less than target bytes into the user's buffer
* before returning successfully, so we wait for the consume queue to
* have that much data to consume before dequeueing. Note that this
* makes it impossible to handle cases where target is greater than the
* queue size.
*/
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
if (target >= transport->stream_rcvhiwat(vsk)) {
err = -ENOMEM;
goto out;
}
timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
copied = 0;
err = transport->notify_recv_init(vsk, target, &recv_data);
if (err < 0)
goto out;
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
while (1) {
s64 ready = vsock_stream_has_data(vsk);
if (ready < 0) {
/* Invalid queue pair content. XXX This should be
* changed to a connection reset in a later change.
*/
err = -ENOMEM;
goto out_wait;
} else if (ready > 0) {
ssize_t read;
err = transport->notify_recv_pre_dequeue(
vsk, target, &recv_data);
if (err < 0)
break;
read = transport->stream_dequeue(
vsk, msg->msg_iov,
len - copied, flags);
if (read < 0) {
err = -ENOMEM;
break;
}
copied += read;
err = transport->notify_recv_post_dequeue(
vsk, target, read,
!(flags & MSG_PEEK), &recv_data);
if (err < 0)
goto out_wait;
if (read >= target || flags & MSG_PEEK)
break;
target -= read;
} else {
if (sk->sk_err != 0 || (sk->sk_shutdown & RCV_SHUTDOWN)
|| (vsk->peer_shutdown & SEND_SHUTDOWN)) {
break;
}
/* Don't wait for non-blocking sockets. */
if (timeout == 0) {
err = -EAGAIN;
break;
}
err = transport->notify_recv_pre_block(
vsk, target, &recv_data);
if (err < 0)
break;
release_sock(sk);
timeout = schedule_timeout(timeout);
lock_sock(sk);
if (signal_pending(current)) {
err = sock_intr_errno(timeout);
break;
} else if (timeout == 0) {
err = -EAGAIN;
break;
}
prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
}
}
if (sk->sk_err)
err = -sk->sk_err;
else if (sk->sk_shutdown & RCV_SHUTDOWN)
err = 0;
if (copied > 0) {
/* We only do these additional bookkeeping/notification steps
* if we actually copied something out of the queue pair
* instead of just peeking ahead.
*/
if (!(flags & MSG_PEEK)) {
/* If the other side has shutdown for sending and there
* is nothing more to read, then modify the socket
* state.
*/
if (vsk->peer_shutdown & SEND_SHUTDOWN) {
if (vsock_stream_has_data(vsk) <= 0) {
sk->sk_state = SS_UNCONNECTED;
sock_set_flag(sk, SOCK_DONE);
sk->sk_state_change(sk);
}
}
}
err = copied;
}
out_wait:
finish_wait(sk_sleep(sk), &wait);
out:
release_sock(sk);
return err;
}
static const struct proto_ops vsock_stream_ops = {
.family = PF_VSOCK,
.owner = THIS_MODULE,
.release = vsock_release,
.bind = vsock_bind,
.connect = vsock_stream_connect,
.socketpair = sock_no_socketpair,
.accept = vsock_accept,
.getname = vsock_getname,
.poll = vsock_poll,
.ioctl = sock_no_ioctl,
.listen = vsock_listen,
.shutdown = vsock_shutdown,
.setsockopt = vsock_stream_setsockopt,
.getsockopt = vsock_stream_getsockopt,
.sendmsg = vsock_stream_sendmsg,
.recvmsg = vsock_stream_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
};
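/* Family-level socket(2) entry point: pick the proto_ops matching the
 * requested type and allocate the underlying sock. Only SOCK_DGRAM and
 * SOCK_STREAM are supported, and the only non-zero protocol accepted is
 * PF_VSOCK itself.
 */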
static int vsock_create(struct net *net, struct socket *sock,
int protocol, int kern)
{
if (!sock)
return -EINVAL;
if (protocol && protocol != PF_VSOCK)
return -EPROTONOSUPPORT;
switch (sock->type) {
case SOCK_DGRAM:
sock->ops = &vsock_dgram_ops;
break;
case SOCK_STREAM:
sock->ops = &vsock_stream_ops;
break;
default:
return -ESOCKTNOSUPPORT;
}
sock->state = SS_UNCONNECTED;
return __vsock_create(net, sock, NULL, GFP_KERNEL, 0) ? 0 : -ENOMEM;
}
static const struct net_proto_family vsock_family_ops = {
.family = AF_VSOCK,
.create = vsock_create,
.owner = THIS_MODULE,
};
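/* Misc-device ioctl handler; the only supported request reports the
 * local CID. A rough userspace sketch (error handling omitted, device
 * name per vsock_device below):
 *
 *	unsigned int cid;
 *	int fd = open("/dev/vsock", O_RDONLY);
 *	ioctl(fd, IOCTL_VM_SOCKETS_GET_LOCAL_CID, &cid);
 */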
static long vsock_dev_do_ioctl(struct file *filp,
unsigned int cmd, void __user *ptr)
{
u32 __user *p = ptr;
int retval = 0;
switch (cmd) {
case IOCTL_VM_SOCKETS_GET_LOCAL_CID:
if (put_user(transport->get_local_cid(), p) != 0)
retval = -EFAULT;
break;
default:
pr_err("Unknown ioctl %d\n", cmd);
retval = -EINVAL;
}
return retval;
}
static long vsock_dev_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg)
{
return vsock_dev_do_ioctl(filp, cmd, (void __user *)arg);
}
#ifdef CONFIG_COMPAT
static long vsock_dev_compat_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg)
{
return vsock_dev_do_ioctl(filp, cmd, compat_ptr(arg));
}
#endif
static const struct file_operations vsock_device_ops = {
.owner = THIS_MODULE,
.unlocked_ioctl = vsock_dev_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = vsock_dev_compat_ioctl,
#endif
.open = nonseekable_open,
};
static struct miscdevice vsock_device = {
.name = "vsock",
.minor = MISC_DYNAMIC_MINOR,
.fops = &vsock_device_ops,
};
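/* Bring-up order: misc device, then protocol, then address family, with
 * the error paths unwinding whatever already succeeded. Note a
 * misc_register() failure is reported as -ENOENT rather than the
 * original error code.
 */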
static int __vsock_core_init(void)
{
int err;
vsock_init_tables();
err = misc_register(&vsock_device);
if (err) {
pr_err("Failed to register misc device\n");
return -ENOENT;
}
err = proto_register(&vsock_proto, 1); /* we want our slab */
if (err) {
pr_err("Cannot register vsock protocol\n");
goto err_misc_deregister;
}
err = sock_register(&vsock_family_ops);
if (err) {
pr_err("could not register af_vsock (%d) address family: %d\n",
AF_VSOCK, err);
goto err_unregister_proto;
}
return 0;
err_unregister_proto:
proto_unregister(&vsock_proto);
err_misc_deregister:
misc_deregister(&vsock_device);
return err;
}
int vsock_core_init(const struct vsock_transport *t)
{
int retval = mutex_lock_interruptible(&vsock_register_mutex);
if (retval)
return retval;
if (transport) {
retval = -EBUSY;
goto out;
}
transport = t;
retval = __vsock_core_init();
if (retval)
transport = NULL;
out:
mutex_unlock(&vsock_register_mutex);
return retval;
}
EXPORT_SYMBOL_GPL(vsock_core_init);
void vsock_core_exit(void)
{
mutex_lock(&vsock_register_mutex);
misc_deregister(&vsock_device);
sock_unregister(AF_VSOCK);
proto_unregister(&vsock_proto);
/* We do not want the assignment below re-ordered. */
mb();
transport = NULL;
mutex_unlock(&vsock_register_mutex);
}
EXPORT_SYMBOL_GPL(vsock_core_exit);
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Socket Family");
MODULE_VERSION("1.0.0.0-k");
MODULE_LICENSE("GPL v2");
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_5697_0 |
crossvul-cpp_data_bad_4051_0 | /*
* Copyright (C) 1996-1998,2012 Michael R. Elkins <me@mutt.org>
* Copyright (C) 1996-1999 Brandon Long <blong@fiction.net>
* Copyright (C) 1999-2009,2012,2017 Brendan Cully <brendan@kublai.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/* Support for IMAP4rev1, with the occasional nod to IMAP 4. */
#if HAVE_CONFIG_H
# include "config.h"
#endif
#include "mutt.h"
#include "mx.h"
#include "mailbox.h"
#include "globals.h"
#include "sort.h"
#include "browser.h"
#include "imap_private.h"
#if defined(USE_SSL)
# include "mutt_ssl.h"
#endif
#if defined(USE_ZLIB)
# include "mutt_zstrm.h"
#endif
#include "buffy.h"
#if USE_HCACHE
#include "hcache.h"
#endif
#include <unistd.h>
#include <ctype.h>
#include <string.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
/* imap forward declarations */
static char* imap_get_flags (LIST** hflags, char* s);
static int imap_check_capabilities (IMAP_DATA* idata);
static void imap_set_flag (IMAP_DATA* idata, int aclbit, int flag,
const char* str, char* flags, size_t flsize);
/* imap_access: Check permissions on an IMAP mailbox.
* TODO: ACL checks. Right now we assume if it exists we can
* mess with it. */
int imap_access (const char* path)
{
IMAP_DATA* idata;
IMAP_MBOX mx;
char buf[LONG_STRING*2];
char mailbox[LONG_STRING];
char mbox[LONG_STRING];
int rc;
if (imap_parse_path (path, &mx))
return -1;
if (!(idata = imap_conn_find (&mx.account,
option (OPTIMAPPASSIVE) ? MUTT_IMAP_CONN_NONEW : 0)))
{
FREE (&mx.mbox);
return -1;
}
imap_fix_path (idata, mx.mbox, mailbox, sizeof (mailbox));
if (!*mailbox)
strfcpy (mailbox, "INBOX", sizeof (mailbox));
/* we may already be in the folder we're checking */
if (!ascii_strcmp(idata->mailbox, mx.mbox))
{
FREE (&mx.mbox);
return 0;
}
FREE (&mx.mbox);
if (imap_mboxcache_get (idata, mailbox, 0))
{
dprint (3, (debugfile, "imap_access: found %s in cache\n", mailbox));
return 0;
}
imap_munge_mbox_name (idata, mbox, sizeof (mbox), mailbox);
if (mutt_bit_isset (idata->capabilities, IMAP4REV1))
snprintf (buf, sizeof (buf), "STATUS %s (UIDVALIDITY)", mbox);
else if (mutt_bit_isset (idata->capabilities, STATUS))
snprintf (buf, sizeof (buf), "STATUS %s (UID-VALIDITY)", mbox);
else
{
dprint (2, (debugfile, "imap_access: STATUS not supported?\n"));
return -1;
}
if ((rc = imap_exec (idata, buf, IMAP_CMD_FAIL_OK)) < 0)
{
dprint (1, (debugfile, "imap_access: Can't check STATUS of %s\n", mbox));
return rc;
}
return 0;
}
int imap_create_mailbox (IMAP_DATA* idata, char* mailbox)
{
char buf[LONG_STRING*2], mbox[LONG_STRING];
imap_munge_mbox_name (idata, mbox, sizeof (mbox), mailbox);
snprintf (buf, sizeof (buf), "CREATE %s", mbox);
if (imap_exec (idata, buf, 0) != 0)
{
mutt_error (_("CREATE failed: %s"), imap_cmd_trailer (idata));
return -1;
}
return 0;
}
int imap_rename_mailbox (IMAP_DATA* idata, IMAP_MBOX* mx, const char* newname)
{
char oldmbox[LONG_STRING];
char newmbox[LONG_STRING];
BUFFER *b;
int rc = 0;
imap_munge_mbox_name (idata, oldmbox, sizeof (oldmbox), mx->mbox);
imap_munge_mbox_name (idata, newmbox, sizeof (newmbox), newname);
b = mutt_buffer_pool_get ();
mutt_buffer_printf (b, "RENAME %s %s", oldmbox, newmbox);
if (imap_exec (idata, mutt_b2s (b), 0) != 0)
rc = -1;
mutt_buffer_pool_release (&b);
return rc;
}
int imap_delete_mailbox (CONTEXT* ctx, IMAP_MBOX mx)
{
char buf[LONG_STRING*2], mbox[LONG_STRING];
IMAP_DATA *idata;
if (!ctx || !ctx->data)
{
if (!(idata = imap_conn_find (&mx.account,
option (OPTIMAPPASSIVE) ? MUTT_IMAP_CONN_NONEW : 0)))
{
FREE (&mx.mbox);
return -1;
}
}
else
{
idata = ctx->data;
}
imap_munge_mbox_name (idata, mbox, sizeof (mbox), mx.mbox);
snprintf (buf, sizeof (buf), "DELETE %s", mbox);
if (imap_exec ((IMAP_DATA*) idata, buf, 0) != 0)
return -1;
return 0;
}
/* imap_logout_all: close all open connections. Quick and dirty until we can
* make sure we've got all the context we need. */
void imap_logout_all (void)
{
CONNECTION* conn;
CONNECTION* tmp;
conn = mutt_socket_head ();
while (conn)
{
tmp = conn->next;
if (conn->account.type == MUTT_ACCT_TYPE_IMAP && conn->fd >= 0)
{
mutt_message (_("Closing connection to %s..."), conn->account.host);
imap_logout ((IMAP_DATA**) (void*) &conn->data);
mutt_clear_error ();
mutt_socket_free (conn);
}
conn = tmp;
}
}
/* imap_read_literal: read 'bytes' bytes from the server into file. Not
 * explicitly buffered, relies on FILE buffering. NOTE: strips \r from \r\n.
 * Apparently even literals use \r\n-terminated strings ?! */
int imap_read_literal (FILE* fp, IMAP_DATA* idata, unsigned int bytes, progress_t* pbar)
{
unsigned int pos;
char c;
int r = 0;
dprint (2, (debugfile, "imap_read_literal: reading %ld bytes\n", bytes));
for (pos = 0; pos < bytes; pos++)
{
if (mutt_socket_readchar (idata->conn, &c) != 1)
{
dprint (1, (debugfile, "imap_read_literal: error during read, %ld bytes read\n", pos));
idata->status = IMAP_FATAL;
return -1;
}
    /* Hold back a '\r' until the next character arrives: a following
     * '\n' means it was part of a CRLF line ending and gets dropped;
     * anything else means the '\r' was literal data and is written out
     * before the current character. */
    if (r == 1 && c != '\n')
      fputc ('\r', fp);

    if (c == '\r')
    {
      r = 1;
      continue;
    }
    else
      r = 0;
fputc (c, fp);
if (pbar && !(pos % 1024))
mutt_progress_update (pbar, pos, -1);
#ifdef DEBUG
if (debuglevel >= IMAP_LOG_LTRL)
fputc (c, debugfile);
#endif
}
return 0;
}
/* imap_expunge_mailbox: Purge IMAP portion of expunged messages from the
* context. Must not be done while something has a handle on any headers
* (eg inside pager or editor). That is, check IMAP_REOPEN_ALLOW. */
void imap_expunge_mailbox (IMAP_DATA* idata)
{
HEADER* h;
int i, cacheno;
short old_sort;
#ifdef USE_HCACHE
idata->hcache = imap_hcache_open (idata, NULL);
#endif
old_sort = Sort;
Sort = SORT_ORDER;
mutt_sort_headers (idata->ctx, 0);
for (i = 0; i < idata->ctx->msgcount; i++)
{
h = idata->ctx->hdrs[i];
if (h->index == INT_MAX)
{
dprint (2, (debugfile, "Expunging message UID %u.\n", HEADER_DATA (h)->uid));
h->active = 0;
idata->ctx->size -= h->content->length;
imap_cache_del (idata, h);
#if USE_HCACHE
imap_hcache_del (idata, HEADER_DATA(h)->uid);
#endif
/* free cached body from disk, if necessary */
cacheno = HEADER_DATA(h)->uid % IMAP_CACHE_LEN;
if (idata->cache[cacheno].uid == HEADER_DATA(h)->uid &&
idata->cache[cacheno].path)
{
unlink (idata->cache[cacheno].path);
FREE (&idata->cache[cacheno].path);
}
int_hash_delete (idata->uid_hash, HEADER_DATA(h)->uid, h, NULL);
imap_free_header_data ((IMAP_HEADER_DATA**)&h->data);
}
else
{
h->index = i;
/* Mutt has several places where it turns off h->active as a
* hack. For example to avoid FLAG updates, or to exclude from
* imap_exec_msgset.
*
* Unfortunately, when a reopen is allowed and the IMAP_EXPUNGE_PENDING
* flag becomes set (e.g. a flag update to a modified header),
* this function will be called by imap_cmd_finish().
*
* The mx_update_tables() will free and remove these "inactive" headers,
* despite that an EXPUNGE was not received for them.
* This would result in memory leaks and segfaults due to dangling
* pointers in the msn_index and uid_hash.
*
* So this is another hack to work around the hacks. We don't want to
* remove the messages, so make sure active is on.
*/
h->active = 1;
}
}
#if USE_HCACHE
imap_hcache_close (idata);
#endif
/* We may be called on to expunge at any time. We can't rely on the caller
* to always know to rethread */
mx_update_tables (idata->ctx, 0);
Sort = old_sort;
mutt_sort_headers (idata->ctx, 1);
}
/* imap_check_capabilities: make sure we can log in to this server. */
static int imap_check_capabilities (IMAP_DATA* idata)
{
if (imap_exec (idata, "CAPABILITY", 0) != 0)
{
imap_error ("imap_check_capabilities", idata->buf);
return -1;
}
if (!(mutt_bit_isset(idata->capabilities,IMAP4) ||
mutt_bit_isset(idata->capabilities,IMAP4REV1)))
{
mutt_error _("This IMAP server is ancient. Mutt does not work with it.");
mutt_sleep (2); /* pause a moment to let the user see the error */
return -1;
}
return 0;
}
/**
* imap_conn_find
*
* Returns an authenticated IMAP connection matching account, or NULL
* if that isn't possible.
*
* flags:
* MUTT_IMAP_CONN_NONEW - must be an existing connection
* MUTT_IMAP_CONN_NOSELECT - must not be in the IMAP_SELECTED state.
*/
IMAP_DATA* imap_conn_find (const ACCOUNT* account, int flags)
{
CONNECTION* conn = NULL;
ACCOUNT* creds = NULL;
IMAP_DATA* idata = NULL;
int new = 0;
while ((conn = mutt_conn_find (conn, account)))
{
if (!creds)
creds = &conn->account;
else
memcpy (&conn->account, creds, sizeof (ACCOUNT));
idata = (IMAP_DATA*)conn->data;
if (flags & MUTT_IMAP_CONN_NONEW)
{
if (!idata)
{
/* This should only happen if we've come to the end of the list */
mutt_socket_free (conn);
return NULL;
}
else if (idata->state < IMAP_AUTHENTICATED)
continue;
}
if (flags & MUTT_IMAP_CONN_NOSELECT && idata && idata->state >= IMAP_SELECTED)
continue;
if (idata && idata->status == IMAP_FATAL)
continue;
break;
}
if (!conn)
return NULL; /* this happens when the initial connection fails */
/* The current connection is a new connection */
if (!idata)
{
idata = imap_new_idata ();
conn->data = idata;
idata->conn = conn;
new = 1;
}
if (idata->state == IMAP_DISCONNECTED)
imap_open_connection (idata);
if (idata->state == IMAP_CONNECTED)
{
if (!imap_authenticate (idata))
{
idata->state = IMAP_AUTHENTICATED;
FREE (&idata->capstr);
new = 1;
if (idata->conn->ssf)
dprint (2, (debugfile, "Communication encrypted at %d bits\n",
idata->conn->ssf));
}
else
mutt_account_unsetpass (&idata->conn->account);
}
if (new && idata->state == IMAP_AUTHENTICATED)
{
/* capabilities may have changed */
imap_exec (idata, "CAPABILITY", IMAP_CMD_FAIL_OK);
#if defined(USE_ZLIB)
/* RFC 4978 */
if (mutt_bit_isset (idata->capabilities, COMPRESS_DEFLATE))
{
if (option (OPTIMAPDEFLATE) &&
imap_exec (idata, "COMPRESS DEFLATE", IMAP_CMD_FAIL_OK) == 0)
mutt_zstrm_wrap_conn (idata->conn);
}
#endif
/* enable RFC6855, if the server supports that */
if (mutt_bit_isset (idata->capabilities, ENABLE))
imap_exec (idata, "ENABLE UTF8=ACCEPT", IMAP_CMD_QUEUE);
/* enable QRESYNC. Advertising QRESYNC also means CONDSTORE
* is supported (even if not advertised), so flip that bit. */
if (mutt_bit_isset (idata->capabilities, QRESYNC))
{
mutt_bit_set (idata->capabilities, CONDSTORE);
if (option (OPTIMAPQRESYNC))
imap_exec (idata, "ENABLE QRESYNC", IMAP_CMD_QUEUE);
}
/* get root delimiter, '/' as default */
idata->delim = '/';
imap_exec (idata, "LIST \"\" \"\"", IMAP_CMD_QUEUE);
if (option (OPTIMAPCHECKSUBSCRIBED))
imap_exec (idata, "LSUB \"\" \"*\"", IMAP_CMD_QUEUE);
/* we may need the root delimiter before we open a mailbox */
imap_exec (idata, NULL, IMAP_CMD_FAIL_OK);
}
if (idata->state < IMAP_AUTHENTICATED)
return NULL;
return idata;
}
int imap_open_connection (IMAP_DATA* idata)
{
if (mutt_socket_open (idata->conn) < 0)
return -1;
idata->state = IMAP_CONNECTED;
if (imap_cmd_step (idata) != IMAP_CMD_OK)
{
imap_close_connection (idata);
return -1;
}
if (ascii_strncasecmp ("* OK", idata->buf, 4) == 0)
{
if (ascii_strncasecmp ("* OK [CAPABILITY", idata->buf, 16)
&& imap_check_capabilities (idata))
goto bail;
#if defined(USE_SSL)
/* Attempt STARTTLS if available and desired. */
if (!idata->conn->ssf && (option(OPTSSLFORCETLS) ||
mutt_bit_isset (idata->capabilities, STARTTLS)))
{
int rc;
if (option(OPTSSLFORCETLS))
rc = MUTT_YES;
else if ((rc = query_quadoption (OPT_SSLSTARTTLS,
_("Secure connection with TLS?"))) == -1)
goto err_close_conn;
if (rc == MUTT_YES)
{
if ((rc = imap_exec (idata, "STARTTLS", IMAP_CMD_FAIL_OK)) == -1)
goto bail;
if (rc != -2)
{
if (mutt_ssl_starttls (idata->conn))
{
mutt_error (_("Could not negotiate TLS connection"));
mutt_sleep (1);
goto err_close_conn;
}
else
{
/* RFC 2595 demands we recheck CAPABILITY after TLS completes. */
if (imap_exec (idata, "CAPABILITY", 0))
goto bail;
}
}
}
}
if (option(OPTSSLFORCETLS) && ! idata->conn->ssf)
{
mutt_error _("Encrypted connection unavailable");
mutt_sleep (1);
goto err_close_conn;
}
#endif
}
else if (ascii_strncasecmp ("* PREAUTH", idata->buf, 9) == 0)
{
idata->state = IMAP_AUTHENTICATED;
if (imap_check_capabilities (idata) != 0)
goto bail;
FREE (&idata->capstr);
}
else
{
imap_error ("imap_open_connection()", idata->buf);
goto bail;
}
return 0;
#if defined(USE_SSL)
err_close_conn:
imap_close_connection (idata);
#endif
bail:
FREE (&idata->capstr);
return -1;
}
void imap_close_connection(IMAP_DATA* idata)
{
if (idata->state != IMAP_DISCONNECTED)
{
mutt_socket_close (idata->conn);
idata->state = IMAP_DISCONNECTED;
}
idata->seqno = idata->nextcmd = idata->lastcmd = idata->status = 0;
memset (idata->cmds, 0, sizeof (IMAP_COMMAND) * idata->cmdslots);
}
/* imap_get_flags: Make a simple list out of a FLAGS response.
* return stream following FLAGS response */
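/* For example, given the RFC 3501 response
 *   FLAGS (\Answered \Flagged \Deleted \Seen \Draft)
 * this fills *hflags with the five flag words and returns a pointer just
 * past the closing parenthesis. */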
static char* imap_get_flags (LIST** hflags, char* s)
{
LIST* flags;
char* flag_word;
char ctmp;
/* sanity-check string */
if (ascii_strncasecmp ("FLAGS", s, 5) != 0)
{
dprint (1, (debugfile, "imap_get_flags: not a FLAGS response: %s\n",
s));
return NULL;
}
s += 5;
SKIPWS(s);
if (*s != '(')
{
dprint (1, (debugfile, "imap_get_flags: bogus FLAGS response: %s\n",
s));
return NULL;
}
/* create list, update caller's flags handle */
flags = mutt_new_list();
*hflags = flags;
while (*s && *s != ')')
{
s++;
SKIPWS(s);
flag_word = s;
while (*s && (*s != ')') && !ISSPACE (*s))
s++;
ctmp = *s;
*s = '\0';
if (*flag_word)
mutt_add_list (flags, flag_word);
*s = ctmp;
}
/* note bad flags response */
if (*s != ')')
{
dprint (1, (debugfile,
"imap_get_flags: Unterminated FLAGS response: %s\n", s));
mutt_free_list (hflags);
return NULL;
}
s++;
return s;
}
static int imap_open_mailbox (CONTEXT* ctx)
{
IMAP_DATA *idata;
IMAP_STATUS* status;
char buf[LONG_STRING];
char bufout[LONG_STRING*2];
int count = 0;
IMAP_MBOX mx, pmx;
int rc;
const char *condstore;
if (imap_parse_path (ctx->path, &mx))
{
mutt_error (_("%s is an invalid IMAP path"), ctx->path);
return -1;
}
/* we require a connection which isn't currently in IMAP_SELECTED state */
if (!(idata = imap_conn_find (&(mx.account), MUTT_IMAP_CONN_NOSELECT)))
goto fail_noidata;
/* once again the context is new */
ctx->data = idata;
/* Clean up path and replace the one in the ctx */
imap_fix_path (idata, mx.mbox, buf, sizeof (buf));
if (!*buf)
strfcpy (buf, "INBOX", sizeof (buf));
FREE(&(idata->mailbox));
idata->mailbox = safe_strdup (buf);
imap_qualify_path (buf, sizeof (buf), &mx, idata->mailbox);
FREE (&(ctx->path));
FREE (&(ctx->realpath));
ctx->path = safe_strdup (buf);
ctx->realpath = safe_strdup (ctx->path);
idata->ctx = ctx;
/* clear mailbox status */
idata->status = 0;
memset (idata->ctx->rights, 0, sizeof (idata->ctx->rights));
idata->newMailCount = 0;
idata->max_msn = 0;
if (!ctx->quiet)
mutt_message (_("Selecting %s..."), idata->mailbox);
imap_munge_mbox_name (idata, buf, sizeof(buf), idata->mailbox);
/* pipeline ACL test */
if (mutt_bit_isset (idata->capabilities, ACL))
{
snprintf (bufout, sizeof (bufout), "MYRIGHTS %s", buf);
imap_exec (idata, bufout, IMAP_CMD_QUEUE);
}
/* assume we have all rights if ACL is unavailable */
else
{
mutt_bit_set (idata->ctx->rights, MUTT_ACL_LOOKUP);
mutt_bit_set (idata->ctx->rights, MUTT_ACL_READ);
mutt_bit_set (idata->ctx->rights, MUTT_ACL_SEEN);
mutt_bit_set (idata->ctx->rights, MUTT_ACL_WRITE);
mutt_bit_set (idata->ctx->rights, MUTT_ACL_INSERT);
mutt_bit_set (idata->ctx->rights, MUTT_ACL_POST);
mutt_bit_set (idata->ctx->rights, MUTT_ACL_CREATE);
mutt_bit_set (idata->ctx->rights, MUTT_ACL_DELETE);
}
/* pipeline the postponed count if possible */
pmx.mbox = NULL;
if (mx_is_imap (Postponed) && !imap_parse_path (Postponed, &pmx)
&& mutt_account_match (&pmx.account, &mx.account))
imap_status (Postponed, 1);
FREE (&pmx.mbox);
#if USE_HCACHE
if (mutt_bit_isset (idata->capabilities, CONDSTORE) &&
option (OPTIMAPCONDSTORE))
condstore = " (CONDSTORE)";
else
#endif
condstore = "";
snprintf (bufout, sizeof (bufout), "%s %s%s",
ctx->readonly ? "EXAMINE" : "SELECT",
buf, condstore);
idata->state = IMAP_SELECTED;
imap_cmd_start (idata, bufout);
status = imap_mboxcache_get (idata, idata->mailbox, 1);
do
{
char *pc;
if ((rc = imap_cmd_step (idata)) != IMAP_CMD_CONTINUE)
break;
pc = idata->buf + 2;
/* Obtain list of available flags here, may be overridden by a
* PERMANENTFLAGS tag in the OK response */
if (ascii_strncasecmp ("FLAGS", pc, 5) == 0)
{
/* don't override PERMANENTFLAGS */
if (!idata->flags)
{
dprint (3, (debugfile, "Getting mailbox FLAGS\n"));
if ((pc = imap_get_flags (&(idata->flags), pc)) == NULL)
goto fail;
}
}
/* PERMANENTFLAGS are massaged to look like FLAGS, then override FLAGS */
else if (ascii_strncasecmp ("OK [PERMANENTFLAGS", pc, 18) == 0)
{
dprint (3, (debugfile, "Getting mailbox PERMANENTFLAGS\n"));
/* safe to call on NULL */
mutt_free_list (&(idata->flags));
/* skip "OK [PERMANENT" so syntax is the same as FLAGS */
pc += 13;
if ((pc = imap_get_flags (&(idata->flags), pc)) == NULL)
goto fail;
}
/* save UIDVALIDITY for the header cache */
else if (ascii_strncasecmp ("OK [UIDVALIDITY", pc, 14) == 0)
{
dprint (3, (debugfile, "Getting mailbox UIDVALIDITY\n"));
pc += 3;
pc = imap_next_word (pc);
if (mutt_atoui (pc, &idata->uid_validity) < 0)
goto fail;
status->uidvalidity = idata->uid_validity;
}
else if (ascii_strncasecmp ("OK [UIDNEXT", pc, 11) == 0)
{
dprint (3, (debugfile, "Getting mailbox UIDNEXT\n"));
pc += 3;
pc = imap_next_word (pc);
if (mutt_atoui (pc, &idata->uidnext) < 0)
goto fail;
status->uidnext = idata->uidnext;
}
else if (ascii_strncasecmp ("OK [HIGHESTMODSEQ", pc, 17) == 0)
{
dprint (3, (debugfile, "Getting mailbox HIGHESTMODSEQ\n"));
pc += 3;
pc = imap_next_word (pc);
if (mutt_atoull (pc, &idata->modseq) < 0)
goto fail;
status->modseq = idata->modseq;
}
else if (ascii_strncasecmp ("OK [NOMODSEQ", pc, 12) == 0)
{
dprint (3, (debugfile, "Mailbox has NOMODSEQ set\n"));
status->modseq = idata->modseq = 0;
}
else
{
pc = imap_next_word (pc);
if (!ascii_strncasecmp ("EXISTS", pc, 6))
{
count = idata->newMailCount;
idata->newMailCount = 0;
}
}
}
while (rc == IMAP_CMD_CONTINUE);
if (rc == IMAP_CMD_NO)
{
char *s;
s = imap_next_word (idata->buf); /* skip seq */
s = imap_next_word (s); /* Skip response */
mutt_error ("%s", s);
mutt_sleep (2);
goto fail;
}
if (rc != IMAP_CMD_OK)
goto fail;
/* check for READ-ONLY notification */
if (!ascii_strncasecmp (imap_get_qualifier (idata->buf), "[READ-ONLY]", 11) &&
!mutt_bit_isset (idata->capabilities, ACL))
{
dprint (2, (debugfile, "Mailbox is read-only.\n"));
ctx->readonly = 1;
}
#ifdef DEBUG
/* dump the mailbox flags we've found */
if (debuglevel > 2)
{
if (!idata->flags)
dprint (3, (debugfile, "No folder flags found\n"));
else
{
LIST* t = idata->flags;
dprint (3, (debugfile, "Mailbox flags: "));
t = t->next;
while (t)
{
dprint (3, (debugfile, "[%s] ", t->data));
t = t->next;
}
dprint (3, (debugfile, "\n"));
}
}
#endif
if (!(mutt_bit_isset(idata->ctx->rights, MUTT_ACL_DELETE) ||
mutt_bit_isset(idata->ctx->rights, MUTT_ACL_SEEN) ||
mutt_bit_isset(idata->ctx->rights, MUTT_ACL_WRITE) ||
mutt_bit_isset(idata->ctx->rights, MUTT_ACL_INSERT)))
ctx->readonly = 1;
ctx->hdrmax = count;
ctx->hdrs = safe_calloc (count, sizeof (HEADER *));
ctx->v2r = safe_calloc (count, sizeof (int));
ctx->msgcount = 0;
if (count && (imap_read_headers (idata, 1, count, 1) < 0))
{
mutt_error _("Error opening mailbox");
mutt_sleep (1);
goto fail;
}
imap_disallow_reopen (ctx);
dprint (2, (debugfile, "imap_open_mailbox: msgcount is %d\n", ctx->msgcount));
FREE (&mx.mbox);
return 0;
fail:
if (idata->state == IMAP_SELECTED)
idata->state = IMAP_AUTHENTICATED;
fail_noidata:
FREE (&mx.mbox);
return -1;
}
static int imap_open_mailbox_append (CONTEXT *ctx, int flags)
{
IMAP_DATA *idata;
char buf[LONG_STRING];
char mailbox[LONG_STRING];
IMAP_MBOX mx;
int rc;
if (imap_parse_path (ctx->path, &mx))
return -1;
/* in APPEND mode, we appear to hijack an existing IMAP connection -
* ctx is brand new and mostly empty */
if (!(idata = imap_conn_find (&(mx.account), 0)))
{
FREE (&mx.mbox);
return -1;
}
ctx->data = idata;
imap_fix_path (idata, mx.mbox, mailbox, sizeof (mailbox));
if (!*mailbox)
strfcpy (mailbox, "INBOX", sizeof (mailbox));
FREE (&mx.mbox);
if ((rc = imap_access (ctx->path)) == 0)
return 0;
if (rc == -1)
return -1;
snprintf (buf, sizeof (buf), _("Create %s?"), mailbox);
if (option (OPTCONFIRMCREATE) && mutt_yesorno (buf, 1) < 1)
return -1;
if (imap_create_mailbox (idata, mailbox) < 0)
return -1;
return 0;
}
/* imap_logout: Gracefully log out of server. */
void imap_logout (IMAP_DATA** idata)
{
/* we set status here to let imap_handle_untagged know we _expect_ to
* receive a bye response (so it doesn't freak out and close the conn) */
(*idata)->status = IMAP_BYE;
imap_cmd_start (*idata, "LOGOUT");
if (ImapPollTimeout <= 0 ||
mutt_socket_poll ((*idata)->conn, ImapPollTimeout) != 0)
{
while (imap_cmd_step (*idata) == IMAP_CMD_CONTINUE)
;
}
mutt_socket_close ((*idata)->conn);
imap_free_idata (idata);
}
static int imap_open_new_message (MESSAGE *msg, CONTEXT *dest, HEADER *hdr)
{
BUFFER *tmp = NULL;
int rc = -1;
tmp = mutt_buffer_pool_get ();
mutt_buffer_mktemp (tmp);
if ((msg->fp = safe_fopen (mutt_b2s (tmp), "w")) == NULL)
{
mutt_perror (mutt_b2s (tmp));
goto cleanup;
}
msg->path = safe_strdup (mutt_b2s (tmp));
rc = 0;
cleanup:
mutt_buffer_pool_release (&tmp);
return rc;
}
/* imap_set_flag: append str to flags if we currently have permission
* according to aclbit */
static void imap_set_flag (IMAP_DATA* idata, int aclbit, int flag,
const char *str, char *flags, size_t flsize)
{
if (mutt_bit_isset (idata->ctx->rights, aclbit))
if (flag && imap_has_flag (idata->flags, str))
safe_strcat (flags, flsize, str);
}
/* imap_has_flag: do a caseless comparison of the flag against a flag list,
* return 1 if found or flag list has '\*', 0 otherwise */
int imap_has_flag (LIST* flag_list, const char* flag)
{
if (!flag_list)
return 0;
flag_list = flag_list->next;
while (flag_list)
{
if (!ascii_strncasecmp (flag_list->data, flag, strlen (flag_list->data)))
return 1;
if (!ascii_strncmp (flag_list->data, "\\*", strlen (flag_list->data)))
return 1;
flag_list = flag_list->next;
}
return 0;
}
/* Note: headers must be in SORT_ORDER. See imap_exec_msgset for args.
* Pos is an opaque pointer a la strtok. It should be 0 at first call. */
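/* The output is a standard IMAP sequence-set of UIDs: runs of matching
 * messages are compressed into ranges, so matches with UIDs 7, 8 and 9
 * followed by one with UID 12 come out as "7:9,12". */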
static int imap_make_msg_set (IMAP_DATA* idata, BUFFER* buf, int flag,
int changed, int invert, int* pos)
{
HEADER** hdrs = idata->ctx->hdrs;
int count = 0; /* number of messages in message set */
int match = 0; /* whether current message matches flag condition */
unsigned int setstart = 0; /* start of current message range */
int n;
int started = 0;
for (n = *pos;
(n < idata->ctx->msgcount) && (mutt_buffer_len (buf) < IMAP_MAX_CMDLEN);
n++)
{
match = 0;
/* don't include pending expunged messages.
*
* TODO: can we unset active in cmd_parse_expunge() and
* cmd_parse_vanished() instead of checking for index != INT_MAX. */
if (hdrs[n]->active && (hdrs[n]->index != INT_MAX))
switch (flag)
{
case MUTT_DELETED:
if (hdrs[n]->deleted != HEADER_DATA(hdrs[n])->deleted)
match = invert ^ hdrs[n]->deleted;
break;
case MUTT_FLAG:
if (hdrs[n]->flagged != HEADER_DATA(hdrs[n])->flagged)
match = invert ^ hdrs[n]->flagged;
break;
case MUTT_OLD:
if (hdrs[n]->old != HEADER_DATA(hdrs[n])->old)
match = invert ^ hdrs[n]->old;
break;
case MUTT_READ:
if (hdrs[n]->read != HEADER_DATA(hdrs[n])->read)
match = invert ^ hdrs[n]->read;
break;
case MUTT_REPLIED:
if (hdrs[n]->replied != HEADER_DATA(hdrs[n])->replied)
match = invert ^ hdrs[n]->replied;
break;
case MUTT_TAG:
if (hdrs[n]->tagged)
match = 1;
break;
case MUTT_TRASH:
if (hdrs[n]->deleted && !hdrs[n]->purge)
match = 1;
break;
}
if (match && (!changed || hdrs[n]->changed))
{
count++;
if (setstart == 0)
{
setstart = HEADER_DATA (hdrs[n])->uid;
if (started == 0)
{
mutt_buffer_add_printf (buf, "%u", HEADER_DATA (hdrs[n])->uid);
started = 1;
}
else
mutt_buffer_add_printf (buf, ",%u", HEADER_DATA (hdrs[n])->uid);
}
/* tie up if the last message also matches */
else if (n == idata->ctx->msgcount-1)
mutt_buffer_add_printf (buf, ":%u", HEADER_DATA (hdrs[n])->uid);
}
/* End current set if message doesn't match or we've reached the end
* of the mailbox via inactive messages following the last match. */
else if (setstart && (hdrs[n]->active || n == idata->ctx->msgcount-1))
{
if (HEADER_DATA (hdrs[n-1])->uid > setstart)
mutt_buffer_add_printf (buf, ":%u", HEADER_DATA (hdrs[n-1])->uid);
setstart = 0;
}
}
*pos = n;
return count;
}
/* Prepares commands for all messages matching conditions (must be flushed
* with imap_exec)
* Params:
* idata: IMAP_DATA containing context containing header set
* pre, post: commands are of the form "%s %s %s %s", tag,
* pre, message set, post
* flag: enum of flag type on which to filter
* changed: include only changed messages in message set
* invert: invert sense of flag, eg MUTT_READ matches unread messages
* Returns: number of matched messages, or -1 on failure */
int imap_exec_msgset (IMAP_DATA* idata, const char* pre, const char* post,
int flag, int changed, int invert)
{
HEADER** hdrs = NULL;
short oldsort;
BUFFER* cmd;
int pos;
int rc;
int count = 0, reopen_set = 0;
cmd = mutt_buffer_new ();
/* Unlike imap_sync_mailbox(), this function can be called when
* IMAP_REOPEN_ALLOW is not set. In that case, the caller isn't
* prepared to handle context changes. Resorting may not always
* give the same order, so we must make a copy.
*
* See the comment in imap_sync_mailbox() for the dangers of running
* even queued execs while reopen is set. To prevent memory
* corruption and data loss we must disable reopen for the duration
* of the swapped hdrs.
*/
if (idata->reopen & IMAP_REOPEN_ALLOW)
{
idata->reopen &= ~IMAP_REOPEN_ALLOW;
reopen_set = 1;
}
oldsort = Sort;
if (Sort != SORT_ORDER)
{
hdrs = idata->ctx->hdrs;
idata->ctx->hdrs = safe_malloc (idata->ctx->msgcount * sizeof (HEADER*));
memcpy (idata->ctx->hdrs, hdrs, idata->ctx->msgcount * sizeof (HEADER*));
Sort = SORT_ORDER;
qsort (idata->ctx->hdrs, idata->ctx->msgcount, sizeof (HEADER*),
mutt_get_sort_func (SORT_ORDER));
}
pos = 0;
do
{
mutt_buffer_clear (cmd);
mutt_buffer_add_printf (cmd, "%s ", pre);
rc = imap_make_msg_set (idata, cmd, flag, changed, invert, &pos);
if (rc > 0)
{
mutt_buffer_add_printf (cmd, " %s", post);
if (imap_exec (idata, cmd->data, IMAP_CMD_QUEUE))
{
rc = -1;
goto out;
}
count += rc;
}
}
while (rc > 0);
rc = count;
out:
mutt_buffer_free (&cmd);
if ((oldsort != Sort) || hdrs)
{
Sort = oldsort;
FREE (&idata->ctx->hdrs);
idata->ctx->hdrs = hdrs;
}
if (reopen_set)
idata->reopen |= IMAP_REOPEN_ALLOW;
return rc;
}
/* returns 0 if mutt's flags match cached server flags:
* EXCLUDING the deleted flag. */
static int compare_flags_for_copy (HEADER* h)
{
IMAP_HEADER_DATA* hd = (IMAP_HEADER_DATA*)h->data;
if (h->read != hd->read)
return 1;
if (h->old != hd->old)
return 1;
if (h->flagged != hd->flagged)
return 1;
if (h->replied != hd->replied)
return 1;
return 0;
}
/* Update the IMAP server to reflect the flags for a single message before
* performing a "UID COPY".
* NOTE: This does not sync the "deleted" flag state, because it is not
* desirable to propagate that flag into the copy.
*/
int imap_sync_message_for_copy (IMAP_DATA *idata, HEADER *hdr, BUFFER *cmd,
int *err_continue)
{
char flags[LONG_STRING];
char uid[11];
if (!compare_flags_for_copy (hdr))
{
if (hdr->deleted == HEADER_DATA(hdr)->deleted)
hdr->changed = 0;
return 0;
}
snprintf (uid, sizeof (uid), "%u", HEADER_DATA(hdr)->uid);
mutt_buffer_clear (cmd);
mutt_buffer_addstr (cmd, "UID STORE ");
mutt_buffer_addstr (cmd, uid);
flags[0] = '\0';
imap_set_flag (idata, MUTT_ACL_SEEN, hdr->read, "\\Seen ",
flags, sizeof (flags));
imap_set_flag (idata, MUTT_ACL_WRITE, hdr->old,
"Old ", flags, sizeof (flags));
imap_set_flag (idata, MUTT_ACL_WRITE, hdr->flagged,
"\\Flagged ", flags, sizeof (flags));
imap_set_flag (idata, MUTT_ACL_WRITE, hdr->replied,
"\\Answered ", flags, sizeof (flags));
imap_set_flag (idata, MUTT_ACL_DELETE, HEADER_DATA(hdr)->deleted,
"\\Deleted ", flags, sizeof (flags));
/* now make sure we don't lose custom tags */
if (mutt_bit_isset (idata->ctx->rights, MUTT_ACL_WRITE))
imap_add_keywords (flags, hdr, idata->flags, sizeof (flags));
mutt_remove_trailing_ws (flags);
/* UW-IMAP is OK with null flags, Cyrus isn't. The only solution is to
* explicitly revoke all system flags (if we have permission) */
if (!*flags)
{
imap_set_flag (idata, MUTT_ACL_SEEN, 1, "\\Seen ", flags, sizeof (flags));
imap_set_flag (idata, MUTT_ACL_WRITE, 1, "Old ", flags, sizeof (flags));
imap_set_flag (idata, MUTT_ACL_WRITE, 1, "\\Flagged ", flags, sizeof (flags));
imap_set_flag (idata, MUTT_ACL_WRITE, 1, "\\Answered ", flags, sizeof (flags));
imap_set_flag (idata, MUTT_ACL_DELETE, !HEADER_DATA(hdr)->deleted,
"\\Deleted ", flags, sizeof (flags));
mutt_remove_trailing_ws (flags);
mutt_buffer_addstr (cmd, " -FLAGS.SILENT (");
}
else
mutt_buffer_addstr (cmd, " FLAGS.SILENT (");
mutt_buffer_addstr (cmd, flags);
mutt_buffer_addstr (cmd, ")");
/* after all this it's still possible to have no flags, if you
* have no ACL rights */
if (*flags && (imap_exec (idata, cmd->data, 0) != 0) &&
err_continue && (*err_continue != MUTT_YES))
{
*err_continue = imap_continue ("imap_sync_message: STORE failed",
idata->buf);
if (*err_continue != MUTT_YES)
return -1;
}
if (hdr->deleted == HEADER_DATA(hdr)->deleted)
hdr->changed = 0;
return 0;
}
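/* sync_helper: push one flag type to the server in two passes: first a
 * "+FLAGS.SILENT" store for changed messages that have the flag set,
 * then (by patching buf[0] to '-') a "-FLAGS.SILENT" store for changed
 * messages that have it clear. Returns the number of messages updated,
 * or a negative error. */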
static int sync_helper (IMAP_DATA* idata, int right, int flag, const char* name)
{
int count = 0;
int rc;
char buf[LONG_STRING];
if (!idata->ctx)
return -1;
if (!mutt_bit_isset (idata->ctx->rights, right))
return 0;
if (right == MUTT_ACL_WRITE && !imap_has_flag (idata->flags, name))
return 0;
snprintf (buf, sizeof(buf), "+FLAGS.SILENT (%s)", name);
if ((rc = imap_exec_msgset (idata, "UID STORE", buf, flag, 1, 0)) < 0)
return rc;
count += rc;
buf[0] = '-';
if ((rc = imap_exec_msgset (idata, "UID STORE", buf, flag, 1, 1)) < 0)
return rc;
count += rc;
return count;
}
/* update the IMAP server to reflect message changes done within mutt.
* Arguments
* ctx: the current context
* expunge: 0 or 1 - do expunge?
*/
int imap_sync_mailbox (CONTEXT* ctx, int expunge, int* index_hint)
{
IMAP_DATA* idata;
CONTEXT* appendctx = NULL;
HEADER* h;
HEADER** hdrs = NULL;
int oldsort;
int n;
int rc, quickdel_rc = 0;
idata = (IMAP_DATA*) ctx->data;
if (idata->state < IMAP_SELECTED)
{
dprint (2, (debugfile, "imap_sync_mailbox: no mailbox selected\n"));
return -1;
}
/* This function is only called when the calling code expects the context
* to be changed. */
imap_allow_reopen (ctx);
if ((rc = imap_check_mailbox (ctx, index_hint, 0)) != 0)
goto out;
/* if we are expunging anyway, we can do deleted messages very quickly... */
if (expunge && mutt_bit_isset (ctx->rights, MUTT_ACL_DELETE))
{
if ((quickdel_rc = imap_exec_msgset (idata,
"UID STORE", "+FLAGS.SILENT (\\Deleted)",
MUTT_DELETED, 1, 0)) < 0)
{
rc = quickdel_rc;
mutt_error (_("Expunge failed"));
mutt_sleep (1);
goto out;
}
if (quickdel_rc > 0)
{
/* mark these messages as unchanged so second pass ignores them. Done
* here so BOGUS UW-IMAP 4.7 SILENT FLAGS updates are ignored. */
for (n = 0; n < ctx->msgcount; n++)
if (ctx->hdrs[n]->deleted && ctx->hdrs[n]->changed)
ctx->hdrs[n]->active = 0;
if (!ctx->quiet)
mutt_message (_("Marking %d messages deleted..."), quickdel_rc);
}
}
#if USE_HCACHE
idata->hcache = imap_hcache_open (idata, NULL);
#endif
/* save messages with real (non-flag) changes */
for (n = 0; n < ctx->msgcount; n++)
{
h = ctx->hdrs[n];
if (h->deleted)
{
imap_cache_del (idata, h);
#if USE_HCACHE
imap_hcache_del (idata, HEADER_DATA(h)->uid);
#endif
}
if (h->active && h->changed)
{
#if USE_HCACHE
imap_hcache_put (idata, h);
#endif
/* if the message has been rethreaded or attachments have been deleted
* we delete the message and reupload it.
* This works better if we're expunging, of course. */
/* TODO: why the h->env check? */
if ((h->env && h->env->changed) || h->attach_del)
{
/* NOTE and TODO:
*
* The mx_open_mailbox() in append mode below merely hijacks an existing
* idata; it doesn't reset idata->ctx. imap_append_message() ends up
* using (borrowing) the same idata we are using.
*
* Right after the APPEND operation finishes, the server can send an
* EXISTS notifying of the new message. Then, while still inside
* imap_append_message(), imap_cmd_step() -> imap_cmd_finish() will
* call imap_read_headers() to download those (because the idata's
* reopen_allow is set).
*
* The imap_read_headers() will open (and clobber) the idata->hcache we
* just opened above, then close it.
*
* The easy and less dangerous fix done here (for a stable branch bug
* fix) is to close and reopen the header cache around the operation.
*
* A better fix would be allowing idata->hcache reuse. When that is
* done, the close/reopen in read_headers_condstore_qresync_updates()
* can also be removed. */
#if USE_HCACHE
imap_hcache_close (idata);
#endif
if (!ctx->quiet)
mutt_message (_("Saving changed messages... [%d/%d]"), n+1,
ctx->msgcount);
if (!appendctx)
appendctx = mx_open_mailbox (ctx->path, MUTT_APPEND | MUTT_QUIET, NULL);
if (!appendctx)
dprint (1, (debugfile, "imap_sync_mailbox: Error opening mailbox in append mode\n"));
else
_mutt_save_message (h, appendctx, 1, 0, 0);
/* TODO: why the check for h->env? Is this possible? */
if (h->env)
h->env->changed = 0;
#if USE_HCACHE
idata->hcache = imap_hcache_open (idata, NULL);
#endif
}
}
}
#if USE_HCACHE
imap_hcache_close (idata);
#endif
/* presort here to avoid doing 10 resorts in imap_exec_msgset.
*
* Note: sync_helper() may trigger an imap_exec() if the queue fills
* up. Because IMAP_REOPEN_ALLOW is set, this may result in new
* messages being downloaded or an expunge being processed. For new
* messages this would both result in memory corruption (since we're
* alloc'ing msgcount instead of hdrmax pointers) and data loss of
* the new messages. For an expunge, the restored hdrs would point
* to headers that have been freed.
*
* Since reopen is allowed, we could change this to call
* mutt_sort_headers() before and after instead, but the double sort
* is noticeably slower.
*
* So instead, just turn off reopen_allow for the duration of the
* swapped hdrs. The imap_exec() below flushes the queue out,
* giving the opportunity to process any reopen events.
*/
imap_disallow_reopen (ctx);
oldsort = Sort;
if (Sort != SORT_ORDER)
{
hdrs = ctx->hdrs;
ctx->hdrs = safe_malloc (ctx->msgcount * sizeof (HEADER*));
memcpy (ctx->hdrs, hdrs, ctx->msgcount * sizeof (HEADER*));
Sort = SORT_ORDER;
qsort (ctx->hdrs, ctx->msgcount, sizeof (HEADER*),
mutt_get_sort_func (SORT_ORDER));
}
rc = sync_helper (idata, MUTT_ACL_DELETE, MUTT_DELETED, "\\Deleted");
if (rc >= 0)
rc |= sync_helper (idata, MUTT_ACL_WRITE, MUTT_FLAG, "\\Flagged");
if (rc >= 0)
rc |= sync_helper (idata, MUTT_ACL_WRITE, MUTT_OLD, "Old");
if (rc >= 0)
rc |= sync_helper (idata, MUTT_ACL_SEEN, MUTT_READ, "\\Seen");
if (rc >= 0)
rc |= sync_helper (idata, MUTT_ACL_WRITE, MUTT_REPLIED, "\\Answered");
if ((oldsort != Sort) || hdrs)
{
Sort = oldsort;
FREE (&ctx->hdrs);
ctx->hdrs = hdrs;
}
imap_allow_reopen (ctx);
/* Flush the queued flags if any were changed in sync_helper.
* The real (non-flag) changes loop might have flushed quickdel_rc
* queued commands, so we double check the cmdbuf isn't empty. */
if (((rc > 0) || (quickdel_rc > 0)) && mutt_buffer_len (idata->cmdbuf))
if (imap_exec (idata, NULL, 0) != IMAP_CMD_OK)
rc = -1;
if (rc < 0)
{
if (ctx->closing)
{
if (mutt_yesorno (_("Error saving flags. Close anyway?"), 0) == MUTT_YES)
{
rc = 0;
idata->state = IMAP_AUTHENTICATED;
goto out;
}
}
else
mutt_error _("Error saving flags");
rc = -1;
goto out;
}
/* Update local record of server state to reflect the synchronization just
* completed. imap_read_headers always overwrites hcache-origin flags, so
* there is no need to mutate the hcache after flag-only changes. */
for (n = 0; n < ctx->msgcount; n++)
{
HEADER_DATA(ctx->hdrs[n])->deleted = ctx->hdrs[n]->deleted;
HEADER_DATA(ctx->hdrs[n])->flagged = ctx->hdrs[n]->flagged;
HEADER_DATA(ctx->hdrs[n])->old = ctx->hdrs[n]->old;
HEADER_DATA(ctx->hdrs[n])->read = ctx->hdrs[n]->read;
HEADER_DATA(ctx->hdrs[n])->replied = ctx->hdrs[n]->replied;
ctx->hdrs[n]->changed = 0;
}
ctx->changed = 0;
/* We must send an EXPUNGE command if we're not closing. */
if (expunge && !(ctx->closing) &&
mutt_bit_isset(ctx->rights, MUTT_ACL_DELETE))
{
if (!ctx->quiet)
mutt_message _("Expunging messages from server...");
/* Set expunge bit so we don't get spurious reopened messages */
idata->reopen |= IMAP_EXPUNGE_EXPECTED;
if (imap_exec (idata, "EXPUNGE", 0) != 0)
{
idata->reopen &= ~IMAP_EXPUNGE_EXPECTED;
imap_error (_("imap_sync_mailbox: EXPUNGE failed"), idata->buf);
rc = -1;
goto out;
}
idata->reopen &= ~IMAP_EXPUNGE_EXPECTED;
}
if (expunge && ctx->closing)
{
imap_exec (idata, "CLOSE", IMAP_CMD_QUEUE);
idata->state = IMAP_AUTHENTICATED;
}
if (option (OPTMESSAGECACHECLEAN))
imap_cache_clean (idata);
rc = 0;
out:
imap_disallow_reopen (ctx);
if (appendctx)
{
mx_fastclose_mailbox (appendctx);
FREE (&appendctx);
}
return rc;
}
/* imap_close_mailbox: clean up IMAP data in CONTEXT */
int imap_close_mailbox (CONTEXT* ctx)
{
IMAP_DATA* idata;
int i;
idata = (IMAP_DATA*) ctx->data;
/* Check to see if the mailbox is actually open */
if (!idata)
return 0;
/* imap_open_mailbox_append() borrows the IMAP_DATA temporarily,
* just for the connection, but does not set idata->ctx to the
* open-append ctx.
*
* So when these are equal, it means we are actually closing the
* mailbox and should clean up idata. Otherwise, we don't want to
* touch idata - it's still being used.
*/
if (ctx == idata->ctx)
{
if (idata->status != IMAP_FATAL && idata->state >= IMAP_SELECTED)
{
/* mx_close_mailbox won't sync if there are no deleted messages
* and the mailbox is unchanged, so we may have to close here */
if (!ctx->deleted)
imap_exec (idata, "CLOSE", IMAP_CMD_QUEUE);
idata->state = IMAP_AUTHENTICATED;
}
idata->reopen = 0;
FREE (&(idata->mailbox));
mutt_free_list (&idata->flags);
idata->ctx = NULL;
hash_destroy (&idata->uid_hash, NULL);
FREE (&idata->msn_index);
idata->msn_index_size = 0;
idata->max_msn = 0;
for (i = 0; i < IMAP_CACHE_LEN; i++)
{
if (idata->cache[i].path)
{
unlink (idata->cache[i].path);
FREE (&idata->cache[i].path);
}
}
mutt_bcache_close (&idata->bcache);
}
/* free IMAP part of headers */
for (i = 0; i < ctx->msgcount; i++)
/* mailbox may not have fully loaded */
if (ctx->hdrs[i] && ctx->hdrs[i]->data)
imap_free_header_data ((IMAP_HEADER_DATA**)&(ctx->hdrs[i]->data));
return 0;
}
/* use the NOOP or IDLE command to poll for new mail
*
* return values:
* MUTT_REOPENED mailbox has been externally modified
* MUTT_NEW_MAIL new mail has arrived!
* 0 no change
* -1 error
*/
int imap_check_mailbox (CONTEXT *ctx, int *index_hint, int force)
{
/* overload keyboard timeout to avoid many mailbox checks in a row.
* Most users don't like having to wait exactly when they press a key. */
IMAP_DATA* idata;
int result = 0;
idata = (IMAP_DATA*) ctx->data;
/* try IDLE first, unless force is set */
if (!force && option (OPTIMAPIDLE) && mutt_bit_isset (idata->capabilities, IDLE)
&& (idata->state != IMAP_IDLE || time(NULL) >= idata->lastread + ImapKeepalive))
{
if (imap_cmd_idle (idata) < 0)
return -1;
}
if (idata->state == IMAP_IDLE)
{
while ((result = mutt_socket_poll (idata->conn, 0)) > 0)
{
if (imap_cmd_step (idata) != IMAP_CMD_CONTINUE)
{
dprint (1, (debugfile, "Error reading IDLE response\n"));
return -1;
}
}
if (result < 0)
{
dprint (1, (debugfile, "Poll failed, disabling IDLE\n"));
mutt_bit_unset (idata->capabilities, IDLE);
}
}
if ((force ||
(idata->state != IMAP_IDLE && time(NULL) >= idata->lastread + Timeout))
&& imap_exec (idata, "NOOP", IMAP_CMD_POLL) != 0)
return -1;
/* We call this even when we haven't run NOOP in case we have pending
* changes to process, since we can reopen here. */
imap_cmd_finish (idata);
if (idata->check_status & IMAP_EXPUNGE_PENDING)
result = MUTT_REOPENED;
else if (idata->check_status & IMAP_NEWMAIL_PENDING)
result = MUTT_NEW_MAIL;
else if (idata->check_status & IMAP_FLAGS_PENDING)
result = MUTT_FLAGS;
idata->check_status = 0;
return result;
}
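/* Wrapper used as the mx_ops check hook: allow the mailbox to be reopened
 * for the duration of the poll, then disallow reopening again. */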
static int imap_check_mailbox_reopen (CONTEXT *ctx, int *index_hint)
{
int rc;
imap_allow_reopen (ctx);
rc = imap_check_mailbox (ctx, index_hint, 0);
imap_disallow_reopen (ctx);
return rc;
}
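/* Save a single header to the header cache, opening a cache handle on
 * demand when the mailbox-wide one isn't already open, and closing it
 * again afterwards so it isn't held across callers. */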
static int imap_save_to_header_cache (CONTEXT *ctx, HEADER *h)
{
int rc = 0;
#ifdef USE_HCACHE
int close_hc = 1;
IMAP_DATA* idata;
idata = (IMAP_DATA *)ctx->data;
if (idata->hcache)
close_hc = 0;
else
idata->hcache = imap_hcache_open (idata, NULL);
rc = imap_hcache_put (idata, h);
if (close_hc)
imap_hcache_close (idata);
#endif
return rc;
}
/* split path into (idata,mailbox name) */
static int imap_get_mailbox (const char* path, IMAP_DATA** hidata, char* buf, size_t blen)
{
IMAP_MBOX mx;
if (imap_parse_path (path, &mx))
{
dprint (1, (debugfile, "imap_get_mailbox: Error parsing %s\n", path));
return -1;
}
if (!(*hidata = imap_conn_find (&(mx.account), option (OPTIMAPPASSIVE) ? MUTT_IMAP_CONN_NONEW : 0)))
{
FREE (&mx.mbox);
return -1;
}
imap_fix_path (*hidata, mx.mbox, buf, blen);
if (!*buf)
strfcpy (buf, "INBOX", blen);
FREE (&mx.mbox);
return 0;
}
/* check for new mail in any subscribed mailboxes. This is given the whole
 * list of mailboxes, rather than being called once per mailbox, so that it
 * can batch the commands and save on round trips. Returns the number of
 * mailboxes with new mail. */
int imap_buffy_check (int force, int check_stats)
{
IMAP_DATA* idata;
IMAP_DATA* lastdata = NULL;
BUFFY* mailbox;
char name[LONG_STRING];
char command[LONG_STRING*2];
char munged[LONG_STRING];
int buffies = 0;
for (mailbox = Incoming; mailbox; mailbox = mailbox->next)
{
/* Init newly-added mailboxes */
if (! mailbox->magic)
{
if (mx_is_imap (mutt_b2s (mailbox->pathbuf)))
mailbox->magic = MUTT_IMAP;
}
if (mailbox->magic != MUTT_IMAP)
continue;
if (mailbox->nopoll)
continue;
if (imap_get_mailbox (mutt_b2s (mailbox->pathbuf), &idata, name, sizeof (name)) < 0)
{
mailbox->new = 0;
continue;
}
/* Don't issue STATUS on the selected mailbox, it will be NOOPed or
* IDLEd elsewhere.
* idata->mailbox may be NULL for connections other than the current
* mailbox's, and shouldn't expand to INBOX in that case. #3216. */
if (idata->mailbox && !imap_mxcmp (name, idata->mailbox))
{
mailbox->new = 0;
continue;
}
if (!mutt_bit_isset (idata->capabilities, IMAP4REV1) &&
!mutt_bit_isset (idata->capabilities, STATUS))
{
dprint (2, (debugfile, "Server doesn't support STATUS\n"));
continue;
}
if (lastdata && idata != lastdata)
{
/* Send commands to previous server. Sorting the buffy list
* may prevent some infelicitous interleavings */
if (imap_exec (lastdata, NULL, IMAP_CMD_FAIL_OK | IMAP_CMD_POLL) == -1)
dprint (1, (debugfile, "Error polling mailboxes\n"));
lastdata = NULL;
}
if (!lastdata)
lastdata = idata;
imap_munge_mbox_name (idata, munged, sizeof (munged), name);
if (check_stats)
snprintf (command, sizeof (command),
"STATUS %s (UIDNEXT UIDVALIDITY UNSEEN RECENT MESSAGES)", munged);
else
snprintf (command, sizeof (command),
"STATUS %s (UIDNEXT UIDVALIDITY UNSEEN RECENT)", munged);
if (imap_exec (idata, command, IMAP_CMD_QUEUE | IMAP_CMD_POLL) < 0)
{
dprint (1, (debugfile, "Error queueing command\n"));
return 0;
}
}
if (lastdata && (imap_exec (lastdata, NULL, IMAP_CMD_FAIL_OK | IMAP_CMD_POLL) == -1))
{
dprint (1, (debugfile, "Error polling mailboxes\n"));
return 0;
}
/* collect results */
for (mailbox = Incoming; mailbox; mailbox = mailbox->next)
{
if (mailbox->magic == MUTT_IMAP && mailbox->new)
buffies++;
}
return buffies;
}
/* imap_status: returns count of messages in mailbox, or -1 on error.
* if queue != 0, queue the command and expect it to have been run
* on the next call (for pipelining the postponed count) */
int imap_status (const char* path, int queue)
{
static int queued = 0;
IMAP_DATA *idata;
char buf[LONG_STRING*2];
char mbox[LONG_STRING];
IMAP_STATUS* status;
if (imap_get_mailbox (path, &idata, buf, sizeof (buf)) < 0)
return -1;
/* We are in the folder we're polling - just return the mailbox count.
*
* Note that imap_mxcmp() converts NULL to "INBOX", so we need to
* make sure the idata really is open to a folder. */
if (idata->ctx && !imap_mxcmp (buf, idata->mailbox))
return idata->ctx->msgcount;
else if (mutt_bit_isset(idata->capabilities,IMAP4REV1) ||
mutt_bit_isset(idata->capabilities,STATUS))
{
imap_munge_mbox_name (idata, mbox, sizeof(mbox), buf);
snprintf (buf, sizeof (buf), "STATUS %s (%s)", mbox, "MESSAGES");
imap_unmunge_mbox_name (idata, mbox);
}
else
    /* Server does not support STATUS, and this is not the current mailbox.
     * There is no lightweight way to check for recent arrivals. */
return -1;
if (queue)
{
imap_exec (idata, buf, IMAP_CMD_QUEUE);
queued = 1;
return 0;
}
else if (!queued)
imap_exec (idata, buf, 0);
queued = 0;
if ((status = imap_mboxcache_get (idata, mbox, 0)))
return status->messages;
return 0;
}
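/* A minimal usage sketch (hypothetical caller) of the queueing protocol
 * above: the first call only queues STATUS; an intervening imap_exec is
 * expected to flush the queue, so the second call simply reads the
 * result out of the mailbox cache.
 *
 *   imap_status (path, 1);           // queue STATUS, returns 0
 *   ... other commands flush the queue ...
 *   count = imap_status (path, 0);   // read result from the mboxcache
 */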
/* return cached mailbox stats or NULL if create is 0 */
IMAP_STATUS* imap_mboxcache_get (IMAP_DATA* idata, const char* mbox, int create)
{
LIST* cur;
IMAP_STATUS* status;
IMAP_STATUS scache;
#ifdef USE_HCACHE
header_cache_t *hc = NULL;
void *puidvalidity = NULL;
void *puidnext = NULL;
void *pmodseq = NULL;
#endif
for (cur = idata->mboxcache; cur; cur = cur->next)
{
status = (IMAP_STATUS*)cur->data;
if (!imap_mxcmp (mbox, status->name))
return status;
}
status = NULL;
/* lame */
if (create)
{
memset (&scache, 0, sizeof (scache));
scache.name = (char*)mbox;
idata->mboxcache = mutt_add_list_n (idata->mboxcache, &scache,
sizeof (scache));
status = imap_mboxcache_get (idata, mbox, 0);
status->name = safe_strdup (mbox);
}
#ifdef USE_HCACHE
hc = imap_hcache_open (idata, mbox);
if (hc)
{
puidvalidity = mutt_hcache_fetch_raw (hc, "/UIDVALIDITY", imap_hcache_keylen);
puidnext = mutt_hcache_fetch_raw (hc, "/UIDNEXT", imap_hcache_keylen);
pmodseq = mutt_hcache_fetch_raw (hc, "/MODSEQ", imap_hcache_keylen);
if (puidvalidity)
{
if (!status)
{
mutt_hcache_free ((void **)&puidvalidity);
mutt_hcache_free ((void **)&puidnext);
mutt_hcache_free ((void **)&pmodseq);
mutt_hcache_close (hc);
return imap_mboxcache_get (idata, mbox, 1);
}
memcpy (&status->uidvalidity, puidvalidity, sizeof(unsigned int));
if (puidnext)
memcpy (&status->uidnext, puidnext, sizeof(unsigned int));
else
status->uidnext = 0;
if (pmodseq)
memcpy (&status->modseq, pmodseq, sizeof(unsigned long long));
else
status->modseq = 0;
dprint (3, (debugfile, "mboxcache: hcache uidvalidity %u, uidnext %u, modseq %llu\n",
status->uidvalidity, status->uidnext, status->modseq));
}
mutt_hcache_free ((void **)&puidvalidity);
mutt_hcache_free ((void **)&puidnext);
mutt_hcache_free ((void **)&pmodseq);
mutt_hcache_close (hc);
}
#endif
return status;
}
void imap_mboxcache_free (IMAP_DATA* idata)
{
LIST* cur;
IMAP_STATUS* status;
for (cur = idata->mboxcache; cur; cur = cur->next)
{
status = (IMAP_STATUS*)cur->data;
FREE (&status->name);
}
mutt_free_list (&idata->mboxcache);
}
/* returns number of patterns in the search that should be done server-side
* (eg are full-text) */
static int do_search (const pattern_t* search, int allpats)
{
int rc = 0;
const pattern_t* pat;
for (pat = search; pat; pat = pat->next)
{
switch (pat->op)
{
case MUTT_BODY:
case MUTT_HEADER:
case MUTT_WHOLE_MSG:
if (pat->stringmatch)
rc++;
break;
default:
if (pat->child && do_search (pat->child, 1))
rc++;
}
if (!allpats)
break;
}
return rc;
}
/* convert mutt pattern_t to an IMAP SEARCH command containing only the
 * elements that require full-text search (mutt already has what it needs
 * for most match types, and does a better job, e.g. where the server
 * doesn't support regexps). */
static int imap_compile_search (const pattern_t* pat, BUFFER* buf)
{
if (! do_search (pat, 0))
return 0;
if (pat->not)
mutt_buffer_addstr (buf, "NOT ");
if (pat->child)
{
int clauses;
if ((clauses = do_search (pat->child, 1)) > 0)
{
const pattern_t* clause = pat->child;
mutt_buffer_addch (buf, '(');
while (clauses)
{
if (do_search (clause, 0))
{
if (pat->op == MUTT_OR && clauses > 1)
mutt_buffer_addstr (buf, "OR ");
clauses--;
if (imap_compile_search (clause, buf) < 0)
return -1;
if (clauses)
mutt_buffer_addch (buf, ' ');
}
clause = clause->next;
}
mutt_buffer_addch (buf, ')');
}
}
else
{
char term[STRING];
char *delim;
switch (pat->op)
{
case MUTT_HEADER:
mutt_buffer_addstr (buf, "HEADER ");
/* extract header name */
if (! (delim = strchr (pat->p.str, ':')))
{
mutt_error (_("Header search without header name: %s"), pat->p.str);
return -1;
}
*delim = '\0';
imap_quote_string (term, sizeof (term), pat->p.str);
mutt_buffer_addstr (buf, term);
mutt_buffer_addch (buf, ' ');
/* and field */
*delim = ':';
delim++;
SKIPWS(delim);
imap_quote_string (term, sizeof (term), delim);
mutt_buffer_addstr (buf, term);
break;
case MUTT_BODY:
mutt_buffer_addstr (buf, "BODY ");
imap_quote_string (term, sizeof (term), pat->p.str);
mutt_buffer_addstr (buf, term);
break;
case MUTT_WHOLE_MSG:
mutt_buffer_addstr (buf, "TEXT ");
imap_quote_string (term, sizeof (term), pat->p.str);
mutt_buffer_addstr (buf, term);
break;
}
}
return 0;
}
int imap_search (CONTEXT* ctx, const pattern_t* pat)
{
BUFFER buf;
IMAP_DATA* idata = (IMAP_DATA*)ctx->data;
int i;
for (i = 0; i < ctx->msgcount; i++)
ctx->hdrs[i]->matched = 0;
if (!do_search (pat, 1))
return 0;
mutt_buffer_init (&buf);
mutt_buffer_addstr (&buf, "UID SEARCH ");
if (imap_compile_search (pat, &buf) < 0)
{
FREE (&buf.data);
return -1;
}
if (imap_exec (idata, buf.data, 0) < 0)
{
FREE (&buf.data);
return -1;
}
FREE (&buf.data);
return 0;
}
int imap_subscribe (char *path, int subscribe)
{
IMAP_DATA *idata;
char buf[LONG_STRING*2];
char mbox[LONG_STRING];
int mblen;
BUFFER err;
IMAP_MBOX mx;
if (!mx_is_imap (path) || imap_parse_path (path, &mx) || !mx.mbox)
{
mutt_error (_("Bad mailbox name"));
return -1;
}
if (!(idata = imap_conn_find (&(mx.account), 0)))
goto fail;
imap_fix_path (idata, mx.mbox, buf, sizeof (buf));
if (!*buf)
strfcpy (buf, "INBOX", sizeof (buf));
if (option (OPTIMAPCHECKSUBSCRIBED))
{
mutt_buffer_init (&err);
err.dsize = STRING;
err.data = safe_malloc (err.dsize);
mblen = snprintf (mbox, sizeof (mbox), "%smailboxes ",
subscribe ? "" : "un");
imap_quote_string_and_backquotes (mbox + mblen, sizeof(mbox) - mblen,
path);
if (mutt_parse_rc_line (mbox, &err))
dprint (1, (debugfile, "Error adding subscribed mailbox: %s\n", err.data));
FREE (&err.data);
}
if (subscribe)
mutt_message (_("Subscribing to %s..."), buf);
else
mutt_message (_("Unsubscribing from %s..."), buf);
imap_munge_mbox_name (idata, mbox, sizeof(mbox), buf);
snprintf (buf, sizeof (buf), "%sSUBSCRIBE %s", subscribe ? "" : "UN", mbox);
if (imap_exec (idata, buf, 0) < 0)
goto fail;
imap_unmunge_mbox_name(idata, mx.mbox);
if (subscribe)
mutt_message (_("Subscribed to %s"), mx.mbox);
else
mutt_message (_("Unsubscribed from %s"), mx.mbox);
FREE (&mx.mbox);
return 0;
fail:
FREE (&mx.mbox);
return -1;
}
/* trim dest to the length of the longest prefix it shares with src,
* returning the length of the trimmed string */
static size_t
longest_common_prefix (char *dest, const char* src, size_t start, size_t dlen)
{
size_t pos = start;
while (pos < dlen && dest[pos] && dest[pos] == src[pos])
pos++;
dest[pos] = '\0';
return pos;
}
/* look for IMAP URLs to complete from defined mailboxes. Could be extended
* to complete over open connections and account/folder hooks too. */
static int
imap_complete_hosts (char *dest, size_t len)
{
BUFFY* mailbox;
CONNECTION* conn;
int rc = -1;
size_t matchlen;
matchlen = mutt_strlen (dest);
for (mailbox = Incoming; mailbox; mailbox = mailbox->next)
{
if (!mutt_strncmp (dest, mutt_b2s (mailbox->pathbuf), matchlen))
{
if (rc)
{
strfcpy (dest, mutt_b2s (mailbox->pathbuf), len);
rc = 0;
}
else
longest_common_prefix (dest, mutt_b2s (mailbox->pathbuf), matchlen, len);
}
}
for (conn = mutt_socket_head (); conn; conn = conn->next)
{
ciss_url_t url;
char urlstr[LONG_STRING];
if (conn->account.type != MUTT_ACCT_TYPE_IMAP)
continue;
mutt_account_tourl (&conn->account, &url);
/* FIXME: how to handle multiple users on the same host? */
url.user = NULL;
url.path = NULL;
url_ciss_tostring (&url, urlstr, sizeof (urlstr), 0);
if (!mutt_strncmp (dest, urlstr, matchlen))
{
if (rc)
{
strfcpy (dest, urlstr, len);
rc = 0;
}
else
longest_common_prefix (dest, urlstr, matchlen, len);
}
}
return rc;
}
/* imap_complete: given a partial IMAP folder path, return a string which
* adds as much to the path as is unique */
int imap_complete(char* dest, size_t dlen, const char* path)
{
IMAP_DATA* idata;
char list[LONG_STRING];
char buf[LONG_STRING*2];
IMAP_LIST listresp;
char completion[LONG_STRING];
int clen;
size_t matchlen = 0;
int completions = 0;
IMAP_MBOX mx;
int rc;
if (imap_parse_path (path, &mx))
{
strfcpy (dest, path, dlen);
return imap_complete_hosts (dest, dlen);
}
/* don't open a new socket just for completion. Instead complete over
* known mailboxes/hooks/etc */
if (!(idata = imap_conn_find (&(mx.account), MUTT_IMAP_CONN_NONEW)))
{
FREE (&mx.mbox);
strfcpy (dest, path, dlen);
return imap_complete_hosts (dest, dlen);
}
/* reformat path for IMAP list, and append wildcard */
/* don't use INBOX in place of "" */
if (mx.mbox && mx.mbox[0])
imap_fix_path (idata, mx.mbox, list, sizeof(list));
else
list[0] = '\0';
/* fire off command */
snprintf (buf, sizeof(buf), "%s \"\" \"%s%%\"",
option (OPTIMAPLSUB) ? "LSUB" : "LIST", list);
imap_cmd_start (idata, buf);
/* and see what the results are */
strfcpy (completion, NONULL(mx.mbox), sizeof(completion));
idata->cmdtype = IMAP_CT_LIST;
idata->cmddata = &listresp;
do
{
listresp.name = NULL;
rc = imap_cmd_step (idata);
if (rc == IMAP_CMD_CONTINUE && listresp.name)
{
/* if the folder isn't selectable, append delimiter to force browse
* to enter it on second tab. */
if (listresp.noselect)
{
clen = strlen(listresp.name);
listresp.name[clen++] = listresp.delim;
listresp.name[clen] = '\0';
}
/* copy in first word */
if (!completions)
{
strfcpy (completion, listresp.name, sizeof(completion));
matchlen = strlen (completion);
completions++;
continue;
}
matchlen = longest_common_prefix (completion, listresp.name, 0, matchlen);
completions++;
}
}
while (rc == IMAP_CMD_CONTINUE);
idata->cmddata = NULL;
if (completions)
{
/* reformat output */
imap_qualify_path (dest, dlen, &mx, completion);
mutt_pretty_mailbox (dest, dlen);
FREE (&mx.mbox);
return 0;
}
return -1;
}
/* imap_fast_trash: use server COPY command to copy deleted
* messages to the trash folder.
* Return codes:
* -1: error
* 0: success
* 1: non-fatal error - try fetch/append */
int imap_fast_trash (CONTEXT* ctx, char* dest)
{
IMAP_DATA* idata;
char mbox[LONG_STRING];
char mmbox[LONG_STRING];
char prompt[LONG_STRING];
int n, rc;
IMAP_MBOX mx;
int triedcreate = 0;
BUFFER *sync_cmd = NULL;
int err_continue = MUTT_NO;
idata = (IMAP_DATA*) ctx->data;
if (imap_parse_path (dest, &mx))
{
dprint (1, (debugfile, "imap_fast_trash: bad destination %s\n", dest));
return -1;
}
/* check that the save-to folder is in the same account */
if (!mutt_account_match (&(CTX_DATA->conn->account), &(mx.account)))
{
dprint (3, (debugfile, "imap_fast_trash: %s not same server as %s\n",
dest, ctx->path));
return 1;
}
imap_fix_path (idata, mx.mbox, mbox, sizeof (mbox));
if (!*mbox)
strfcpy (mbox, "INBOX", sizeof (mbox));
imap_munge_mbox_name (idata, mmbox, sizeof (mmbox), mbox);
sync_cmd = mutt_buffer_new ();
for (n = 0; n < ctx->msgcount; n++)
{
if (ctx->hdrs[n]->active && ctx->hdrs[n]->changed &&
ctx->hdrs[n]->deleted && !ctx->hdrs[n]->purge)
{
rc = imap_sync_message_for_copy (idata, ctx->hdrs[n], sync_cmd, &err_continue);
if (rc < 0)
{
dprint (1, (debugfile, "imap_fast_trash: could not sync\n"));
goto out;
}
}
}
/* loop in case of TRYCREATE */
do
{
rc = imap_exec_msgset (idata, "UID COPY", mmbox, MUTT_TRASH, 0, 0);
if (!rc)
{
dprint (1, (debugfile, "imap_fast_trash: No messages to trash\n"));
rc = -1;
goto out;
}
else if (rc < 0)
{
dprint (1, (debugfile, "could not queue copy\n"));
goto out;
}
else if (!ctx->quiet)
mutt_message (_("Copying %d messages to %s..."), rc, mbox);
/* let's get it on */
rc = imap_exec (idata, NULL, IMAP_CMD_FAIL_OK);
if (rc == -2)
{
if (triedcreate)
{
dprint (1, (debugfile, "Already tried to create mailbox %s\n", mbox));
break;
}
/* bail out if command failed for reasons other than nonexistent target */
if (ascii_strncasecmp (imap_get_qualifier (idata->buf), "[TRYCREATE]", 11))
break;
dprint (3, (debugfile, "imap_fast_trash: server suggests TRYCREATE\n"));
snprintf (prompt, sizeof (prompt), _("Create %s?"), mbox);
if (option (OPTCONFIRMCREATE) && mutt_yesorno (prompt, 1) < 1)
{
mutt_clear_error ();
goto out;
}
if (imap_create_mailbox (idata, mbox) < 0)
break;
triedcreate = 1;
}
}
while (rc == -2);
if (rc != 0)
{
imap_error ("imap_fast_trash", idata->buf);
goto out;
}
rc = 0;
out:
mutt_buffer_free (&sync_cmd);
FREE (&mx.mbox);
return rc < 0 ? -1 : rc;
}
struct mx_ops mx_imap_ops = {
.open = imap_open_mailbox,
.open_append = imap_open_mailbox_append,
.close = imap_close_mailbox,
.open_msg = imap_fetch_message,
.close_msg = imap_close_message,
.commit_msg = imap_commit_message,
.open_new_msg = imap_open_new_message,
.check = imap_check_mailbox_reopen,
.sync = NULL, /* imap syncing is handled by imap_sync_mailbox */
.save_to_header_cache = imap_save_to_header_cache,
};
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_4051_0 |
crossvul-cpp_data_good_3118_0 |
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>
#define PIPE_PARANOIA /* for now */
#define iterate_iovec(i, n, __v, __p, skip, STEP) { \
size_t left; \
size_t wanted = n; \
__p = i->iov; \
__v.iov_len = min(n, __p->iov_len - skip); \
if (likely(__v.iov_len)) { \
__v.iov_base = __p->iov_base + skip; \
left = (STEP); \
__v.iov_len -= left; \
skip += __v.iov_len; \
n -= __v.iov_len; \
} else { \
left = 0; \
} \
while (unlikely(!left && n)) { \
__p++; \
__v.iov_len = min(n, __p->iov_len); \
if (unlikely(!__v.iov_len)) \
continue; \
__v.iov_base = __p->iov_base; \
left = (STEP); \
__v.iov_len -= left; \
skip = __v.iov_len; \
n -= __v.iov_len; \
} \
n = wanted - n; \
}
#define iterate_kvec(i, n, __v, __p, skip, STEP) { \
size_t wanted = n; \
__p = i->kvec; \
__v.iov_len = min(n, __p->iov_len - skip); \
if (likely(__v.iov_len)) { \
__v.iov_base = __p->iov_base + skip; \
(void)(STEP); \
skip += __v.iov_len; \
n -= __v.iov_len; \
} \
while (unlikely(n)) { \
__p++; \
__v.iov_len = min(n, __p->iov_len); \
if (unlikely(!__v.iov_len)) \
continue; \
__v.iov_base = __p->iov_base; \
(void)(STEP); \
skip = __v.iov_len; \
n -= __v.iov_len; \
} \
n = wanted; \
}
#define iterate_bvec(i, n, __v, __bi, skip, STEP) { \
struct bvec_iter __start; \
__start.bi_size = n; \
__start.bi_bvec_done = skip; \
__start.bi_idx = 0; \
for_each_bvec(__v, i->bvec, __bi, __start) { \
if (!__v.bv_len) \
continue; \
(void)(STEP); \
} \
}
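/* The iterate_* helpers below dispatch a per-segment STEP expression over
 * whichever representation the iterator carries: I for user-space iovecs,
 * B for bio_vecs, K for kernel kvecs. iterate_all_kinds leaves the
 * iterator untouched; iterate_and_advance also consumes what STEP copied. */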
#define iterate_all_kinds(i, n, v, I, B, K) { \
if (likely(n)) { \
size_t skip = i->iov_offset; \
if (unlikely(i->type & ITER_BVEC)) { \
struct bio_vec v; \
struct bvec_iter __bi; \
iterate_bvec(i, n, v, __bi, skip, (B)) \
} else if (unlikely(i->type & ITER_KVEC)) { \
const struct kvec *kvec; \
struct kvec v; \
iterate_kvec(i, n, v, kvec, skip, (K)) \
} else { \
const struct iovec *iov; \
struct iovec v; \
iterate_iovec(i, n, v, iov, skip, (I)) \
} \
} \
}
#define iterate_and_advance(i, n, v, I, B, K) { \
if (unlikely(i->count < n)) \
n = i->count; \
if (i->count) { \
size_t skip = i->iov_offset; \
if (unlikely(i->type & ITER_BVEC)) { \
const struct bio_vec *bvec = i->bvec; \
struct bio_vec v; \
struct bvec_iter __bi; \
iterate_bvec(i, n, v, __bi, skip, (B)) \
i->bvec = __bvec_iter_bvec(i->bvec, __bi); \
i->nr_segs -= i->bvec - bvec; \
skip = __bi.bi_bvec_done; \
} else if (unlikely(i->type & ITER_KVEC)) { \
const struct kvec *kvec; \
struct kvec v; \
iterate_kvec(i, n, v, kvec, skip, (K)) \
if (skip == kvec->iov_len) { \
kvec++; \
skip = 0; \
} \
i->nr_segs -= kvec - i->kvec; \
i->kvec = kvec; \
} else { \
const struct iovec *iov; \
struct iovec v; \
iterate_iovec(i, n, v, iov, skip, (I)) \
if (skip == iov->iov_len) { \
iov++; \
skip = 0; \
} \
i->nr_segs -= iov - i->iov; \
i->iov = iov; \
} \
i->count -= n; \
i->iov_offset = skip; \
} \
}
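/* Copy bytes from a page into a user-backed iovec iterator. On HIGHMEM
 * configurations this first tries an atomic kmap with
 * __copy_to_user_inatomic after faulting the destination in; if any bytes
 * remain it falls back to a sleeping kmap() copy. */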
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
size_t skip, copy, left, wanted;
const struct iovec *iov;
char __user *buf;
void *kaddr, *from;
if (unlikely(bytes > i->count))
bytes = i->count;
if (unlikely(!bytes))
return 0;
wanted = bytes;
iov = i->iov;
skip = i->iov_offset;
buf = iov->iov_base + skip;
copy = min(bytes, iov->iov_len - skip);
if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
kaddr = kmap_atomic(page);
from = kaddr + offset;
/* first chunk, usually the only one */
left = __copy_to_user_inatomic(buf, from, copy);
copy -= left;
skip += copy;
from += copy;
bytes -= copy;
while (unlikely(!left && bytes)) {
iov++;
buf = iov->iov_base;
copy = min(bytes, iov->iov_len);
left = __copy_to_user_inatomic(buf, from, copy);
copy -= left;
skip = copy;
from += copy;
bytes -= copy;
}
if (likely(!bytes)) {
kunmap_atomic(kaddr);
goto done;
}
offset = from - kaddr;
buf += copy;
kunmap_atomic(kaddr);
copy = min(bytes, iov->iov_len - skip);
}
/* Too bad - revert to non-atomic kmap */
kaddr = kmap(page);
from = kaddr + offset;
left = __copy_to_user(buf, from, copy);
copy -= left;
skip += copy;
from += copy;
bytes -= copy;
while (unlikely(!left && bytes)) {
iov++;
buf = iov->iov_base;
copy = min(bytes, iov->iov_len);
left = __copy_to_user(buf, from, copy);
copy -= left;
skip = copy;
from += copy;
bytes -= copy;
}
kunmap(page);
done:
if (skip == iov->iov_len) {
iov++;
skip = 0;
}
i->count -= wanted - bytes;
i->nr_segs -= iov - i->iov;
i->iov = iov;
i->iov_offset = skip;
return wanted - bytes;
}
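/* Mirror of copy_page_to_iter_iovec: copy bytes from a user-backed iovec
 * iterator into a page, with the same atomic-kmap fast path and sleeping
 * kmap() fallback. */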
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
size_t skip, copy, left, wanted;
const struct iovec *iov;
char __user *buf;
void *kaddr, *to;
if (unlikely(bytes > i->count))
bytes = i->count;
if (unlikely(!bytes))
return 0;
wanted = bytes;
iov = i->iov;
skip = i->iov_offset;
buf = iov->iov_base + skip;
copy = min(bytes, iov->iov_len - skip);
if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
kaddr = kmap_atomic(page);
to = kaddr + offset;
/* first chunk, usually the only one */
left = __copy_from_user_inatomic(to, buf, copy);
copy -= left;
skip += copy;
to += copy;
bytes -= copy;
while (unlikely(!left && bytes)) {
iov++;
buf = iov->iov_base;
copy = min(bytes, iov->iov_len);
left = __copy_from_user_inatomic(to, buf, copy);
copy -= left;
skip = copy;
to += copy;
bytes -= copy;
}
if (likely(!bytes)) {
kunmap_atomic(kaddr);
goto done;
}
offset = to - kaddr;
buf += copy;
kunmap_atomic(kaddr);
copy = min(bytes, iov->iov_len - skip);
}
/* Too bad - revert to non-atomic kmap */
kaddr = kmap(page);
to = kaddr + offset;
left = __copy_from_user(to, buf, copy);
copy -= left;
skip += copy;
to += copy;
bytes -= copy;
while (unlikely(!left && bytes)) {
iov++;
buf = iov->iov_base;
copy = min(bytes, iov->iov_len);
left = __copy_from_user(to, buf, copy);
copy -= left;
skip = copy;
to += copy;
bytes -= copy;
}
kunmap(page);
done:
if (skip == iov->iov_len) {
iov++;
skip = 0;
}
i->count -= wanted - bytes;
i->nr_segs -= iov - i->iov;
i->iov = iov;
i->iov_offset = skip;
return wanted - bytes;
}
#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
struct pipe_inode_info *pipe = i->pipe;
int idx = i->idx;
int next = pipe->curbuf + pipe->nrbufs;
if (i->iov_offset) {
struct pipe_buffer *p;
if (unlikely(!pipe->nrbufs))
goto Bad; // pipe must be non-empty
if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
goto Bad; // must be at the last buffer...
p = &pipe->bufs[idx];
if (unlikely(p->offset + p->len != i->iov_offset))
goto Bad; // ... at the end of segment
} else {
if (idx != (next & (pipe->buffers - 1)))
goto Bad; // must be right after the last buffer
}
return true;
Bad:
printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
pipe->curbuf, pipe->nrbufs, pipe->buffers);
for (idx = 0; idx < pipe->buffers; idx++)
printk(KERN_ERR "[%p %p %d %d]\n",
pipe->bufs[idx].ops,
pipe->bufs[idx].page,
pipe->bufs[idx].offset,
pipe->bufs[idx].len);
WARN_ON(1);
return false;
}
#else
#define sanity(i) true
#endif
static inline int next_idx(int idx, struct pipe_inode_info *pipe)
{
return (idx + 1) & (pipe->buffers - 1);
}
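/* "Copy" a page into a pipe-backed iterator by grabbing a reference and
 * appending the page as a pipe buffer (or merging with the last buffer
 * when the copy continues exactly where it left off); no data moves. */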
static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
struct pipe_inode_info *pipe = i->pipe;
struct pipe_buffer *buf;
size_t off;
int idx;
if (unlikely(bytes > i->count))
bytes = i->count;
if (unlikely(!bytes))
return 0;
if (!sanity(i))
return 0;
off = i->iov_offset;
idx = i->idx;
buf = &pipe->bufs[idx];
if (off) {
if (offset == off && buf->page == page) {
/* merge with the last one */
buf->len += bytes;
i->iov_offset += bytes;
goto out;
}
idx = next_idx(idx, pipe);
buf = &pipe->bufs[idx];
}
if (idx == pipe->curbuf && pipe->nrbufs)
return 0;
pipe->nrbufs++;
buf->ops = &page_cache_pipe_buf_ops;
get_page(buf->page = page);
buf->offset = offset;
buf->len = bytes;
i->iov_offset = offset + bytes;
i->idx = idx;
out:
i->count -= bytes;
return bytes;
}
/*
* Fault in one or more iovecs of the given iov_iter, to a maximum length of
* bytes. For each iovec, fault in each page that constitutes the iovec.
*
* Return 0 on success, or non-zero if the memory could not be accessed (i.e.
* because it is an invalid address).
*/
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
size_t skip = i->iov_offset;
const struct iovec *iov;
int err;
struct iovec v;
if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
iterate_iovec(i, bytes, v, iov, skip, ({
err = fault_in_pages_readable(v.iov_base, v.iov_len);
if (unlikely(err))
return err;
0;}))
}
return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
void iov_iter_init(struct iov_iter *i, int direction,
const struct iovec *iov, unsigned long nr_segs,
size_t count)
{
/* It will get better. Eventually... */
if (segment_eq(get_fs(), KERNEL_DS)) {
direction |= ITER_KVEC;
i->type = direction;
i->kvec = (struct kvec *)iov;
} else {
i->type = direction;
i->iov = iov;
}
i->nr_segs = nr_segs;
i->iov_offset = 0;
i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
char *from = kmap_atomic(page);
memcpy(to, from + offset, len);
kunmap_atomic(from);
}
static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
char *to = kmap_atomic(page);
memcpy(to + offset, from, len);
kunmap_atomic(to);
}
static void memzero_page(struct page *page, size_t offset, size_t len)
{
char *addr = kmap_atomic(page);
memset(addr + offset, 0, len);
kunmap_atomic(addr);
}
static inline bool allocated(struct pipe_buffer *buf)
{
return buf->ops == &default_pipe_buf_ops;
}
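/* Compute where freshly pushed data would start in the pipe: the current
 * (idx, offset) when the last buffer can still be appended to, otherwise
 * the beginning of the next buffer slot. */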
static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
{
size_t off = i->iov_offset;
int idx = i->idx;
if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
idx = next_idx(idx, i->pipe);
off = 0;
}
*idxp = idx;
*offp = off;
}
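/* Make room in the pipe for up to size bytes: top up the last allocated
 * buffer, then allocate fresh pages until either the pipe is full or the
 * request is covered. Returns the number of bytes of capacity secured,
 * starting at (*idxp, *offp). */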
static size_t push_pipe(struct iov_iter *i, size_t size,
int *idxp, size_t *offp)
{
struct pipe_inode_info *pipe = i->pipe;
size_t off;
int idx;
ssize_t left;
if (unlikely(size > i->count))
size = i->count;
if (unlikely(!size))
return 0;
left = size;
data_start(i, &idx, &off);
*idxp = idx;
*offp = off;
if (off) {
left -= PAGE_SIZE - off;
if (left <= 0) {
pipe->bufs[idx].len += size;
return size;
}
pipe->bufs[idx].len = PAGE_SIZE;
idx = next_idx(idx, pipe);
}
while (idx != pipe->curbuf || !pipe->nrbufs) {
struct page *page = alloc_page(GFP_USER);
if (!page)
break;
pipe->nrbufs++;
pipe->bufs[idx].ops = &default_pipe_buf_ops;
pipe->bufs[idx].page = page;
pipe->bufs[idx].offset = 0;
if (left <= PAGE_SIZE) {
pipe->bufs[idx].len = left;
return size;
}
pipe->bufs[idx].len = PAGE_SIZE;
left -= PAGE_SIZE;
idx = next_idx(idx, pipe);
}
return size - left;
}
static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
struct iov_iter *i)
{
struct pipe_inode_info *pipe = i->pipe;
size_t n, off;
int idx;
if (!sanity(i))
return 0;
bytes = n = push_pipe(i, bytes, &idx, &off);
if (unlikely(!n))
return 0;
for ( ; n; idx = next_idx(idx, pipe), off = 0) {
size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
i->idx = idx;
i->iov_offset = off + chunk;
n -= chunk;
addr += chunk;
}
i->count -= bytes;
return bytes;
}
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
const char *from = addr;
if (unlikely(i->type & ITER_PIPE))
return copy_pipe_to_iter(addr, bytes, i);
iterate_and_advance(i, bytes, v,
__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
v.iov_len),
memcpy_to_page(v.bv_page, v.bv_offset,
(from += v.bv_len) - v.bv_len, v.bv_len),
memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
)
return bytes;
}
EXPORT_SYMBOL(copy_to_iter);
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
if (unlikely(i->type & ITER_PIPE)) {
WARN_ON(1);
return 0;
}
iterate_and_advance(i, bytes, v,
__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
v.iov_len),
memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
)
return bytes;
}
EXPORT_SYMBOL(copy_from_iter);
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
if (unlikely(i->type & ITER_PIPE)) {
WARN_ON(1);
return false;
}
if (unlikely(i->count < bytes))
return false;
iterate_all_kinds(i, bytes, v, ({
if (__copy_from_user((to += v.iov_len) - v.iov_len,
v.iov_base, v.iov_len))
return false;
0;}),
memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
)
iov_iter_advance(i, bytes);
return true;
}
EXPORT_SYMBOL(copy_from_iter_full);
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
if (unlikely(i->type & ITER_PIPE)) {
WARN_ON(1);
return 0;
}
iterate_and_advance(i, bytes, v,
__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
v.iov_base, v.iov_len),
memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
)
return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
if (unlikely(i->type & ITER_PIPE)) {
WARN_ON(1);
return false;
}
if (unlikely(i->count < bytes))
return false;
iterate_all_kinds(i, bytes, v, ({
if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
v.iov_base, v.iov_len))
return false;
0;}),
memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
)
iov_iter_advance(i, bytes);
return true;
}
EXPORT_SYMBOL(copy_from_iter_full_nocache);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
if (i->type & (ITER_BVEC|ITER_KVEC)) {
void *kaddr = kmap_atomic(page);
size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
kunmap_atomic(kaddr);
return wanted;
} else if (likely(!(i->type & ITER_PIPE)))
return copy_page_to_iter_iovec(page, offset, bytes, i);
else
return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
if (unlikely(i->type & ITER_PIPE)) {
WARN_ON(1);
return 0;
}
if (i->type & (ITER_BVEC|ITER_KVEC)) {
void *kaddr = kmap_atomic(page);
size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
kunmap_atomic(kaddr);
return wanted;
} else
return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);
static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
struct pipe_inode_info *pipe = i->pipe;
size_t n, off;
int idx;
if (!sanity(i))
return 0;
bytes = n = push_pipe(i, bytes, &idx, &off);
if (unlikely(!n))
return 0;
for ( ; n; idx = next_idx(idx, pipe), off = 0) {
size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
memzero_page(pipe->bufs[idx].page, off, chunk);
i->idx = idx;
i->iov_offset = off + chunk;
n -= chunk;
}
i->count -= bytes;
return bytes;
}
size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
if (unlikely(i->type & ITER_PIPE))
return pipe_zero(bytes, i);
iterate_and_advance(i, bytes, v,
__clear_user(v.iov_base, v.iov_len),
memzero_page(v.bv_page, v.bv_offset, v.bv_len),
memset(v.iov_base, 0, v.iov_len)
)
return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);
size_t iov_iter_copy_from_user_atomic(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes)
{
char *kaddr = kmap_atomic(page), *p = kaddr + offset;
if (unlikely(i->type & ITER_PIPE)) {
kunmap_atomic(kaddr);
WARN_ON(1);
return 0;
}
iterate_all_kinds(i, bytes, v,
__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
v.iov_base, v.iov_len),
memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
)
kunmap_atomic(kaddr);
return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
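/* Release every pipe buffer past the iterator's current position, and trim
 * the buffer the iterator points into so it ends exactly at i->iov_offset. */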
static inline void pipe_truncate(struct iov_iter *i)
{
struct pipe_inode_info *pipe = i->pipe;
if (pipe->nrbufs) {
size_t off = i->iov_offset;
int idx = i->idx;
int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
if (off) {
pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
idx = next_idx(idx, pipe);
nrbufs++;
}
while (pipe->nrbufs > nrbufs) {
pipe_buf_release(pipe, &pipe->bufs[idx]);
idx = next_idx(idx, pipe);
pipe->nrbufs--;
}
}
}
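/* Advance a pipe-backed iterator by size bytes, walking buffer by buffer,
 * then discard everything past the new position. */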
static void pipe_advance(struct iov_iter *i, size_t size)
{
struct pipe_inode_info *pipe = i->pipe;
if (unlikely(i->count < size))
size = i->count;
if (size) {
struct pipe_buffer *buf;
size_t off = i->iov_offset, left = size;
int idx = i->idx;
if (off) /* make it relative to the beginning of buffer */
left += off - pipe->bufs[idx].offset;
while (1) {
buf = &pipe->bufs[idx];
if (left <= buf->len)
break;
left -= buf->len;
idx = next_idx(idx, pipe);
}
i->idx = idx;
i->iov_offset = buf->offset + left;
}
i->count -= size;
/* ... and discard everything past that point */
pipe_truncate(i);
}
void iov_iter_advance(struct iov_iter *i, size_t size)
{
if (unlikely(i->type & ITER_PIPE)) {
pipe_advance(i, size);
return;
}
iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);
/*
* Return the count of just the current iov_iter segment.
*/
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
if (unlikely(i->type & ITER_PIPE))
return i->count; // it is a silly place, anyway
if (i->nr_segs == 1)
return i->count;
else if (i->type & ITER_BVEC)
return min(i->count, i->bvec->bv_len - i->iov_offset);
else
return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
void iov_iter_kvec(struct iov_iter *i, int direction,
const struct kvec *kvec, unsigned long nr_segs,
size_t count)
{
BUG_ON(!(direction & ITER_KVEC));
i->type = direction;
i->kvec = kvec;
i->nr_segs = nr_segs;
i->iov_offset = 0;
i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);
void iov_iter_bvec(struct iov_iter *i, int direction,
const struct bio_vec *bvec, unsigned long nr_segs,
size_t count)
{
BUG_ON(!(direction & ITER_BVEC));
i->type = direction;
i->bvec = bvec;
i->nr_segs = nr_segs;
i->iov_offset = 0;
i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);
void iov_iter_pipe(struct iov_iter *i, int direction,
struct pipe_inode_info *pipe,
size_t count)
{
BUG_ON(direction != ITER_PIPE);
WARN_ON(pipe->nrbufs == pipe->buffers);
i->type = direction;
i->pipe = pipe;
i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
i->iov_offset = 0;
i->count = count;
}
EXPORT_SYMBOL(iov_iter_pipe);
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
unsigned long res = 0;
size_t size = i->count;
if (unlikely(i->type & ITER_PIPE)) {
if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
return size | i->iov_offset;
return size;
}
iterate_all_kinds(i, size, v,
(res |= (unsigned long)v.iov_base | v.iov_len, 0),
res |= v.bv_offset | v.bv_len,
res |= (unsigned long)v.iov_base | v.iov_len
)
return res;
}
EXPORT_SYMBOL(iov_iter_alignment);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
unsigned long res = 0;
size_t size = i->count;
if (unlikely(i->type & ITER_PIPE)) {
WARN_ON(1);
return ~0U;
}
iterate_all_kinds(i, size, v,
(res |= (!res ? 0 : (unsigned long)v.iov_base) |
(size != v.iov_len ? size : 0), 0),
(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
(size != v.bv_len ? size : 0)),
(res |= (!res ? 0 : (unsigned long)v.iov_base) |
(size != v.iov_len ? size : 0))
);
return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);
static inline size_t __pipe_get_pages(struct iov_iter *i,
size_t maxsize,
struct page **pages,
int idx,
size_t *start)
{
struct pipe_inode_info *pipe = i->pipe;
ssize_t n = push_pipe(i, maxsize, &idx, start);
if (!n)
return -EFAULT;
maxsize = n;
n += *start;
while (n > 0) {
get_page(*pages++ = pipe->bufs[idx].page);
idx = next_idx(idx, pipe);
n -= PAGE_SIZE;
}
return maxsize;
}
static ssize_t pipe_get_pages(struct iov_iter *i,
struct page **pages, size_t maxsize, unsigned maxpages,
size_t *start)
{
unsigned npages;
size_t capacity;
int idx;
if (!maxsize)
return 0;
if (!sanity(i))
return -EFAULT;
data_start(i, &idx, start);
/* some of this one + all after this one */
npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;
return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
}
ssize_t iov_iter_get_pages(struct iov_iter *i,
struct page **pages, size_t maxsize, unsigned maxpages,
size_t *start)
{
if (maxsize > i->count)
maxsize = i->count;
if (unlikely(i->type & ITER_PIPE))
return pipe_get_pages(i, pages, maxsize, maxpages, start);
iterate_all_kinds(i, maxsize, v, ({
unsigned long addr = (unsigned long)v.iov_base;
size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
int n;
int res;
if (len > maxpages * PAGE_SIZE)
len = maxpages * PAGE_SIZE;
addr &= ~(PAGE_SIZE - 1);
n = DIV_ROUND_UP(len, PAGE_SIZE);
res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
if (unlikely(res < 0))
return res;
return (res == n ? len : res * PAGE_SIZE) - *start;
0;}),({
/* can't be more than PAGE_SIZE */
*start = v.bv_offset;
get_page(*pages = v.bv_page);
return v.bv_len;
}),({
return -EFAULT;
})
)
return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);
static struct page **get_pages_array(size_t n)
{
struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
if (!p)
p = vmalloc(n * sizeof(struct page *));
return p;
}
static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
struct page ***pages, size_t maxsize,
size_t *start)
{
struct page **p;
size_t n;
int idx;
int npages;
if (!maxsize)
return 0;
if (!sanity(i))
return -EFAULT;
data_start(i, &idx, start);
/* some of this one + all after this one */
npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
n = npages * PAGE_SIZE - *start;
if (maxsize > n)
maxsize = n;
else
npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
p = get_pages_array(npages);
if (!p)
return -ENOMEM;
n = __pipe_get_pages(i, maxsize, p, idx, start);
if (n > 0)
*pages = p;
else
kvfree(p);
return n;
}
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
struct page ***pages, size_t maxsize,
size_t *start)
{
struct page **p;
if (maxsize > i->count)
maxsize = i->count;
if (unlikely(i->type & ITER_PIPE))
return pipe_get_pages_alloc(i, pages, maxsize, start);
iterate_all_kinds(i, maxsize, v, ({
unsigned long addr = (unsigned long)v.iov_base;
size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
int n;
int res;
addr &= ~(PAGE_SIZE - 1);
n = DIV_ROUND_UP(len, PAGE_SIZE);
p = get_pages_array(n);
if (!p)
return -ENOMEM;
res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
if (unlikely(res < 0)) {
kvfree(p);
return res;
}
*pages = p;
return (res == n ? len : res * PAGE_SIZE) - *start;
0;}),({
/* can't be more than PAGE_SIZE */
*start = v.bv_offset;
*pages = p = get_pages_array(1);
if (!p)
return -ENOMEM;
get_page(*p = v.bv_page);
return v.bv_len;
}),({
return -EFAULT;
})
)
return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
struct iov_iter *i)
{
char *to = addr;
__wsum sum, next;
size_t off = 0;
sum = *csum;
if (unlikely(i->type & ITER_PIPE)) {
WARN_ON(1);
return 0;
}
iterate_and_advance(i, bytes, v, ({
int err = 0;
next = csum_and_copy_from_user(v.iov_base,
(to += v.iov_len) - v.iov_len,
v.iov_len, 0, &err);
if (!err) {
sum = csum_block_add(sum, next, off);
off += v.iov_len;
}
err ? v.iov_len : 0;
}), ({
char *p = kmap_atomic(v.bv_page);
next = csum_partial_copy_nocheck(p + v.bv_offset,
(to += v.bv_len) - v.bv_len,
v.bv_len, 0);
kunmap_atomic(p);
sum = csum_block_add(sum, next, off);
off += v.bv_len;
}),({
next = csum_partial_copy_nocheck(v.iov_base,
(to += v.iov_len) - v.iov_len,
v.iov_len, 0);
sum = csum_block_add(sum, next, off);
off += v.iov_len;
})
)
*csum = sum;
return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
struct iov_iter *i)
{
char *to = addr;
__wsum sum, next;
size_t off = 0;
sum = *csum;
if (unlikely(i->type & ITER_PIPE)) {
WARN_ON(1);
return false;
}
if (unlikely(i->count < bytes))
return false;
iterate_all_kinds(i, bytes, v, ({
int err = 0;
next = csum_and_copy_from_user(v.iov_base,
(to += v.iov_len) - v.iov_len,
v.iov_len, 0, &err);
if (err)
return false;
sum = csum_block_add(sum, next, off);
off += v.iov_len;
0;
}), ({
char *p = kmap_atomic(v.bv_page);
next = csum_partial_copy_nocheck(p + v.bv_offset,
(to += v.bv_len) - v.bv_len,
v.bv_len, 0);
kunmap_atomic(p);
sum = csum_block_add(sum, next, off);
off += v.bv_len;
}),({
next = csum_partial_copy_nocheck(v.iov_base,
(to += v.iov_len) - v.iov_len,
v.iov_len, 0);
sum = csum_block_add(sum, next, off);
off += v.iov_len;
})
)
*csum = sum;
iov_iter_advance(i, bytes);
return true;
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
struct iov_iter *i)
{
const char *from = addr;
__wsum sum, next;
size_t off = 0;
sum = *csum;
if (unlikely(i->type & ITER_PIPE)) {
WARN_ON(1); /* for now */
return 0;
}
iterate_and_advance(i, bytes, v, ({
int err = 0;
next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
v.iov_base,
v.iov_len, 0, &err);
if (!err) {
sum = csum_block_add(sum, next, off);
off += v.iov_len;
}
err ? v.iov_len : 0;
}), ({
char *p = kmap_atomic(v.bv_page);
next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
p + v.bv_offset,
v.bv_len, 0);
kunmap_atomic(p);
sum = csum_block_add(sum, next, off);
off += v.bv_len;
}),({
next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
v.iov_base,
v.iov_len, 0);
sum = csum_block_add(sum, next, off);
off += v.iov_len;
})
)
*csum = sum;
return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
size_t size = i->count;
int npages = 0;
if (!size)
return 0;
if (unlikely(i->type & ITER_PIPE)) {
struct pipe_inode_info *pipe = i->pipe;
size_t off;
int idx;
if (!sanity(i))
return 0;
data_start(i, &idx, &off);
/* some of this one + all after this one */
npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
if (npages >= maxpages)
return maxpages;
} else iterate_all_kinds(i, size, v, ({
unsigned long p = (unsigned long)v.iov_base;
npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
- p / PAGE_SIZE;
if (npages >= maxpages)
return maxpages;
0;}),({
npages++;
if (npages >= maxpages)
return maxpages;
}),({
unsigned long p = (unsigned long)v.iov_base;
npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
- p / PAGE_SIZE;
if (npages >= maxpages)
return maxpages;
})
)
return npages;
}
EXPORT_SYMBOL(iov_iter_npages);
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
*new = *old;
if (unlikely(new->type & ITER_PIPE)) {
WARN_ON(1);
return NULL;
}
if (new->type & ITER_BVEC)
return new->bvec = kmemdup(new->bvec,
new->nr_segs * sizeof(struct bio_vec),
flags);
else
/* iovec and kvec have identical layout */
return new->iov = kmemdup(new->iov,
new->nr_segs * sizeof(struct iovec),
flags);
}
EXPORT_SYMBOL(dup_iter);
/**
* import_iovec() - Copy an array of &struct iovec from userspace
* into the kernel, check that it is valid, and initialize a new
* &struct iov_iter iterator to access it.
*
* @type: One of %READ or %WRITE.
* @uvector: Pointer to the userspace array.
* @nr_segs: Number of elements in userspace array.
* @fast_segs: Number of elements in @iov.
* @iov: (input and output parameter) Pointer to pointer to (usually small
* on-stack) kernel array.
* @i: Pointer to iterator that will be initialized on success.
*
* If the array pointed to by *@iov is large enough to hold all @nr_segs,
* then this function places %NULL in *@iov on return. Otherwise, a new
* array will be allocated and the result placed in *@iov. This means that
* the caller may call kfree() on *@iov regardless of whether the small
* on-stack array was used or not (and regardless of whether this function
* returns an error or not).
*
* Return: 0 on success or negative error code on error.
*/
int import_iovec(int type, const struct iovec __user * uvector,
unsigned nr_segs, unsigned fast_segs,
struct iovec **iov, struct iov_iter *i)
{
ssize_t n;
struct iovec *p;
n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
*iov, &p);
if (n < 0) {
if (p != *iov)
kfree(p);
*iov = NULL;
return n;
}
iov_iter_init(i, type, p, nr_segs, n);
*iov = p == *iov ? NULL : p;
return 0;
}
EXPORT_SYMBOL(import_iovec);
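/* Illustrative (hypothetical) caller, sketching the ownership rule
 * documented above: *iov may enter pointing at a small on-stack array and
 * must be kfree()d on the way out whether or not that array was used.
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	int ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV,
 *			       &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	... consume &iter ...
 *	kfree(iov);
 */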
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
unsigned nr_segs, unsigned fast_segs,
struct iovec **iov, struct iov_iter *i)
{
ssize_t n;
struct iovec *p;
n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
*iov, &p);
if (n < 0) {
if (p != *iov)
kfree(p);
*iov = NULL;
return n;
}
iov_iter_init(i, type, p, nr_segs, n);
*iov = p == *iov ? NULL : p;
return 0;
}
#endif
int import_single_range(int rw, void __user *buf, size_t len,
struct iovec *iov, struct iov_iter *i)
{
if (len > MAX_RW_COUNT)
len = MAX_RW_COUNT;
if (unlikely(!access_ok(!rw, buf, len)))
return -EFAULT;
iov->iov_base = buf;
iov->iov_len = len;
iov_iter_init(i, rw, iov, 1, len);
return 0;
}
EXPORT_SYMBOL(import_single_range);
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_3118_0 |
crossvul-cpp_data_good_3568_9 |
/* -*- Mode: c; c-basic-offset: 2 -*-
*
* raptor_rss.c - Raptor Feeds (RSS and Atom) tag soup parser
*
* Copyright (C) 2003-2010, David Beckett http://www.dajobe.org/
* Copyright (C) 2003-2005, University of Bristol, UK http://www.bristol.ac.uk/
*
* This package is Free Software and part of Redland http://librdf.org/
*
* It is licensed under the following three licenses as alternatives:
* 1. GNU Lesser General Public License (LGPL) V2.1 or any newer version
* 2. GNU General Public License (GPL) V2 or any newer version
* 3. Apache License, V2.0 or any newer version
*
* You may not use this file except in compliance with at least one of
* the above three licenses.
*
* See LICENSE.html or LICENSE.txt at the top of this package for the
* complete terms and further detail along with the license texts for
* the licenses in COPYING.LIB, COPYING and LICENSE-2.0.txt respectively.
*
*
*/
#ifdef HAVE_CONFIG_H
#include <raptor_config.h>
#endif
#ifdef WIN32
#include <win32_raptor_config.h>
#endif
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include <stdarg.h>
#ifdef HAVE_ERRNO_H
#include <errno.h>
#endif
/* Raptor includes */
#include "raptor2.h"
#include "raptor_internal.h"
#include "raptor_rss.h"
/* local prototypes */
static void raptor_rss_uplift_items(raptor_parser* rdf_parser);
static int raptor_rss_emit(raptor_parser* rdf_parser);
static void raptor_rss_start_element_handler(void *user_data, raptor_xml_element* xml_element);
static void raptor_rss_end_element_handler(void *user_data, raptor_xml_element* xml_element);
static void raptor_rss_cdata_handler(void *user_data, raptor_xml_element* xml_element, const unsigned char *s, int len);
static void raptor_rss_comment_handler(void *user_data, raptor_xml_element* xml_element, const unsigned char *s);
static void raptor_rss_sax2_new_namespace_handler(void *user_data, raptor_namespace* nspace);
/*
* RSS parser object
*/
struct raptor_rss_parser_s {
/* static model */
raptor_rss_model model;
/* current line */
char *line;
/* current line length */
int line_length;
/* current char in line buffer */
int offset;
/* static statement for use in passing to user code */
raptor_statement statement;
raptor_sax2 *sax2;
/* rss node type of current CONTAINER item */
raptor_rss_type current_type;
/* one place stack */
raptor_rss_type prev_type;
raptor_rss_fields_type current_field;
  /* emptiness of current element */
int element_is_empty;
/* stack of namespaces */
raptor_namespace_stack *nstack;
/* non-0 if this is an atom 1.0 parser */
int is_atom;
/* namespaces declared here */
raptor_namespace* nspaces[RAPTOR_RSS_NAMESPACES_SIZE];
/* namespaces seen during parsing or creating output model */
char nspaces_seen[RAPTOR_RSS_NAMESPACES_SIZE];
/* current BLOCK pointer (inside CONTAINER of type current_type) */
raptor_rss_block *current_block;
};
typedef struct raptor_rss_parser_s raptor_rss_parser;
typedef enum {
RAPTOR_RSS_CONTENT_TYPE_NONE,
RAPTOR_RSS_CONTENT_TYPE_XML,
RAPTOR_RSS_CONTENT_TYPE_TEXT
} raptor_rss_content_type;
struct raptor_rss_element_s
{
raptor_world* world;
raptor_uri* uri;
/* Two types of content */
raptor_rss_content_type type;
/* 1) XML */
raptor_xml_writer* xml_writer;
/* XML written to this iostream to the xml_content string */
raptor_iostream* iostream;
/* ends up here */
void *xml_content;
size_t xml_content_length;
/* 2) cdata */
raptor_stringbuffer* sb;
};
typedef struct raptor_rss_element_s raptor_rss_element;
static void
raptor_free_rss_element(raptor_rss_element *rss_element)
{
if(rss_element->uri)
raptor_free_uri(rss_element->uri);
if(rss_element->type == RAPTOR_RSS_CONTENT_TYPE_XML) {
if(rss_element->xml_writer)
raptor_free_xml_writer(rss_element->xml_writer);
if(rss_element->iostream)
raptor_free_iostream(rss_element->iostream);
if(rss_element->xml_content)
raptor_free_memory(rss_element->xml_content);
}
if(rss_element->sb)
raptor_free_stringbuffer(rss_element->sb);
RAPTOR_FREE(raptor_rss_element, rss_element);
}
static int
raptor_rss_parse_init(raptor_parser* rdf_parser, const char *name)
{
raptor_rss_parser* rss_parser = (raptor_rss_parser*)rdf_parser->context;
raptor_sax2* sax2;
int n;
raptor_rss_common_init(rdf_parser->world);
raptor_rss_model_init(rdf_parser->world, &rss_parser->model);
rss_parser->prev_type = RAPTOR_RSS_NONE;
rss_parser->current_field = RAPTOR_RSS_FIELD_NONE;
rss_parser->current_type = RAPTOR_RSS_NONE;
rss_parser->current_block = NULL;
if(rss_parser->sax2) {
raptor_free_sax2(rss_parser->sax2);
rss_parser->sax2 = NULL;
}
rss_parser->nstack = raptor_new_namespaces(rdf_parser->world, 1);
/* Initialise the namespaces */
for(n = 0; n < RAPTOR_RSS_NAMESPACES_SIZE; n++) {
unsigned const char* prefix;
raptor_uri* uri;
raptor_namespace* nspace = NULL;
prefix = (unsigned const char*)raptor_rss_namespaces_info[n].prefix;
uri = rdf_parser->world->rss_namespaces_info_uris[n];
if(prefix && uri)
nspace = raptor_new_namespace_from_uri(rss_parser->nstack,
prefix, uri, 0);
rss_parser->nspaces[n] = nspace;
}
sax2 = raptor_new_sax2(rdf_parser->world, &rdf_parser->locator, rdf_parser);
rss_parser->sax2 = sax2;
raptor_sax2_set_start_element_handler(sax2, raptor_rss_start_element_handler);
raptor_sax2_set_end_element_handler(sax2, raptor_rss_end_element_handler);
raptor_sax2_set_characters_handler(sax2, raptor_rss_cdata_handler);
raptor_sax2_set_cdata_handler(sax2, raptor_rss_cdata_handler);
raptor_sax2_set_comment_handler(sax2, raptor_rss_comment_handler);
raptor_sax2_set_namespace_handler(sax2, raptor_rss_sax2_new_namespace_handler);
raptor_statement_init(&rss_parser->statement, rdf_parser->world);
return 0;
}
static void
raptor_rss_parse_terminate(raptor_parser *rdf_parser)
{
raptor_rss_parser *rss_parser = (raptor_rss_parser*)rdf_parser->context;
int n;
if(rss_parser->sax2)
raptor_free_sax2(rss_parser->sax2);
raptor_rss_model_clear(&rss_parser->model);
for(n = 0; n < RAPTOR_RSS_NAMESPACES_SIZE; n++) {
if(rss_parser->nspaces[n])
raptor_free_namespace(rss_parser->nspaces[n]);
}
if(rss_parser->nstack)
raptor_free_namespaces(rss_parser->nstack);
raptor_rss_common_terminate(rdf_parser->world);
}
static int
raptor_rss_parse_start(raptor_parser *rdf_parser)
{
raptor_uri *uri = rdf_parser->base_uri;
raptor_rss_parser* rss_parser = (raptor_rss_parser*)rdf_parser->context;
int n;
/* base URI required for RSS */
if(!uri)
return 1;
for(n = 0; n < RAPTOR_RSS_NAMESPACES_SIZE; n++)
rss_parser->nspaces_seen[n] = 'N';
/* Optionally forbid internal network and file requests in the XML parser */
raptor_sax2_set_option(rss_parser->sax2,
RAPTOR_OPTION_NO_NET, NULL,
RAPTOR_OPTIONS_GET_NUMERIC(rdf_parser, RAPTOR_OPTION_NO_NET));
raptor_sax2_set_option(rss_parser->sax2,
RAPTOR_OPTION_NO_FILE, NULL,
RAPTOR_OPTIONS_GET_NUMERIC(rdf_parser, RAPTOR_OPTION_NO_FILE));
raptor_sax2_set_option(rss_parser->sax2,
RAPTOR_OPTION_LOAD_EXTERNAL_ENTITIES, NULL,
RAPTOR_OPTIONS_GET_NUMERIC(rdf_parser, RAPTOR_OPTION_LOAD_EXTERNAL_ENTITIES));
if(rdf_parser->uri_filter)
raptor_sax2_set_uri_filter(rss_parser->sax2, rdf_parser->uri_filter,
rdf_parser->uri_filter_user_data);
raptor_sax2_parse_start(rss_parser->sax2, uri);
return 0;
}
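/* Map an element name onto a CONTAINER type (channel/feed, item/entry, or
 * one of the other container items), add it to the model, and record it as
 * the current type, pushing the old one onto the one-place stack.
 * Returns non-0 when the name is not a known container. */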
static int
raptor_rss_add_container(raptor_rss_parser *rss_parser, const char *name)
{
raptor_rss_type type = RAPTOR_RSS_NONE;
if(!strcmp(name, "rss") || !strcmp(name, "rdf") || !strcmp(name, "RDF")) {
/* rss */
} else if(!raptor_strcasecmp(name, "channel")) {
/* rss or atom 0.3 channel */
type = RAPTOR_RSS_CHANNEL;
} else if(!strcmp(name, "feed")) {
/* atom 1.0 feed */
type = RAPTOR_RSS_CHANNEL;
rss_parser->is_atom = 1;
} else if(!strcmp(name, "item")) {
type = RAPTOR_RSS_ITEM;
} else if(!strcmp(name, "entry")) {
type = RAPTOR_RSS_ITEM;
rss_parser->is_atom = 1;
} else {
int i;
for(i = 0; i < RAPTOR_RSS_COMMON_SIZE; i++) {
if(!(raptor_rss_items_info[i].flags & RAPTOR_RSS_ITEM_CONTAINER))
continue;
if(!strcmp(name, raptor_rss_items_info[i].name)) {
/* rss and atom clash on the author name field (rss) or type (atom) */
if(i != RAPTOR_ATOM_AUTHOR ||
(i == RAPTOR_ATOM_AUTHOR && rss_parser->is_atom)) {
type = (raptor_rss_type)i;
break;
}
}
}
}
if(type != RAPTOR_RSS_NONE) {
if(type == RAPTOR_RSS_ITEM)
raptor_rss_model_add_item(&rss_parser->model);
else
raptor_rss_model_add_common(&rss_parser->model, type);
/* Inner container - push the current type onto a 1-place stack */
if(rss_parser->current_type != RAPTOR_RSS_NONE)
rss_parser->prev_type = rss_parser->current_type;
rss_parser->current_type = type;
}
return (type == RAPTOR_RSS_NONE);
}
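/* Normalise legacy namespace URIs to their modern equivalents so the rest
 * of the parser only ever sees RSS 1.0 and Atom 1.0. */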
static raptor_uri*
raptor_rss_promote_namespace_uri(raptor_world *world, raptor_uri* nspace_URI)
{
/* RSS 0.9 and RSS 1.1 namespaces => RSS 1.0 namespace */
if((raptor_uri_equals(nspace_URI,
world->rss_namespaces_info_uris[RSS0_9_NS]) ||
raptor_uri_equals(nspace_URI,
world->rss_namespaces_info_uris[RSS1_1_NS]))) {
nspace_URI = world->rss_namespaces_info_uris[RSS1_0_NS];
}
/* Atom 0.3 namespace => Atom 1.0 namespace */
if(raptor_uri_equals(nspace_URI,
world->rss_namespaces_info_uris[ATOM0_3_NS])) {
nspace_URI = world->rss_namespaces_info_uris[ATOM1_0_NS];
}
return nspace_URI;
}
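/* Return the item the parser is currently filling in: the newest item for
 * ITEM containers, otherwise the singleton for the current common type. */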
static raptor_rss_item*
raptor_rss_get_current_item(raptor_rss_parser *rss_parser)
{
raptor_rss_item* item;
if(rss_parser->current_type == RAPTOR_RSS_ITEM)
item = rss_parser->model.last;
else
item = raptor_rss_model_get_common(&rss_parser->model,
rss_parser->current_type);
return item;
}
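/* Store one attribute (or the element content) into a metadata block.
*
* Illustrative example (values hypothetical): for an item child such as
*   <enclosure url="http://example.org/a.mp3" length="1234" type="audio/mpeg"/>
* an RSS_BLOCK_FIELD_TYPE_URL field like url is resolved against the
* base URI into block->urls[offset], while RSS_BLOCK_FIELD_TYPE_STRING
* fields like length and type are copied into block->strings[offset],
* at the offsets recorded in raptor_rss_block_fields_info.
*/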
static int
raptor_rss_block_set_field(raptor_world *world, raptor_uri *base_uri,
raptor_rss_block *block,
const raptor_rss_block_field_info *bfi,
const char *string)
{
int attribute_type = bfi->attribute_type;
int offset = bfi->offset;
if(attribute_type == RSS_BLOCK_FIELD_TYPE_URL) {
raptor_uri* uri;
uri = raptor_new_uri_relative_to_base(world, base_uri,
(const unsigned char*)string);
if(!uri)
return 1;
block->urls[offset] = uri;
} else if(attribute_type == RSS_BLOCK_FIELD_TYPE_STRING) {
size_t len = strlen(string);
block->strings[offset] = RAPTOR_MALLOC(char*, len + 1);
if(!block->strings[offset])
return 1;
memcpy(block->strings[offset], string, len+1);
} else {
#ifdef RAPTOR_DEBUG
RAPTOR_FATAL2("Found unknown attribute_type %d\n", attribute_type);
#endif
return 1;
}
return 0;
}
static void
raptor_rss_start_element_handler(void *user_data,
raptor_xml_element* xml_element)
{
raptor_parser *rdf_parser;
raptor_rss_parser *rss_parser;
raptor_rss_block *block = NULL;
raptor_uri* base_uri;
raptor_qname *el_qname;
const unsigned char *name;
int ns_attributes_count;
raptor_qname** named_attrs;
const raptor_namespace* el_nspace;
raptor_rss_element* rss_element;
int i;
rdf_parser = (raptor_parser*)user_data;
rss_parser = (raptor_rss_parser*)rdf_parser->context;
rss_element = RAPTOR_CALLOC(raptor_rss_element*, 1, sizeof(*rss_element));
if(!rss_element) {
rdf_parser->failed = 1;
return;
}
rss_element->world = rdf_parser->world;
rss_element->sb = raptor_new_stringbuffer();
xml_element->user_data = rss_element;
if(xml_element->parent) {
raptor_rss_element* parent_rss_element;
parent_rss_element = (raptor_rss_element*)(xml_element->parent->user_data);
if(parent_rss_element->xml_writer)
rss_element->xml_writer = parent_rss_element->xml_writer;
}
if(rss_element->xml_writer) {
raptor_xml_writer_start_element(rss_element->xml_writer, xml_element);
return;
}
el_qname = raptor_xml_element_get_name(xml_element);
name = el_qname->local_name;
el_nspace = el_qname->nspace;
named_attrs = raptor_xml_element_get_attributes(xml_element);
ns_attributes_count = raptor_xml_element_get_attributes_count(xml_element);
base_uri = raptor_sax2_inscope_base_uri(rss_parser->sax2);
/* No container type - identify and record in rss_parser->current_type
* either as a top-level container or an inner-container */
if(!raptor_rss_add_container(rss_parser, (const char*)name)) {
#ifdef RAPTOR_DEBUG
if(1) {
raptor_rss_type old_type = rss_parser->prev_type;
if(old_type != rss_parser->current_type && old_type != RAPTOR_RSS_NONE)
RAPTOR_DEBUG5("FOUND inner container type %d - %s INSIDE current container type %d - %s\n", rss_parser->current_type,
raptor_rss_items_info[rss_parser->current_type].name,
old_type, raptor_rss_items_info[old_type].name);
else
RAPTOR_DEBUG3("FOUND container type %d - %s\n",
rss_parser->current_type,
raptor_rss_items_info[rss_parser->current_type].name);
}
#endif
/* check a few container attributes */
if(named_attrs) {
raptor_rss_item* update_item = raptor_rss_get_current_item(rss_parser);
for(i = 0; i < ns_attributes_count; i++) {
raptor_qname* attr = named_attrs[i];
const char* attrName = (const char*)attr->local_name;
const unsigned char* attrValue = attr->value;
RAPTOR_DEBUG3(" container attribute %s=%s\n", attrName, attrValue);
if(!strcmp(attrName, "about")) {
if(update_item) {
update_item->uri = raptor_new_uri(rdf_parser->world, attrValue);
update_item->term = raptor_new_term_from_uri(rdf_parser->world,
update_item->uri);
}
}
}
}
return;
} else if(rss_parser->current_type == RAPTOR_RSS_NONE) {
RAPTOR_DEBUG2("Unknown container element named %s\n", name);
/* Nothing more that can be done with unknown element - skip it */
return;
}
/* we have a container (current_type); the element inside it is either:
* 1. a metadata block element (such as rss:enclosure)
* 2. a field (such as atom:title)
*/
/* Find field ID */
rss_parser->current_field = RAPTOR_RSS_FIELD_UNKNOWN;
for(i = 0; i < RAPTOR_RSS_FIELDS_SIZE; i++) {
raptor_uri* nspace_URI;
raptor_uri* field_nspace_URI;
rss_info_namespace nsid = raptor_rss_fields_info[i].nspace;
if(strcmp((const char*)name, raptor_rss_fields_info[i].name))
continue;
if(!el_nspace) {
if(nsid != RSS_NO_NS && nsid != RSS1_0_NS && nsid != RSS0_91_NS &&
nsid != RSS0_9_NS && nsid != RSS1_1_NS)
continue;
/* Matches if the element has no namespace and field is not atom */
rss_parser->current_field = (raptor_rss_fields_type)i;
break;
}
/* Promote element namespaces */
nspace_URI = raptor_rss_promote_namespace_uri(rdf_parser->world,
raptor_namespace_get_uri(el_nspace));
field_nspace_URI = rdf_parser->world->rss_namespaces_info_uris[raptor_rss_fields_info[i].nspace];
if(raptor_uri_equals(nspace_URI,
field_nspace_URI)) {
rss_parser->current_field = (raptor_rss_fields_type)i;
break;
}
}
if(rss_parser->current_field == RAPTOR_RSS_FIELD_UNKNOWN) {
RAPTOR_DEBUG3("Unknown field element named %s inside type %s\n", name,
raptor_rss_items_info[rss_parser->current_type].name);
return;
}
/* Found a block element to process */
if(raptor_rss_fields_info[rss_parser->current_field].flags &
RAPTOR_RSS_INFO_FLAG_BLOCK_VALUE) {
raptor_rss_type block_type;
raptor_rss_item* update_item;
const unsigned char *id;
raptor_term* block_term;
block_type = raptor_rss_fields_info[rss_parser->current_field].block_type;
RAPTOR_DEBUG3("FOUND new block type %d - %s\n", block_type,
raptor_rss_items_info[block_type].name);
update_item = raptor_rss_get_current_item(rss_parser);
id = raptor_world_generate_bnodeid(rdf_parser->world);
block_term = raptor_new_term_from_blank(rdf_parser->world, id);
RAPTOR_FREE(char*, id);
block = raptor_new_rss_block(rdf_parser->world, block_type, block_term);
raptor_free_term(block_term);
raptor_rss_item_add_block(update_item, block);
rss_parser->current_block = block;
rss_parser->nspaces_seen[raptor_rss_items_info[block_type].nspace] = 'Y';
/* Now check block attributes */
if(named_attrs) {
for(i = 0; i < ns_attributes_count; i++) {
raptor_qname* attr = named_attrs[i];
const char* attrName = (const char*)attr->local_name;
const unsigned char* attrValue = attr->value;
const raptor_rss_block_field_info *bfi;
int offset = -1;
for(bfi = &raptor_rss_block_fields_info[0];
bfi->type != RAPTOR_RSS_NONE;
bfi++) {
if(!bfi->attribute)
continue;
if(bfi->type == block_type && !strcmp(attrName, bfi->attribute)) {
offset = bfi->offset;
break;
}
}
if(offset < 0)
continue;
/* Found attribute for this block type */
RAPTOR_DEBUG3(" found block attribute %s=%s\n", attrName, attrValue);
if(raptor_rss_block_set_field(rdf_parser->world, base_uri,
block, bfi, (const char*)attrValue)) {
rdf_parser->failed = 1;
return;
}
}
}
return;
}
/* Process field */
RAPTOR_DEBUG4("FOUND field %d - %s inside type %s\n",
rss_parser->current_field,
raptor_rss_fields_info[rss_parser->current_field].name,
raptor_rss_items_info[rss_parser->current_type].name);
/* Mark namespace seen in new field */
if(1) {
rss_info_namespace ns_index;
ns_index = raptor_rss_fields_info[rss_parser->current_field].nspace;
rss_parser->nspaces_seen[ns_index] = 'Y';
}
/* Now check for field attributes */
if(named_attrs) {
for(i = 0; i < ns_attributes_count; i++) {
raptor_qname* attr = named_attrs[i];
const unsigned char* attrName = attr->local_name;
const unsigned char* attrValue = attr->value;
RAPTOR_DEBUG3(" attribute %s=%s\n", attrName, attrValue);
/* Pick a few attributes to care about */
if(!strcmp((const char*)attrName, "isPermaLink")) {
raptor_rss_item* update_item = rss_parser->model.last;
if(!strcmp((const char*)name, "guid")) {
/* <guid isPermaLink="..."> */
if(update_item) {
raptor_rss_field* field = raptor_rss_new_field(rdf_parser->world);
RAPTOR_DEBUG1("fa1 - ");
raptor_rss_item_add_field(update_item, RAPTOR_RSS_FIELD_GUID, field);
if(!strcmp((const char*)attrValue, "true")) {
RAPTOR_DEBUG2(" setting guid to URI '%s'\n", attrValue);
field->uri = raptor_new_uri_relative_to_base(rdf_parser->world, base_uri,
(const unsigned char*)attrValue);
} else {
size_t len = strlen((const char*)attrValue);
RAPTOR_DEBUG2(" setting guid to string '%s'\n", attrValue);
field->value = RAPTOR_MALLOC(unsigned char*, len + 1);
if(!field->value) {
rdf_parser->failed = 1;
return;
}
memcpy(field->value, attrValue, len + 1);
}
}
}
} else if(!strcmp((const char*)attrName, "href")) {
if(rss_parser->current_field == RAPTOR_RSS_FIELD_LINK ||
rss_parser->current_field == RAPTOR_RSS_FIELD_ATOM_LINK) {
RAPTOR_DEBUG2(" setting href as URI string for type %s\n", raptor_rss_items_info[rss_parser->current_type].name);
if(rss_element->uri)
raptor_free_uri(rss_element->uri);
rss_element->uri = raptor_new_uri_relative_to_base(rdf_parser->world, base_uri,
(const unsigned char*)attrValue);
}
} else if(!strcmp((const char*)attrName, "type")) {
if(rss_parser->current_field == RAPTOR_RSS_FIELD_ATOM_LINK) {
/* do nothing with atom link attribute type */
} else if(rss_parser->is_atom) {
/* Atom only typing */
if(!strcmp((const char*)attrValue, "xhtml") ||
!strcmp((const char*)attrValue, "xml") ||
strstr((const char*)attrValue, "+xml")) {
RAPTOR_DEBUG2(" found type '%s', making an XML writer\n",
attrValue);
rss_element->type = RAPTOR_RSS_CONTENT_TYPE_XML;
rss_element->iostream = raptor_new_iostream_to_string(rdf_parser->world,
&rss_element->xml_content,
&rss_element->xml_content_length,
raptor_alloc_memory);
rss_element->xml_writer = raptor_new_xml_writer(rdf_parser->world,
NULL,
rss_element->iostream);
raptor_xml_writer_set_option(rss_element->xml_writer,
RAPTOR_OPTION_WRITER_XML_DECLARATION,
NULL, 0);
raptor_free_stringbuffer(rss_element->sb);
rss_element->sb = NULL;
}
}
} else if(!strcmp((const char*)attrName, "version")) {
if(!raptor_strcasecmp((const char*)name, "feed")) {
if(!strcmp((const char*)attrValue, "0.3"))
rss_parser->is_atom = 1;
}
}
}
} /* if have field attributes */
}
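/* End of an element: pick up the accumulated character data - either
* the plain stringbuffer, or for Atom XML content the markup captured
* by the inner xml_writer - and store it into the current metadata
* block or field, then pop back to the outer container type.
*/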
static void
raptor_rss_end_element_handler(void *user_data,
raptor_xml_element* xml_element)
{
raptor_parser* rdf_parser;
raptor_rss_parser* rss_parser;
#ifdef RAPTOR_DEBUG
const unsigned char* name = raptor_xml_element_get_name(xml_element)->local_name;
#endif
raptor_rss_element* rss_element;
size_t cdata_len = 0;
unsigned char* cdata = NULL;
rss_element = (raptor_rss_element*)xml_element->user_data;
rdf_parser = (raptor_parser*)user_data;
rss_parser = (raptor_rss_parser*)rdf_parser->context;
if(rss_element->xml_writer) {
if(rss_element->type != RAPTOR_RSS_CONTENT_TYPE_XML) {
raptor_xml_writer_end_element(rss_element->xml_writer, xml_element);
goto tidy_end_element;
}
/* otherwise we are done making XML */
raptor_free_iostream(rss_element->iostream);
rss_element->iostream = NULL;
cdata = (unsigned char*)rss_element->xml_content;
cdata_len = rss_element->xml_content_length;
}
if(rss_element->sb) {
cdata_len = raptor_stringbuffer_length(rss_element->sb);
cdata = raptor_stringbuffer_as_string(rss_element->sb);
}
if(cdata) {
raptor_uri* base_uri = NULL;
base_uri = raptor_sax2_inscope_base_uri(rss_parser->sax2);
if(rss_parser->current_block) {
const raptor_rss_block_field_info *bfi;
int handled = 0;
/* in a block, maybe store the CDATA there */
for(bfi = &raptor_rss_block_fields_info[0];
bfi->type != RAPTOR_RSS_NONE;
bfi++) {
if(bfi->type != rss_parser->current_block->rss_type ||
bfi->attribute != NULL)
continue;
/* Set author name from element */
if(raptor_rss_block_set_field(rdf_parser->world, base_uri,
rss_parser->current_block,
bfi, (const char*)cdata)) {
rdf_parser->failed = 1;
goto tidy_end_element;
}
handled = 1;
break;
}
#ifdef RAPTOR_DEBUG
if(!handled) {
raptor_rss_type block_type = rss_parser->current_block->rss_type;
RAPTOR_DEBUG3("Ignoring cdata for block %d - %s\n",
block_type, raptor_rss_items_info[block_type].name);
}
#endif
rss_parser->current_block = NULL;
goto do_end_element;
}
if(rss_parser->current_type == RAPTOR_RSS_NONE ||
rss_parser->current_field == RAPTOR_RSS_FIELD_NONE ||
rss_parser->current_field == RAPTOR_RSS_FIELD_UNKNOWN) {
unsigned char *p = cdata;
size_t i;
for(i = cdata_len; i > 0 && *p; i--) {
if(!isspace(*p))
break;
p++;
}
if(i > 0 && *p) {
RAPTOR_DEBUG4("IGNORING non-whitespace text '%s' inside type %s, field %s\n", cdata,
raptor_rss_items_info[rss_parser->current_type].name,
raptor_rss_fields_info[rss_parser->current_field].name);
}
goto do_end_element;
}
if(rss_parser->current_type >= RAPTOR_RSS_COMMON_IGNORED) {
/* skipHours, skipDays common but IGNORED */
RAPTOR_DEBUG2("Ignoring fields for type %s\n", raptor_rss_items_info[rss_parser->current_type].name);
} else {
raptor_rss_item* update_item = raptor_rss_get_current_item(rss_parser);
raptor_rss_field* field = raptor_rss_new_field(rdf_parser->world);
/* if value is always an uri, make it so */
if(raptor_rss_fields_info[rss_parser->current_field].flags &
RAPTOR_RSS_INFO_FLAG_URI_VALUE) {
RAPTOR_DEBUG4("Added URI %s to field %s of type %s\n", cdata, raptor_rss_fields_info[rss_parser->current_field].name, raptor_rss_items_info[rss_parser->current_type].name);
field->uri = raptor_new_uri_relative_to_base(rdf_parser->world, base_uri, cdata);
} else {
RAPTOR_DEBUG4("Added text '%s' to field %s of type %s\n", cdata, raptor_rss_fields_info[rss_parser->current_field].name, raptor_rss_items_info[rss_parser->current_type].name);
field->uri = NULL;
field->value = RAPTOR_MALLOC(unsigned char*, cdata_len + 1);
if(!field->value) {
raptor_rss_field_free(field);
rdf_parser->failed = 1;
goto tidy_end_element;
}
memcpy(field->value, cdata, cdata_len);
field->value[cdata_len] = '\0';
}
RAPTOR_DEBUG1("fa3 - ");
raptor_rss_item_add_field(update_item, rss_parser->current_field, field);
}
} /* end if contained cdata */
if(raptor_xml_element_is_empty(xml_element)) {
/* Empty element, so consider adding one of the attributes as
* literal or URI content
*/
if(rss_parser->current_type >= RAPTOR_RSS_COMMON_IGNORED) {
/* skipHours, skipDays common but IGNORED */
RAPTOR_DEBUG3("Ignoring empty element %s for type %s\n", name, raptor_rss_items_info[rss_parser->current_type].name);
} else if(rss_element->uri) {
raptor_rss_item* update_item = raptor_rss_get_current_item(rss_parser);
raptor_rss_field* field = raptor_rss_new_field(rdf_parser->world);
if(rss_parser->current_field == RAPTOR_RSS_FIELD_UNKNOWN) {
RAPTOR_DEBUG2("Cannot add URI from alternate attribute to type %s unknown field\n", raptor_rss_items_info[rss_parser->current_type].name);
raptor_rss_field_free(field);
} else {
RAPTOR_DEBUG3("Added URI to field %s of type %s\n", raptor_rss_fields_info[rss_parser->current_field].name, raptor_rss_items_info[rss_parser->current_type].name);
field->uri = rss_element->uri;
rss_element->uri = NULL;
RAPTOR_DEBUG1("fa2 - ");
raptor_rss_item_add_field(update_item, rss_parser->current_field, field);
}
}
}
do_end_element:
if(rss_parser->current_type != RAPTOR_RSS_NONE) {
if(rss_parser->current_field != RAPTOR_RSS_FIELD_NONE) {
RAPTOR_DEBUG3("Ending element %s field %s\n", name, raptor_rss_fields_info[rss_parser->current_field].name);
rss_parser->current_field = RAPTOR_RSS_FIELD_NONE;
} else {
RAPTOR_DEBUG3("Ending element %s type %s\n", name, raptor_rss_items_info[rss_parser->current_type].name);
if(rss_parser->prev_type != RAPTOR_RSS_NONE) {
rss_parser->current_type = rss_parser->prev_type;
rss_parser->prev_type = RAPTOR_RSS_NONE;
RAPTOR_DEBUG3("Returning to type %d - %s\n", rss_parser->current_type, raptor_rss_items_info[rss_parser->current_type].name);
} else
rss_parser->current_type = RAPTOR_RSS_NONE;
}
}
if(rss_parser->current_block) {
#ifdef RAPTOR_DEBUG
raptor_rss_type block_type = rss_parser->current_block->rss_type;
RAPTOR_DEBUG3("Ending current block %d - %s\n",
block_type, raptor_rss_items_info[block_type].name);
#endif
rss_parser->current_block = NULL;
}
tidy_end_element:
if(rss_element)
raptor_free_rss_element(rss_element);
}
static void
raptor_rss_cdata_handler(void *user_data, raptor_xml_element* xml_element,
const unsigned char *s, int len)
{
raptor_rss_element* rss_element;
rss_element = (raptor_rss_element*)xml_element->user_data;
if(rss_element->xml_writer) {
raptor_xml_writer_cdata_counted(rss_element->xml_writer, s, len);
return;
}
raptor_stringbuffer_append_counted_string(rss_element->sb, s, len, 1);
}
static void
raptor_rss_comment_handler(void *user_data, raptor_xml_element* xml_element,
const unsigned char *s)
{
raptor_rss_element* rss_element;
if(!xml_element)
return;
rss_element = (raptor_rss_element*)xml_element->user_data;
if(rss_element->xml_writer) {
raptor_xml_writer_comment(rss_element->xml_writer, s);
return;
}
}
static void
raptor_rss_sax2_new_namespace_handler(void *user_data,
raptor_namespace* nspace)
{
raptor_parser* rdf_parser = (raptor_parser*)user_data;
raptor_rss_parser* rss_parser;
int n;
rss_parser = (raptor_rss_parser*)rdf_parser->context;
for(n = 0; n < RAPTOR_RSS_NAMESPACES_SIZE; n++) {
raptor_uri* ns_uri = rdf_parser->world->rss_namespaces_info_uris[n];
if(!ns_uri)
continue;
if(raptor_uri_equals(ns_uri, nspace->uri)) {
rss_parser->nspaces_seen[n] = 'Y';
break;
}
}
}
/* Add an rss:link from string contents of either:
* atom:id
* atom:link[@rel="self"]/@href
*/
static int
raptor_rss_insert_rss_link(raptor_parser* rdf_parser,
raptor_rss_item* item)
{
raptor_rss_block *block;
raptor_rss_field* id_field;
raptor_rss_field* field = NULL;
/* Try atom:id first */
id_field = item->fields[RAPTOR_RSS_FIELD_ATOM_ID];
if(id_field && id_field->value) {
const char *value = (const char*)id_field->value;
size_t len = strlen(value);
field = raptor_rss_new_field(item->world);
if(!field)
return 1;
field->value = RAPTOR_MALLOC(unsigned char*, len + 1);
if(!field->value) {
raptor_rss_field_free(field);
return 1;
}
memcpy(field->value, value, len + 1);
raptor_rss_item_add_field(item, RAPTOR_RSS_FIELD_LINK, field);
return 0;
}
for(block = item->blocks; block; block = block->next) {
if(block->rss_type != RAPTOR_ATOM_LINK)
continue;
/* <link @href> is url at offset RAPTOR_RSS_LINK_HREF_URL_OFFSET
* <link @rel> is string at offset RAPTOR_RSS_LINK_REL_STRING_OFFSET
* The raptor_rss_block_fields_info structure records this
*/
if(!block->urls[RAPTOR_RSS_LINK_HREF_URL_OFFSET] ||
(block->strings[RAPTOR_RSS_LINK_REL_STRING_OFFSET] &&
strcmp(block->strings[RAPTOR_RSS_LINK_REL_STRING_OFFSET], "self"))
)
continue;
/* set the field rss:link to the string value of the @href */
field = raptor_rss_new_field(item->world);
if(!field)
return 1;
field->value = raptor_uri_to_string(block->urls[RAPTOR_RSS_LINK_HREF_URL_OFFSET]);
raptor_rss_item_add_field(item, RAPTOR_RSS_FIELD_LINK, field);
return 0;
}
return 0;
}
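/* Give every common block and item a subject term: the rdf:about URI
* when one was seen, else a URI made from an rss:link / rss:url /
* atom:id field, else (for the common blocks) a fresh blank node.
*/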
static int
raptor_rss_insert_identifiers(raptor_parser* rdf_parser)
{
raptor_rss_parser* rss_parser = (raptor_rss_parser*)rdf_parser->context;
int i;
raptor_rss_item* item;
for(i = 0; i< RAPTOR_RSS_COMMON_SIZE; i++) {
for(item = rss_parser->model.common[i]; item; item = item->next) {
if(!item->fields_count)
continue;
RAPTOR_DEBUG3("Inserting identifiers in common type %d - %s\n", i, raptor_rss_items_info[i].name);
if(item->uri) {
item->term = raptor_new_term_from_uri(rdf_parser->world, item->uri);
} else {
int url_fields[2];
int url_fields_count = 1;
int f;
url_fields[0] = (i== RAPTOR_RSS_IMAGE) ? RAPTOR_RSS_FIELD_URL :
RAPTOR_RSS_FIELD_LINK;
if(i == RAPTOR_RSS_CHANNEL) {
url_fields[1] = RAPTOR_RSS_FIELD_ATOM_ID;
url_fields_count++;
}
for(f = 0; f < url_fields_count; f++) {
raptor_rss_field* field;
for(field = item->fields[url_fields[f]]; field; field = field->next) {
raptor_uri *new_uri = NULL;
if(field->value)
new_uri = raptor_new_uri(rdf_parser->world,
(const unsigned char*)field->value);
else if(field->uri)
new_uri = raptor_uri_copy(field->uri);
if(!new_uri)
return 1;
item->term = raptor_new_term_from_uri(rdf_parser->world, new_uri);
raptor_free_uri(new_uri);
if(!item->term)
return 1;
break;
}
}
if(!item->term) {
const unsigned char *id;
/* need to make bnode */
id = raptor_world_generate_bnodeid(rdf_parser->world);
item->term = raptor_new_term_from_blank(rdf_parser->world, id);
RAPTOR_FREE(char*, id);
}
}
/* Try to add an rss:link if missing */
if(i == RAPTOR_RSS_CHANNEL && !item->fields[RAPTOR_RSS_FIELD_LINK]) {
if(raptor_rss_insert_rss_link(rdf_parser, item))
return 1;
}
item->node_type = &raptor_rss_items_info[i];
item->node_typei = i;
}
}
/* sequence of rss:item */
for(item = rss_parser->model.items; item; item = item->next) {
raptor_rss_block *block;
raptor_uri* uri = NULL;
if(!item->fields[RAPTOR_RSS_FIELD_LINK]) {
if(raptor_rss_insert_rss_link(rdf_parser, item))
return 1;
}
if(item->uri) {
uri = raptor_uri_copy(item->uri);
} else {
if(item->fields[RAPTOR_RSS_FIELD_LINK]) {
if(item->fields[RAPTOR_RSS_FIELD_LINK]->value)
uri = raptor_new_uri(rdf_parser->world,
(const unsigned char*)item->fields[RAPTOR_RSS_FIELD_LINK]->value);
else if(item->fields[RAPTOR_RSS_FIELD_LINK]->uri)
uri = raptor_uri_copy(item->fields[RAPTOR_RSS_FIELD_LINK]->uri);
} else if(item->fields[RAPTOR_RSS_FIELD_ATOM_ID]) {
if(item->fields[RAPTOR_RSS_FIELD_ATOM_ID]->value)
uri = raptor_new_uri(rdf_parser->world,
(const unsigned char*)item->fields[RAPTOR_RSS_FIELD_ATOM_ID]->value);
else if(item->fields[RAPTOR_RSS_FIELD_ATOM_ID]->uri)
uri = raptor_uri_copy(item->fields[RAPTOR_RSS_FIELD_ATOM_ID]->uri);
}
}
if(uri) {
item->term = raptor_new_term_from_uri(rdf_parser->world, uri);
raptor_free_uri(uri);
uri = NULL;
}
for(block = item->blocks; block; block = block->next) {
if(!block->identifier) {
const unsigned char *id;
/* need to make bnode */
id = raptor_world_generate_bnodeid(rdf_parser->world);
block->identifier = raptor_new_term_from_blank(rdf_parser->world, id);
RAPTOR_FREE(char*, id);
}
}
item->node_type = &raptor_rss_items_info[RAPTOR_RSS_ITEM];
item->node_typei = RAPTOR_RSS_ITEM;
}
return 0;
}
static int
raptor_rss_emit_type_triple(raptor_parser* rdf_parser,
raptor_term *resource,
raptor_uri *type_uri)
{
raptor_rss_parser* rss_parser = (raptor_rss_parser*)rdf_parser->context;
raptor_term *predicate_term;
raptor_term *object_term;
if(!resource) {
raptor_parser_error(rdf_parser, "RSS node has no identifier");
return 1;
}
rss_parser->statement.subject = resource;
predicate_term = raptor_new_term_from_uri(rdf_parser->world,
RAPTOR_RDF_type_URI(rdf_parser->world));
rss_parser->statement.predicate = predicate_term;
object_term = raptor_new_term_from_uri(rdf_parser->world, type_uri);
rss_parser->statement.object = object_term;
/* Generate the statement */
(*rdf_parser->statement_handler)(rdf_parser->user_data, &rss_parser->statement);
raptor_free_term(predicate_term);
raptor_free_term(object_term);
return 0;
}
static int
raptor_rss_emit_block(raptor_parser* rdf_parser,
raptor_term *resource,
raptor_rss_block *block)
{
raptor_rss_parser* rss_parser = (raptor_rss_parser*)rdf_parser->context;
raptor_rss_type block_type = block->rss_type;
raptor_uri *predicate_uri;
raptor_term *predicate_term = NULL;
const raptor_rss_block_field_info *bfi;
raptor_rss_fields_type predicate_field;
if(!block->identifier) {
raptor_parser_error(rdf_parser, "Block has no identifier");
return 1;
}
predicate_field = raptor_rss_items_info[block_type].predicate;
predicate_uri = rdf_parser->world->rss_fields_info_uris[predicate_field];
predicate_term = raptor_new_term_from_uri(rdf_parser->world,
predicate_uri);
rss_parser->statement.subject = resource;
rss_parser->statement.predicate = predicate_term;
rss_parser->statement.object = block->identifier;
(*rdf_parser->statement_handler)(rdf_parser->user_data,
&rss_parser->statement);
raptor_free_term(predicate_term); predicate_term = NULL;
if(raptor_rss_emit_type_triple(rdf_parser, block->identifier,
block->node_type))
return 1;
for(bfi = &raptor_rss_block_fields_info[0];
bfi->type != RAPTOR_RSS_NONE;
bfi++) {
int attribute_type;
int offset;
if(bfi->type != block_type || !bfi->attribute)
continue;
attribute_type = bfi->attribute_type;
offset = bfi->offset;
predicate_uri = rdf_parser->world->rss_fields_info_uris[bfi->field];
predicate_term = raptor_new_term_from_uri(rdf_parser->world,
predicate_uri);
rss_parser->statement.predicate = predicate_term;
if(attribute_type == RSS_BLOCK_FIELD_TYPE_URL) {
raptor_uri *uri = block->urls[offset];
if(uri) {
raptor_term* object_term;
object_term = raptor_new_term_from_uri(rdf_parser->world, uri);
rss_parser->statement.object = object_term;
(*rdf_parser->statement_handler)(rdf_parser->user_data,
&rss_parser->statement);
raptor_free_term(object_term);
}
} else if(attribute_type == RSS_BLOCK_FIELD_TYPE_STRING) {
const char *str = block->strings[offset];
if(str) {
raptor_term* object_term;
object_term = raptor_new_term_from_literal(rdf_parser->world,
(const unsigned char*)str,
NULL, NULL);
rss_parser->statement.object = object_term;
(*rdf_parser->statement_handler)(rdf_parser->user_data,
&rss_parser->statement);
raptor_free_term(object_term);
}
} else {
#ifdef RAPTOR_DEBUG
RAPTOR_FATAL2("Found unknown attribute_type %d\n", attribute_type);
#endif
}
raptor_free_term(predicate_term); predicate_term = NULL;
}
return 0;
}
static int
raptor_rss_emit_item(raptor_parser* rdf_parser, raptor_rss_item *item)
{
raptor_rss_parser* rss_parser = (raptor_rss_parser*)rdf_parser->context;
int f;
raptor_rss_block *block;
raptor_uri *type_uri;
if(!item->fields_count)
return 0;
/* HACK - FIXME - set correct atom output class type */
if(item->node_typei == RAPTOR_ATOM_AUTHOR)
type_uri = rdf_parser->world->rss_fields_info_uris[RAPTOR_RSS_RDF_ATOM_AUTHOR_CLASS];
else
type_uri = rdf_parser->world->rss_types_info_uris[item->node_typei];
if(raptor_rss_emit_type_triple(rdf_parser, item->term, type_uri))
return 1;
for(f = 0; f< RAPTOR_RSS_FIELDS_SIZE; f++) {
raptor_rss_field* field;
raptor_uri* predicate_uri = NULL;
raptor_term* predicate_term = NULL;
/* This is only made by a connection */
if(f == RAPTOR_RSS_FIELD_ITEMS)
continue;
/* skip predicates with no URI (no namespace e.g. RSS 2) */
predicate_uri = rdf_parser->world->rss_fields_info_uris[f];
if(!predicate_uri)
continue;
predicate_term = raptor_new_term_from_uri(rdf_parser->world,
predicate_uri);
if(!predicate_term)
continue;
rss_parser->statement.predicate = predicate_term;
for(field = item->fields[f]; field; field = field->next) {
raptor_term* object_term;
if(field->value) {
/* FIXME - should store and emit languages */
object_term = raptor_new_term_from_literal(rdf_parser->world,
field->value,
NULL, NULL);
} else {
object_term = raptor_new_term_from_uri(rdf_parser->world,
field->uri);
}
rss_parser->statement.object = object_term;
/* Generate the statement */
(*rdf_parser->statement_handler)(rdf_parser->user_data,
&rss_parser->statement);
raptor_free_term(object_term);
}
raptor_free_term(predicate_term);
}
for(block = item->blocks; block; block = block->next) {
raptor_rss_emit_block(rdf_parser, item->term, block);
}
return 0;
}
static int
raptor_rss_emit_connection(raptor_parser* rdf_parser,
raptor_term *subject_identifier,
raptor_uri* predicate_uri, int predicate_ordinal,
raptor_term *object_identifier)
{
raptor_rss_parser* rss_parser = (raptor_rss_parser*)rdf_parser->context;
raptor_uri *puri = NULL;
raptor_term *predicate_term = NULL;
if(!subject_identifier) {
raptor_parser_error(rdf_parser, "Connection subject has no identifier");
return 1;
}
rss_parser->statement.subject = subject_identifier;
if(!predicate_uri) {
/* new URI object */
puri = raptor_new_uri_from_rdf_ordinal(rdf_parser->world, predicate_ordinal);
predicate_uri = puri;
}
predicate_term = raptor_new_term_from_uri(rdf_parser->world,
predicate_uri);
rss_parser->statement.predicate = predicate_term;
rss_parser->statement.object = object_identifier;
/* Generate the statement */
(*rdf_parser->statement_handler)(rdf_parser->user_data,
&rss_parser->statement);
raptor_free_term(predicate_term);
if(puri)
raptor_free_uri(puri);
return 0;
}
static int
raptor_rss_emit(raptor_parser* rdf_parser)
{
raptor_rss_parser* rss_parser = (raptor_rss_parser*)rdf_parser->context;
int i;
raptor_rss_item* item;
int rc = 0;
if(!rss_parser->model.common[RAPTOR_RSS_CHANNEL]) {
raptor_parser_error(rdf_parser, "No RSS channel item present");
return 1;
}
if(!rss_parser->model.common[RAPTOR_RSS_CHANNEL]->term) {
raptor_parser_error(rdf_parser, "RSS channel has no identifier");
return 1;
}
/* Emit start default graph mark */
raptor_parser_start_graph(rdf_parser, NULL, 0);
rdf_parser->emitted_default_graph++;
/* Emit all the common type blocks (channel, author, ...) */
for(i = 0; i< RAPTOR_RSS_COMMON_SIZE; i++) {
for(item = rss_parser->model.common[i]; item; item = item->next) {
if(!item->fields_count)
continue;
RAPTOR_DEBUG3("Emitting type %i - %s\n", i, raptor_rss_items_info[i].name);
if(!item->term) {
raptor_parser_error(rdf_parser, "RSS %s has no identifier",
raptor_rss_items_info[i].name);
rc = 1;
goto tidy;
}
if(raptor_rss_emit_item(rdf_parser, item)) {
rc = 1;
goto tidy;
}
/* Add connections to channel */
if(i != RAPTOR_RSS_CHANNEL) {
if(raptor_rss_emit_connection(rdf_parser,
rss_parser->model.common[RAPTOR_RSS_CHANNEL]->term,
rdf_parser->world->rss_types_info_uris[i], 0,
item->term)) {
rc = 1;
goto tidy;
}
}
}
}
/* Emit the feed item blocks */
if(rss_parser->model.items_count) {
const unsigned char* id;
raptor_term *items;
id = raptor_world_generate_bnodeid(rdf_parser->world);
/* make a new genid for the <rdf:Seq> node */
items = raptor_new_term_from_blank(rdf_parser->world, id);
RAPTOR_FREE(char*, id);
/* _:genid1 rdf:type rdf:Seq . */
if(raptor_rss_emit_type_triple(rdf_parser, items,
RAPTOR_RDF_Seq_URI(rdf_parser->world))) {
raptor_free_term(items);
rc = 1;
goto tidy;
}
/* <channelURI> rss:items _:genid1 . */
if(raptor_rss_emit_connection(rdf_parser,
rss_parser->model.common[RAPTOR_RSS_CHANNEL]->term,
rdf_parser->world->rss_fields_info_uris[RAPTOR_RSS_FIELD_ITEMS], 0,
items)) {
raptor_free_term(items);
rc = 1;
goto tidy;
}
/* sequence of rss:item */
for(i = 1, item = rss_parser->model.items; item; item = item->next, i++) {
if(raptor_rss_emit_item(rdf_parser, item) ||
raptor_rss_emit_connection(rdf_parser, items, NULL, i,item->term)) {
raptor_free_term(items);
rc = 1;
goto tidy;
}
}
raptor_free_term(items);
}
tidy:
if(rdf_parser->emitted_default_graph) {
raptor_parser_end_graph(rdf_parser, NULL, 0);
rdf_parser->emitted_default_graph--;
}
return rc;
}
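/* For a minimal one-item feed the triples emitted above look roughly
* like this (illustrative N-Triples, URIs abbreviated):
*   <channelURI> rdf:type rss:channel .
*   <channelURI> rss:items _:genid1 .
*   _:genid1 rdf:type rdf:Seq .
*   _:genid1 rdf:_1 <itemURI> .
*   <itemURI> rdf:type rss:item .
*   <itemURI> rss:title "..." .
*/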
static int
raptor_rss_copy_field(raptor_rss_parser* rss_parser,
raptor_rss_item* item,
const raptor_field_pair* pair)
{
raptor_rss_fields_type from_field = pair->from;
raptor_rss_fields_type to_field = pair->to;
raptor_rss_field* field = NULL;
if(!(item->fields[from_field] && item->fields[from_field]->value))
return 1;
if(from_field == to_field) {
field = item->fields[from_field];
} else {
if(item->fields[to_field] && item->fields[to_field]->value)
return 1;
field = raptor_rss_new_field(item->world);
field->is_mapped = 1;
raptor_rss_item_add_field(item, to_field, field);
}
/* Ensure output namespace is declared */
rss_parser->nspaces_seen[raptor_rss_fields_info[to_field].nspace] = 'Y';
if(!field->value) {
if(pair->conversion)
pair->conversion(item->fields[from_field], field);
else {
size_t len;
/* Otherwise default action is to copy from_field value */
len = strlen((const char*)item->fields[from_field]->value);
field->value = RAPTOR_MALLOC(unsigned char*, len + 1);
if(!field->value)
return 1;
memcpy(field->value, item->fields[from_field]->value, len + 1);
}
}
return 0;
}
static void
raptor_rss_uplift_fields(raptor_rss_parser* rss_parser, raptor_rss_item* item)
{
int i;
/* COPY some fields from atom to rss/dc */
for(i = 0; raptor_atom_to_rss[i].from != RAPTOR_RSS_FIELD_UNKNOWN; i++) {
#ifdef RAPTOR_DEBUG
raptor_rss_fields_type from_field = raptor_atom_to_rss[i].from;
raptor_rss_fields_type to_field = raptor_atom_to_rss[i].to;
#endif
if(raptor_rss_copy_field(rss_parser, item, &raptor_atom_to_rss[i]))
continue;
RAPTOR_DEBUG3("Copied field %s to rss field %s\n",
raptor_rss_fields_info[from_field].name,
raptor_rss_fields_info[to_field].name);
}
}
static void
raptor_rss_uplift_items(raptor_parser* rdf_parser)
{
raptor_rss_parser* rss_parser = (raptor_rss_parser*)rdf_parser->context;
int i;
raptor_rss_item* item;
for(i = 0; i< RAPTOR_RSS_COMMON_SIZE; i++) {
for(item = rss_parser->model.common[i]; item; item = item->next) {
raptor_rss_uplift_fields(rss_parser, item);
}
}
for(item = rss_parser->model.items; item; item = item->next) {
raptor_rss_uplift_fields(rss_parser, item);
}
}
static void
raptor_rss_start_namespaces(raptor_parser* rdf_parser)
{
raptor_rss_parser* rss_parser = (raptor_rss_parser*)rdf_parser->context;
int i;
int n;
/* for each item type (channel, item, ...) */
for(i = 0; i< RAPTOR_RSS_COMMON_SIZE; i++) {
raptor_rss_item* item;
/* for each item instance of a type */
for(item = rss_parser->model.common[i]; item; item = item->next) {
int f;
if(!item->fields_count)
continue;
/* for each field */
for(f = 0; f< RAPTOR_RSS_FIELDS_SIZE; f++) {
raptor_rss_field* field;
/* for each field value */
for(field = item->fields[f]; field; field = field->next) {
rss_info_namespace ns_index = raptor_rss_fields_info[f].nspace;
rss_parser->nspaces_seen[ns_index] = 'Y';
/* knowing there is one value is enough */
break;
}
}
}
}
/* start the namespaces */
for(n = 0; n < RAPTOR_RSS_NAMESPACES_SIZE; n++) {
if(rss_parser->nspaces[n] && rss_parser->nspaces_seen[n] == 'Y')
raptor_parser_start_namespace(rdf_parser, rss_parser->nspaces[n]);
}
}
static int
raptor_rss_parse_chunk(raptor_parser* rdf_parser,
const unsigned char *s, size_t len,
int is_end)
{
raptor_rss_parser* rss_parser = (raptor_rss_parser*)rdf_parser->context;
if(rdf_parser->failed)
return 1;
raptor_sax2_parse_chunk(rss_parser->sax2, s, len, is_end);
if(!is_end)
return 0;
if(rdf_parser->failed)
return 1;
/* turn strings into URIs, move things around if needed */
if(raptor_rss_insert_identifiers(rdf_parser)) {
rdf_parser->failed = 1;
return 1;
}
/* add some new fields */
raptor_rss_uplift_items(rdf_parser);
/* find out what namespaces to declare and start them */
raptor_rss_start_namespaces(rdf_parser);
/* generate the triples */
raptor_rss_emit(rdf_parser);
return 0;
}
static int
raptor_rss_parse_recognise_syntax(raptor_parser_factory* factory,
const unsigned char *buffer, size_t len,
const unsigned char *identifier,
const unsigned char *suffix,
const char *mime_type)
{
int score = 0;
if(suffix) {
if(!strcmp((const char*)suffix, "rss"))
score = 7;
if(!strcmp((const char*)suffix, "atom"))
score = 5;
if(!strcmp((const char*)suffix, "xml"))
score = 4;
}
if(identifier) {
if(!strncmp((const char*)identifier, "http://feed", 11))
score += 5;
else if(strstr((const char*)identifier, "feed"))
score += 3;
if(strstr((const char*)identifier, "rss2"))
score += 5;
else if(!suffix && strstr((const char*)identifier, "rss"))
score += 4;
else if(!suffix && strstr((const char*)identifier, "atom"))
score += 4;
else if(strstr((const char*)identifier, "rss.xml"))
score += 4;
else if(strstr((const char*)identifier, "atom.xml"))
score += 4;
}
if(mime_type) {
if(!strstr((const char*)mime_type, "html")) {
if(strstr((const char*)mime_type, "rss"))
score += 4;
else if(strstr((const char*)mime_type, "xml"))
score += 4;
else if(strstr((const char*)mime_type, "atom"))
score += 4;
}
}
return score;
}
static const char* const rss_tag_soup_names[2] = { "rss-tag-soup", NULL };
#define RSS_TAG_SOUP_TYPES_COUNT 6
static const raptor_type_q rss_tag_soup_types[RSS_TAG_SOUP_TYPES_COUNT + 1] = {
{ "application/rss", 15, 8},
{ "application/rss+xml", 19, 8},
{ "text/rss", 8, 8},
{ "application/xml", 15, 3},
{ "text/xml", 8, 3},
{ "application/atom+xml", 20, 3},
{ NULL, 0, 0}
};
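/* Sketch of how an application reaches this parser (assuming the
* public raptor2 API; not part of this file):
*
*   raptor_world *world = raptor_new_world();
*   raptor_parser *p = raptor_new_parser(world, "rss-tag-soup");
*   raptor_parser_set_statement_handler(p, user_data, print_triple);
*   raptor_parser_parse_uri(p, uri, NULL);
*
* raptor_new_parser() finds the factory through the name registered
* below in raptor_rss_parser_register_factory().
*/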
static int
raptor_rss_parser_register_factory(raptor_parser_factory *factory)
{
int rc = 0;
factory->desc.names = rss_tag_soup_names;
factory->desc.mime_types = rss_tag_soup_types;
factory->desc.label = "RSS Tag Soup";
factory->desc.uri_strings = NULL;
factory->desc.flags = RAPTOR_SYNTAX_NEED_BASE_URI;
factory->context_length = sizeof(raptor_rss_parser);
factory->init = raptor_rss_parse_init;
factory->terminate = raptor_rss_parse_terminate;
factory->start = raptor_rss_parse_start;
factory->chunk = raptor_rss_parse_chunk;
factory->recognise_syntax = raptor_rss_parse_recognise_syntax;
return rc;
}
int
raptor_init_parser_rss(raptor_world* world)
{
return !raptor_world_register_parser_factory(world,
&raptor_rss_parser_register_factory);
}
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_3568_9 |
crossvul-cpp_data_bad_860_0 | /* $Id: upnpevents.c,v 1.39 2018/03/12 22:41:54 nanard Exp $ */
/* vim: tabstop=4 shiftwidth=4 noexpandtab
* MiniUPnP project
* http://miniupnp.free.fr/ or http://miniupnp.tuxfamily.org/
* (c) 2008-2018 Thomas Bernard
* This software is subject to the conditions detailed
* in the LICENCE file provided within the distribution */
#include <stdio.h>
#include <string.h>
#include <syslog.h>
#include <sys/queue.h>
#include <stdlib.h>
#include <unistd.h>
#include <time.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <errno.h>
#include "config.h"
#if defined(LIB_UUID)
/* as found on linux */
#include <uuid/uuid.h>
#elif defined(BSD_UUID)
#include <uuid.h>
#endif /* LIB_UUID / BSD_UUID */
#include "upnpevents.h"
#include "miniupnpdpath.h"
#include "upnpglobalvars.h"
#include "upnpdescgen.h"
#include "upnputils.h"
#ifdef ENABLE_EVENTS
/*enum subscriber_service_enum {
EWanCFG = 1,
EWanIPC,
EL3F
};*/
/* structure definitions */
struct subscriber {
LIST_ENTRY(subscriber) entries;
struct upnp_event_notify * notify;
time_t timeout;
uint32_t seq;
enum subscriber_service_enum service;
char uuid[42];
char callback[];
};
struct upnp_event_notify {
LIST_ENTRY(upnp_event_notify) entries;
int s; /* socket */
enum { ECreated=1,
EConnecting,
ESending,
EWaitingForResponse,
EFinished,
EError } state;
struct subscriber * sub;
char * buffer;
int buffersize;
int tosend;
int sent;
const char * path;
#ifdef ENABLE_IPV6
int ipv6;
char addrstr[48];
#else
char addrstr[16];
#endif
char portstr[8];
};
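/* A notify object moves through ECreated -> EConnecting -> ESending ->
* EWaitingForResponse -> EFinished (or EError at any step); the
* transitions are driven by upnpevents_selectfds() and
* upnpevents_processfds() from the daemon's select() loop. */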
/* prototypes */
static void
upnp_event_create_notify(struct subscriber * sub);
/* Subscriber list */
LIST_HEAD(listhead, subscriber) subscriberlist = { NULL };
/* notify list */
LIST_HEAD(listheadnotif, upnp_event_notify) notifylist = { NULL };
/* create a new subscriber */
static struct subscriber *
newSubscriber(const char * eventurl, const char * callback, int callbacklen)
{
struct subscriber * tmp;
if(!eventurl || !callback || !callbacklen)
return NULL;
tmp = calloc(1, sizeof(struct subscriber)+callbacklen+1);
if(!tmp)
return NULL;
if(strcmp(eventurl, WANCFG_EVENTURL)==0)
tmp->service = EWanCFG;
else if(strcmp(eventurl, WANIPC_EVENTURL)==0)
tmp->service = EWanIPC;
#ifdef ENABLE_L3F_SERVICE
else if(strcmp(eventurl, L3F_EVENTURL)==0)
tmp->service = EL3F;
#endif
#ifdef ENABLE_6FC_SERVICE
else if(strcmp(eventurl, WANIP6FC_EVENTURL)==0)
tmp->service = E6FC;
#endif
#ifdef ENABLE_DP_SERVICE
else if(strcmp(eventurl, DP_EVENTURL)==0)
tmp->service = EDP;
#endif
else {
free(tmp);
return NULL;
}
memcpy(tmp->callback, callback, callbacklen);
tmp->callback[callbacklen] = '\0';
#if defined(LIB_UUID)
{
uuid_t uuid;
uuid_generate(uuid);
memcpy(tmp->uuid, "uuid:", 5);
uuid_unparse(uuid, tmp->uuid + 5);
}
#elif defined(BSD_UUID)
{
uuid_t uuid;
uint32_t status;
uuid_create(&uuid, &status);
if(status != uuid_s_ok) {
syslog(LOG_ERR, "uuid_create() failed (%u)", status);
} else {
char * uuid_str;
uuid_to_string(&uuid, &uuid_str, &status);
if(status != uuid_s_ok) {
syslog(LOG_ERR, "uuid_to_string() failed (%u)", status);
} else {
if(strlen(uuid_str) != 36) {
syslog(LOG_ERR, "uuid_to_string() returned %s", uuid_str);
status = (uint32_t)-1;
} else {
memcpy(tmp->uuid, "uuid:", 5);
memcpy(tmp->uuid + 5, uuid_str, 36);
tmp->uuid[sizeof(tmp->uuid)-1] = '\0';
}
free(uuid_str);
}
}
if(status != uuid_s_ok) {
/* make a dummy uuid */
strncpy(tmp->uuid, uuidvalue_igd, sizeof(tmp->uuid));
tmp->uuid[sizeof(tmp->uuid)-1] = '\0';
snprintf(tmp->uuid+sizeof(tmp->uuid)-5, 5, "%04lx", random() & 0xffff);
}
}
#else
/* make a dummy uuid */
strncpy(tmp->uuid, uuidvalue_igd, sizeof(tmp->uuid));
tmp->uuid[sizeof(tmp->uuid)-1] = '\0';
snprintf(tmp->uuid+sizeof(tmp->uuid)-5, 5, "%04lx", random() & 0xffff);
#endif
return tmp;
}
/* creates a new subscriber and adds it to the subscriber list
* also initiates the 1st notify
* TODO : add a check on the number of subscribers in order to
* prevent memory exhaustion... */
const char *
upnpevents_addSubscriber(const char * eventurl,
const char * callback, int callbacklen,
int timeout)
{
struct subscriber * tmp;
/*static char uuid[42];*/
/* "uuid:00000000-0000-0000-0000-000000000000"; 5+36+1=42bytes */
syslog(LOG_DEBUG, "addSubscriber(%s, %.*s, %d)",
eventurl, callbacklen, callback, timeout);
/*strncpy(uuid, uuidvalue, sizeof(uuid));
uuid[sizeof(uuid)-1] = '\0';*/
tmp = newSubscriber(eventurl, callback, callbacklen);
if(!tmp)
return NULL;
if(timeout)
tmp->timeout = upnp_time() + timeout;
LIST_INSERT_HEAD(&subscriberlist, tmp, entries);
upnp_event_create_notify(tmp);
return tmp->uuid;
}
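/* A subscription then lives until an UNSUBSCRIBE removes it, its
* timeout expires, or a notify to its callback fails (see
* upnpevents_processfds()). */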
/* renew a subscription (update the timeout) */
const char *
upnpevents_renewSubscription(const char * sid, int sidlen, int timeout)
{
struct subscriber * sub;
for(sub = subscriberlist.lh_first; sub != NULL; sub = sub->entries.le_next) {
if((sidlen == 41) && (memcmp(sid, sub->uuid, 41) == 0)) {
#ifdef UPNP_STRICT
/* check if the subscription already timed out */
if(sub->timeout && upnp_time() > sub->timeout)
continue;
#endif
sub->timeout = (timeout ? upnp_time() + timeout : 0);
return sub->uuid;
}
}
return NULL;
}
int
upnpevents_removeSubscriber(const char * sid, int sidlen)
{
struct subscriber * sub;
if(!sid)
return -1;
for(sub = subscriberlist.lh_first; sub != NULL; sub = sub->entries.le_next) {
if((sidlen == 41) && (memcmp(sid, sub->uuid, 41) == 0)) {
if(sub->notify) {
sub->notify->sub = NULL;
}
LIST_REMOVE(sub, entries);
free(sub);
return 0;
}
}
return -1;
}
/* notifies all subscribers of port mapping changes
* or external ip address changes */
void
upnp_event_var_change_notify(enum subscriber_service_enum service)
{
struct subscriber * sub;
for(sub = subscriberlist.lh_first; sub != NULL; sub = sub->entries.le_next) {
if(sub->service == service && sub->notify == NULL)
upnp_event_create_notify(sub);
}
}
/* create and add the notify object to the list */
static void
upnp_event_create_notify(struct subscriber * sub)
{
struct upnp_event_notify * obj;
/*struct timeval sock_timeout;*/
obj = calloc(1, sizeof(struct upnp_event_notify));
if(!obj) {
syslog(LOG_ERR, "%s: calloc(): %m", "upnp_event_create_notify");
return;
}
obj->sub = sub;
obj->state = ECreated;
#ifdef ENABLE_IPV6
obj->s = socket((obj->sub->callback[7] == '[') ? PF_INET6 : PF_INET,
SOCK_STREAM, 0);
#else
obj->s = socket(PF_INET, SOCK_STREAM, 0);
#endif
if(obj->s<0) {
syslog(LOG_ERR, "%s: socket(): %m", "upnp_event_create_notify");
goto error;
}
#if 0 /* does not work for non blocking connect() */
/* set timeout to 3 seconds */
sock_timeout.tv_sec = 3;
sock_timeout.tv_usec = 0;
if(setsockopt(obj->s, SOL_SOCKET, SO_RCVTIMEO, &sock_timeout, sizeof(struct timeval)) < 0) {
syslog(LOG_WARNING, "%s: setsockopt(SO_RCVTIMEO): %m",
"upnp_event_create_notify");
}
sock_timeout.tv_sec = 3;
sock_timeout.tv_usec = 0;
if(setsockopt(obj->s, SOL_SOCKET, SO_SNDTIMEO, &sock_timeout, sizeof(struct timeval)) < 0) {
syslog(LOG_WARNING, "%s: setsockopt(SO_SNDTIMEO): %m",
"upnp_event_create_notify");
}
#endif
/* set socket non blocking */
if(!set_non_blocking(obj->s)) {
syslog(LOG_ERR, "%s: set_non_blocking(): %m",
"upnp_event_create_notify");
goto error;
}
if(sub)
sub->notify = obj;
LIST_INSERT_HEAD(&notifylist, obj, entries);
return;
error:
if(obj->s >= 0)
close(obj->s);
free(obj);
}
static void
upnp_event_notify_connect(struct upnp_event_notify * obj)
{
unsigned int i;
const char * p;
unsigned short port;
#ifdef ENABLE_IPV6
struct sockaddr_storage addr;
socklen_t addrlen;
#else
struct sockaddr_in addr;
socklen_t addrlen;
#endif
if(!obj)
return;
memset(&addr, 0, sizeof(addr));
i = 0;
if(obj->sub == NULL) {
obj->state = EError;
return;
}
p = obj->sub->callback;
p += 7; /* http:// */
#ifdef ENABLE_IPV6
if(*p == '[') { /* ip v6 */
obj->addrstr[i++] = '[';
p++;
obj->ipv6 = 1;
while(*p != '\0' && *p != ']' && i < (sizeof(obj->addrstr)-1))
obj->addrstr[i++] = *(p++);
if(*p == ']')
p++;
if(i < (sizeof(obj->addrstr)-1))
obj->addrstr[i++] = ']';
} else {
#endif
while(*p != '\0' && *p != '/' && *p != ':' && i < (sizeof(obj->addrstr)-1))
obj->addrstr[i++] = *(p++);
#ifdef ENABLE_IPV6
}
#endif
obj->addrstr[i] = '\0';
if(*p == ':') {
obj->portstr[0] = *p;
i = 1;
p++;
port = (unsigned short)atoi(p);
while(*p != '\0' && *p != '/') {
if(i<7) obj->portstr[i++] = *p;
p++;
}
obj->portstr[i] = 0;
} else {
port = 80;
obj->portstr[0] = '\0';
}
obj->path = p;
#ifdef ENABLE_IPV6
if(obj->ipv6) {
char addrstr_tmp[48];
struct sockaddr_in6 * sa = (struct sockaddr_in6 *)&addr;
sa->sin6_family = AF_INET6;
i = (int)strlen(obj->addrstr);
if(i > 2) {
i -= 2;
memcpy(addrstr_tmp, obj->addrstr + 1, i);
addrstr_tmp[i] = '\0';
inet_pton(AF_INET6, addrstr_tmp, &(sa->sin6_addr));
}
sa->sin6_port = htons(port);
addrlen = sizeof(struct sockaddr_in6);
} else {
struct sockaddr_in * sa = (struct sockaddr_in *)&addr;
sa->sin_family = AF_INET;
inet_pton(AF_INET, obj->addrstr, &(sa->sin_addr));
sa->sin_port = htons(port);
addrlen = sizeof(struct sockaddr_in);
}
#else
addr.sin_family = AF_INET;
inet_aton(obj->addrstr, &addr.sin_addr);
addr.sin_port = htons(port);
addrlen = sizeof(struct sockaddr_in);
#endif
syslog(LOG_DEBUG, "%s: '%s' %hu '%s'", "upnp_event_notify_connect",
obj->addrstr, port, obj->path);
obj->state = EConnecting;
if(connect(obj->s, (struct sockaddr *)&addr, addrlen) < 0) {
if(errno != EINPROGRESS && errno != EWOULDBLOCK) {
syslog(LOG_ERR, "%s: connect(%d, %s, %u): %m",
"upnp_event_notify_connect", obj->s,
obj->addrstr, addrlen);
obj->state = EError;
}
}
}
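/* Example of the parsing above (hypothetical callback URL):
* "http://192.168.1.10:5000/evt" yields addrstr="192.168.1.10",
* portstr=":5000" (port 5000) and path="/evt"; with no explicit port,
* port 80 is used and portstr is left empty.
*/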
static void upnp_event_prepare(struct upnp_event_notify * obj)
{
static const char notifymsg[] =
"NOTIFY %s HTTP/1.1\r\n"
"Host: %s%s\r\n"
#if (UPNP_VERSION_MAJOR == 1) && (UPNP_VERSION_MINOR == 0)
"Content-Type: text/xml\r\n" /* UDA v1.0 */
#else
"Content-Type: text/xml; charset=\"utf-8\"\r\n" /* UDA v1.1 or later */
#endif
"Content-Length: %d\r\n"
"NT: upnp:event\r\n"
"NTS: upnp:propchange\r\n"
"SID: %s\r\n"
"SEQ: %u\r\n"
"Connection: close\r\n"
"Cache-Control: no-cache\r\n"
"\r\n"
"%.*s\r\n";
char * xml;
int l;
if(obj->sub == NULL) {
obj->state = EError;
return;
}
switch(obj->sub->service) {
case EWanCFG:
xml = getVarsWANCfg(&l);
break;
case EWanIPC:
xml = getVarsWANIPCn(&l);
break;
#ifdef ENABLE_L3F_SERVICE
case EL3F:
xml = getVarsL3F(&l);
break;
#endif
#ifdef ENABLE_6FC_SERVICE
case E6FC:
xml = getVars6FC(&l);
break;
#endif
#ifdef ENABLE_DP_SERVICE
case EDP:
xml = getVarsDP(&l);
break;
#endif
default:
xml = NULL;
l = 0;
}
obj->buffersize = 1024;
obj->buffer = malloc(obj->buffersize);
if(!obj->buffer) {
syslog(LOG_ERR, "%s: malloc returned NULL", "upnp_event_prepare");
if(xml) {
free(xml);
}
obj->state = EError;
return;
}
obj->tosend = snprintf(obj->buffer, obj->buffersize, notifymsg,
obj->path, obj->addrstr, obj->portstr, l+2,
obj->sub->uuid, obj->sub->seq,
l, xml);
if(obj->tosend >= obj->buffersize) {
/* snprintf() returns the length the untruncated message would have
* had; clamp it so upnp_event_send() cannot read past the buffer */
syslog(LOG_WARNING, "%s: notify message truncated (%d >= %d)",
"upnp_event_prepare", obj->tosend, obj->buffersize);
obj->tosend = obj->buffersize - 1;
}
if(xml) {
free(xml);
xml = NULL;
}
obj->state = ESending;
}
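/* The prepared message looks like this (abridged; values illustrative):
*
*   NOTIFY /evt HTTP/1.1
*   Host: 192.168.1.10:5000
*   Content-Type: text/xml
*   Content-Length: ...
*   NT: upnp:event
*   NTS: upnp:propchange
*   SID: uuid:...
*   SEQ: 0
*   Connection: close
*
*   <e:propertyset ...>...</e:propertyset>
*/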
static void upnp_event_send(struct upnp_event_notify * obj)
{
int i;
syslog(LOG_DEBUG, "%s: sending event notify message to %s%s",
"upnp_event_send", obj->addrstr, obj->portstr);
syslog(LOG_DEBUG, "%s: msg: %s",
"upnp_event_send", obj->buffer + obj->sent);
i = send(obj->s, obj->buffer + obj->sent, obj->tosend - obj->sent, 0);
if(i<0) {
if(errno != EAGAIN && errno != EWOULDBLOCK && errno != EINTR) {
syslog(LOG_NOTICE, "%s: send(%s%s): %m", "upnp_event_send",
obj->addrstr, obj->portstr);
obj->state = EError;
return;
} else {
/* EAGAIN or EWOULDBLOCK or EINTR : no data sent */
i = 0;
}
}
if(i != (obj->tosend - obj->sent))
syslog(LOG_NOTICE, "%s: %d bytes send out of %d",
"upnp_event_send", i, obj->tosend - obj->sent);
obj->sent += i;
if(obj->sent == obj->tosend)
obj->state = EWaitingForResponse;
}
static void upnp_event_recv(struct upnp_event_notify * obj)
{
int n;
n = recv(obj->s, obj->buffer, obj->buffersize, 0);
if(n<0) {
if(errno != EAGAIN &&
errno != EWOULDBLOCK &&
errno != EINTR) {
syslog(LOG_ERR, "%s: recv(): %m", "upnp_event_recv");
obj->state = EError;
}
return;
}
syslog(LOG_DEBUG, "%s: (%dbytes) %.*s", "upnp_event_recv",
n, n, obj->buffer);
/* TODO : do something with the data received ?
* right now, n (number of bytes received) is ignored
* We may need to recv() more bytes. */
obj->state = EFinished;
if(obj->sub)
obj->sub->seq++;
}
static void
upnp_event_process_notify(struct upnp_event_notify * obj)
{
int err;
socklen_t len;
switch(obj->state) {
case EConnecting:
/* now connected or failed to connect */
len = sizeof(err);
if(getsockopt(obj->s, SOL_SOCKET, SO_ERROR, &err, &len) < 0) {
syslog(LOG_ERR, "%s: getsockopt: %m", "upnp_event_process_notify");
obj->state = EError;
break;
}
if(err != 0) {
errno = err;
syslog(LOG_WARNING, "%s: connect(%s%s): %m",
"upnp_event_process_notify",
obj->addrstr, obj->portstr);
obj->state = EError;
break;
}
upnp_event_prepare(obj);
if(obj->state == ESending)
upnp_event_send(obj);
break;
case ESending:
upnp_event_send(obj);
break;
case EWaitingForResponse:
upnp_event_recv(obj);
break;
case EFinished:
close(obj->s);
obj->s = -1;
break;
default:
syslog(LOG_ERR, "%s: unknown state", "upnp_event_process_notify");
}
}
void upnpevents_selectfds(fd_set *readset, fd_set *writeset, int * max_fd)
{
struct upnp_event_notify * obj;
for(obj = notifylist.lh_first; obj != NULL; obj = obj->entries.le_next) {
syslog(LOG_DEBUG, "upnpevents_selectfds: %p %d %d",
obj, obj->state, obj->s);
if(obj->s >= 0) {
switch(obj->state) {
case ECreated:
upnp_event_notify_connect(obj);
if(obj->state != EConnecting)
break;
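/* fall through */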
case EConnecting:
case ESending:
FD_SET(obj->s, writeset);
if(obj->s > *max_fd)
*max_fd = obj->s;
break;
case EWaitingForResponse:
FD_SET(obj->s, readset);
if(obj->s > *max_fd)
*max_fd = obj->s;
break;
default:
;
}
}
}
}
void upnpevents_processfds(fd_set *readset, fd_set *writeset)
{
struct upnp_event_notify * obj;
struct upnp_event_notify * next;
struct subscriber * sub;
struct subscriber * subnext;
time_t curtime;
for(obj = notifylist.lh_first; obj != NULL; obj = obj->entries.le_next) {
syslog(LOG_DEBUG, "%s: %p %d %d %d %d",
"upnpevents_processfds", obj, obj->state, obj->s,
(obj->s >= 0) ? FD_ISSET(obj->s, readset) : 0,
(obj->s >= 0) ? FD_ISSET(obj->s, writeset) : 0);
if(obj->s >= 0) {
if(FD_ISSET(obj->s, readset) || FD_ISSET(obj->s, writeset))
upnp_event_process_notify(obj);
}
}
obj = notifylist.lh_first;
while(obj != NULL) {
next = obj->entries.le_next;
if(obj->state == EError || obj->state == EFinished) {
if(obj->s >= 0) {
close(obj->s);
}
if(obj->sub)
obj->sub->notify = NULL;
/* remove also the subscriber from the list if there was an error */
if(obj->state == EError && obj->sub) {
syslog(LOG_ERR, "%s: %p, remove subscriber %s after an ERROR cb: %s",
"upnpevents_processfds", obj, obj->sub->uuid, obj->sub->callback);
LIST_REMOVE(obj->sub, entries);
free(obj->sub);
}
if(obj->buffer) {
free(obj->buffer);
}
LIST_REMOVE(obj, entries);
free(obj);
}
obj = next;
}
/* remove timed-out subscribers */
curtime = upnp_time();
for(sub = subscriberlist.lh_first; sub != NULL; ) {
subnext = sub->entries.le_next;
if(sub->timeout && curtime > sub->timeout && sub->notify == NULL) {
syslog(LOG_INFO, "subscriber timeouted : %u > %u SID=%s",
(unsigned)curtime, (unsigned)sub->timeout, sub->uuid);
LIST_REMOVE(sub, entries);
free(sub);
}
sub = subnext;
}
}
#ifdef USE_MINIUPNPDCTL
void write_events_details(int s) {
int n;
char buff[80];
struct upnp_event_notify * obj;
struct subscriber * sub;
write(s, "Events details :\n", 17);
for(obj = notifylist.lh_first; obj != NULL; obj = obj->entries.le_next) {
n = snprintf(buff, sizeof(buff), " %p sub=%p state=%d s=%d\n",
obj, obj->sub, obj->state, obj->s);
write(s, buff, n);
}
write(s, "Subscribers :\n", 14);
for(sub = subscriberlist.lh_first; sub != NULL; sub = sub->entries.le_next) {
n = snprintf(buff, sizeof(buff), " %p timeout=%d seq=%u service=%d\n",
sub, (int)sub->timeout, sub->seq, sub->service);
write(s, buff, n);
n = snprintf(buff, sizeof(buff), " notify=%p %s\n",
sub->notify, sub->uuid);
write(s, buff, n);
n = snprintf(buff, sizeof(buff), " %s\n",
sub->callback);
write(s, buff, n);
}
}
#endif
#endif
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_860_0 |
crossvul-cpp_data_bad_3441_0 | /*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth SCO sockets. */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/list.h>
#include <net/sock.h>
#include <asm/system.h>
#include <linux/uaccess.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/sco.h>
#define VERSION "0.6"
static int disable_esco;
static const struct proto_ops sco_sock_ops;
static struct bt_sock_list sco_sk_list = {
.lock = __RW_LOCK_UNLOCKED(sco_sk_list.lock)
};
static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent);
static void sco_chan_del(struct sock *sk, int err);
static int sco_conn_del(struct hci_conn *conn, int err);
static void sco_sock_close(struct sock *sk);
static void sco_sock_kill(struct sock *sk);
/* ---- SCO timers ---- */
static void sco_sock_timeout(unsigned long arg)
{
struct sock *sk = (struct sock *) arg;
BT_DBG("sock %p state %d", sk, sk->sk_state);
bh_lock_sock(sk);
sk->sk_err = ETIMEDOUT;
sk->sk_state_change(sk);
bh_unlock_sock(sk);
sco_sock_kill(sk);
sock_put(sk);
}
static void sco_sock_set_timer(struct sock *sk, long timeout)
{
BT_DBG("sock %p state %d timeout %ld", sk, sk->sk_state, timeout);
sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
}
static void sco_sock_clear_timer(struct sock *sk)
{
BT_DBG("sock %p state %d", sk, sk->sk_state);
sk_stop_timer(sk, &sk->sk_timer);
}
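/* The timer piggybacks on sk->sk_timer: it is armed for sk_sndtimeo
* jiffies (SCO_CONN_TIMEOUT by default) while a connection attempt is
* outstanding and cleared once the link is connected or torn down. */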
/* ---- SCO connections ---- */
static struct sco_conn *sco_conn_add(struct hci_conn *hcon, __u8 status)
{
struct hci_dev *hdev = hcon->hdev;
struct sco_conn *conn = hcon->sco_data;
if (conn || status)
return conn;
conn = kzalloc(sizeof(struct sco_conn), GFP_ATOMIC);
if (!conn)
return NULL;
spin_lock_init(&conn->lock);
hcon->sco_data = conn;
conn->hcon = hcon;
conn->src = &hdev->bdaddr;
conn->dst = &hcon->dst;
if (hdev->sco_mtu > 0)
conn->mtu = hdev->sco_mtu;
else
conn->mtu = 60;
BT_DBG("hcon %p conn %p", hcon, conn);
return conn;
}
static inline struct sock *sco_chan_get(struct sco_conn *conn)
{
struct sock *sk = NULL;
sco_conn_lock(conn);
sk = conn->sk;
sco_conn_unlock(conn);
return sk;
}
static int sco_conn_del(struct hci_conn *hcon, int err)
{
struct sco_conn *conn = hcon->sco_data;
struct sock *sk;
if (!conn)
return 0;
BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Kill socket */
sk = sco_chan_get(conn);
if (sk) {
bh_lock_sock(sk);
sco_sock_clear_timer(sk);
sco_chan_del(sk, err);
bh_unlock_sock(sk);
sco_sock_kill(sk);
}
hcon->sco_data = NULL;
kfree(conn);
return 0;
}
static inline int sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent)
{
int err = 0;
sco_conn_lock(conn);
if (conn->sk)
err = -EBUSY;
else
__sco_chan_add(conn, sk, parent);
sco_conn_unlock(conn);
return err;
}
static int sco_connect(struct sock *sk)
{
bdaddr_t *src = &bt_sk(sk)->src;
bdaddr_t *dst = &bt_sk(sk)->dst;
struct sco_conn *conn;
struct hci_conn *hcon;
struct hci_dev *hdev;
int err, type;
BT_DBG("%s -> %s", batostr(src), batostr(dst));
hdev = hci_get_route(dst, src);
if (!hdev)
return -EHOSTUNREACH;
hci_dev_lock_bh(hdev);
err = -ENOMEM;
if (lmp_esco_capable(hdev) && !disable_esco)
type = ESCO_LINK;
else
type = SCO_LINK;
hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
if (!hcon)
goto done;
conn = sco_conn_add(hcon, 0);
if (!conn) {
hci_conn_put(hcon);
goto done;
}
/* Update source addr of the socket */
bacpy(src, conn->src);
err = sco_chan_add(conn, sk, NULL);
if (err)
goto done;
if (hcon->state == BT_CONNECTED) {
sco_sock_clear_timer(sk);
sk->sk_state = BT_CONNECTED;
} else {
sk->sk_state = BT_CONNECT;
sco_sock_set_timer(sk, sk->sk_sndtimeo);
}
done:
hci_dev_unlock_bh(hdev);
hci_dev_put(hdev);
return err;
}
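/* sco_connect() above prefers an eSCO link when the adapter supports
* it and eSCO is not disabled, falling back to a plain SCO link;
* hci_connect() on the routed adapter takes care of bringing up the
* underlying ACL connection first. */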
static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
{
struct sco_conn *conn = sco_pi(sk)->conn;
struct sk_buff *skb;
int err, count;
/* Check outgoing MTU */
if (len > conn->mtu)
return -EINVAL;
BT_DBG("sk %p len %d", sk, len);
count = min_t(unsigned int, conn->mtu, len);
skb = bt_skb_send_alloc(sk, count,
msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb)
return err;
if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
kfree_skb(skb);
return -EFAULT;
}
hci_send_sco(conn->hcon, skb);
return count;
}
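/*
 * Note that sco_send_frame() refuses oversized writes instead of
 * fragmenting them: SCO carries fixed-size voice frames, so a write
 * either fits within conn->mtu or fails with -EINVAL.
 */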
static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
{
struct sock *sk = sco_chan_get(conn);
if (!sk)
goto drop;
BT_DBG("sk %p len %d", sk, skb->len);
if (sk->sk_state != BT_CONNECTED)
goto drop;
if (!sock_queue_rcv_skb(sk, skb))
return;
drop:
kfree_skb(skb);
}
/* -------- Socket interface ---------- */
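/*
 * A minimal sketch of how userspace typically reaches this layer.
 * The sockaddr_sco layout comes from <net/bluetooth/sco.h>; str2ba()
 * is a BlueZ library helper assumed here purely for illustration,
 * and all error handling is omitted:
 *
 *	struct sockaddr_sco sa = { .sco_family = AF_BLUETOOTH };
 *	int sk = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_SCO);
 *
 *	str2ba("00:11:22:33:44:55", &sa.sco_bdaddr);
 *	connect(sk, (struct sockaddr *) &sa, sizeof(sa));
 *	write(sk, buf, len);		// handled by sco_sock_sendmsg()
 *
 * connect() lands in sco_sock_connect() below, which in turn calls
 * sco_connect() above to bring up the baseband link.
 */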
static struct sock *__sco_get_sock_by_addr(bdaddr_t *ba)
{
struct sock *sk;
struct hlist_node *node;
sk_for_each(sk, node, &sco_sk_list.head)
if (!bacmp(&bt_sk(sk)->src, ba))
goto found;
sk = NULL;
found:
return sk;
}
/* Find socket listening on source bdaddr.
* Returns closest match.
*/
static struct sock *sco_get_sock_listen(bdaddr_t *src)
{
struct sock *sk = NULL, *sk1 = NULL;
struct hlist_node *node;
read_lock(&sco_sk_list.lock);
sk_for_each(sk, node, &sco_sk_list.head) {
if (sk->sk_state != BT_LISTEN)
continue;
/* Exact match. */
if (!bacmp(&bt_sk(sk)->src, src))
break;
/* Closest match */
if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
sk1 = sk;
}
read_unlock(&sco_sk_list.lock);
return node ? sk : sk1;
}
static void sco_sock_destruct(struct sock *sk)
{
BT_DBG("sk %p", sk);
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_write_queue);
}
static void sco_sock_cleanup_listen(struct sock *parent)
{
struct sock *sk;
BT_DBG("parent %p", parent);
/* Close not yet accepted channels */
while ((sk = bt_accept_dequeue(parent, NULL))) {
sco_sock_close(sk);
sco_sock_kill(sk);
}
parent->sk_state = BT_CLOSED;
sock_set_flag(parent, SOCK_ZAPPED);
}
/* Kill socket (only if zapped and orphan)
* Must be called on unlocked socket.
*/
static void sco_sock_kill(struct sock *sk)
{
if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
return;
BT_DBG("sk %p state %d", sk, sk->sk_state);
/* Kill poor orphan */
bt_sock_unlink(&sco_sk_list, sk);
sock_set_flag(sk, SOCK_DEAD);
sock_put(sk);
}
static void __sco_sock_close(struct sock *sk)
{
BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
switch (sk->sk_state) {
case BT_LISTEN:
sco_sock_cleanup_listen(sk);
break;
case BT_CONNECTED:
case BT_CONFIG:
case BT_CONNECT:
case BT_DISCONN:
sco_chan_del(sk, ECONNRESET);
break;
default:
sock_set_flag(sk, SOCK_ZAPPED);
break;
}
}
/* Must be called on unlocked socket. */
static void sco_sock_close(struct sock *sk)
{
sco_sock_clear_timer(sk);
lock_sock(sk);
__sco_sock_close(sk);
release_sock(sk);
sco_sock_kill(sk);
}
static void sco_sock_init(struct sock *sk, struct sock *parent)
{
BT_DBG("sk %p", sk);
if (parent)
sk->sk_type = parent->sk_type;
}
static struct proto sco_proto = {
.name = "SCO",
.owner = THIS_MODULE,
.obj_size = sizeof(struct sco_pinfo)
};
static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
struct sock *sk;
sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto);
if (!sk)
return NULL;
sock_init_data(sock, sk);
INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
sk->sk_destruct = sco_sock_destruct;
sk->sk_sndtimeo = SCO_CONN_TIMEOUT;
sock_reset_flag(sk, SOCK_ZAPPED);
sk->sk_protocol = proto;
sk->sk_state = BT_OPEN;
setup_timer(&sk->sk_timer, sco_sock_timeout, (unsigned long)sk);
bt_sock_link(&sco_sk_list, sk);
return sk;
}
static int sco_sock_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
BT_DBG("sock %p", sock);
sock->state = SS_UNCONNECTED;
if (sock->type != SOCK_SEQPACKET)
return -ESOCKTNOSUPPORT;
sock->ops = &sco_sock_ops;
sk = sco_sock_alloc(net, sock, protocol, GFP_ATOMIC);
if (!sk)
return -ENOMEM;
sco_sock_init(sk, NULL);
return 0;
}
static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
bdaddr_t *src = &sa->sco_bdaddr;
int err = 0;
BT_DBG("sk %p %s", sk, batostr(&sa->sco_bdaddr));
if (!addr || addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
lock_sock(sk);
if (sk->sk_state != BT_OPEN) {
err = -EBADFD;
goto done;
}
write_lock_bh(&sco_sk_list.lock);
if (bacmp(src, BDADDR_ANY) && __sco_get_sock_by_addr(src)) {
err = -EADDRINUSE;
} else {
/* Save source address */
bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
sk->sk_state = BT_BOUND;
}
write_unlock_bh(&sco_sk_list.lock);
done:
release_sock(sk);
return err;
}
static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sk %p", sk);
if (alen < sizeof(struct sockaddr_sco) ||
addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND)
return -EBADFD;
if (sk->sk_type != SOCK_SEQPACKET)
return -EINVAL;
lock_sock(sk);
/* Set destination address */
bacpy(&bt_sk(sk)->dst, &sa->sco_bdaddr);
err = sco_connect(sk);
if (err)
goto done;
err = bt_sock_wait_state(sk, BT_CONNECTED,
sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
release_sock(sk);
return err;
}
static int sco_sock_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sk %p backlog %d", sk, backlog);
lock_sock(sk);
if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
err = -EBADFD;
goto done;
}
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
sk->sk_state = BT_LISTEN;
done:
release_sock(sk);
return err;
}
static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
DECLARE_WAITQUEUE(wait, current);
struct sock *sk = sock->sk, *ch;
long timeo;
int err = 0;
lock_sock(sk);
if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
goto done;
}
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
while (!(ch = bt_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
if (!timeo) {
err = -EAGAIN;
break;
}
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
break;
}
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
break;
}
}
set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
if (err)
goto done;
newsock->state = SS_CONNECTED;
BT_DBG("new socket %p", ch);
done:
release_sock(sk);
return err;
}
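/*
 * The loop above is the classic blocking-accept pattern: register on
 * the listener's wait queue, poll the accept queue, sleep with the
 * socket lock released, then re-validate the socket state after
 * waking, since the listener may have been closed while we slept.
 */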
static int sco_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
{
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
BT_DBG("sock %p, sk %p", sock, sk);
addr->sa_family = AF_BLUETOOTH;
*len = sizeof(struct sockaddr_sco);
if (peer)
bacpy(&sa->sco_bdaddr, &bt_sk(sk)->dst);
else
bacpy(&sa->sco_bdaddr, &bt_sk(sk)->src);
return 0;
}
static int sco_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
int err;
BT_DBG("sock %p, sk %p", sock, sk);
err = sock_error(sk);
if (err)
return err;
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
lock_sock(sk);
if (sk->sk_state == BT_CONNECTED)
err = sco_send_frame(sk, msg, len);
else
err = -ENOTCONN;
release_sock(sk);
return err;
}
static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sk %p", sk);
lock_sock(sk);
switch (optname) {
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
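/*
 * Old-style option handling, reached for level SOL_SCO. It is kept
 * separate so that sco_sock_getsockopt() can dispatch legacy callers
 * here while other levels fall through to the generic switch below.
 */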
static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct sco_options opts;
struct sco_conninfo cinfo;
int len, err = 0;
BT_DBG("sk %p", sk);
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
switch (optname) {
case SCO_OPTIONS:
if (sk->sk_state != BT_CONNECTED) {
err = -ENOTCONN;
break;
}
opts.mtu = sco_pi(sk)->conn->mtu;
BT_DBG("mtu %d", opts.mtu);
len = min_t(unsigned int, len, sizeof(opts));
if (copy_to_user(optval, (char *)&opts, len))
err = -EFAULT;
break;
case SCO_CONNINFO:
if (sk->sk_state != BT_CONNECTED) {
err = -ENOTCONN;
break;
}
/* Zero the whole struct before filling it in: it is copied
 * to userspace below, so any uninitialized padding bytes
 * would otherwise leak kernel stack contents.
 */
memset(&cinfo, 0, sizeof(cinfo));
cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
len = min_t(unsigned int, len, sizeof(cinfo));
if (copy_to_user(optval, (char *)&cinfo, len))
err = -EFAULT;
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
int len, err = 0;
BT_DBG("sk %p", sk);
if (level == SOL_SCO)
return sco_sock_getsockopt_old(sock, optname, optval, optlen);
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
switch (optname) {
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int sco_sock_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sock %p, sk %p", sock, sk);
if (!sk)
return 0;
lock_sock(sk);
if (!sk->sk_shutdown) {
sk->sk_shutdown = SHUTDOWN_MASK;
sco_sock_clear_timer(sk);
__sco_sock_close(sk);
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
err = bt_sock_wait_state(sk, BT_CLOSED,
sk->sk_lingertime);
}
release_sock(sk);
return err;
}
static int sco_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sock %p, sk %p", sock, sk);
if (!sk)
return 0;
sco_sock_close(sk);
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) {
lock_sock(sk);
err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
release_sock(sk);
}
sock_orphan(sk);
sco_sock_kill(sk);
return err;
}
static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent)
{
BT_DBG("conn %p", conn);
sco_pi(sk)->conn = conn;
conn->sk = sk;
if (parent)
bt_accept_enqueue(parent, sk);
}
/* Delete channel.
* Must be called on the locked socket. */
static void sco_chan_del(struct sock *sk, int err)
{
struct sco_conn *conn;
conn = sco_pi(sk)->conn;
BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
if (conn) {
sco_conn_lock(conn);
conn->sk = NULL;
sco_pi(sk)->conn = NULL;
sco_conn_unlock(conn);
hci_conn_put(conn->hcon);
}
sk->sk_state = BT_CLOSED;
sk->sk_err = err;
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_ZAPPED);
}
static void sco_conn_ready(struct sco_conn *conn)
{
struct sock *parent;
struct sock *sk = conn->sk;
BT_DBG("conn %p", conn);
sco_conn_lock(conn);
if (sk) {
sco_sock_clear_timer(sk);
bh_lock_sock(sk);
sk->sk_state = BT_CONNECTED;
sk->sk_state_change(sk);
bh_unlock_sock(sk);
} else {
parent = sco_get_sock_listen(conn->src);
if (!parent)
goto done;
bh_lock_sock(parent);
sk = sco_sock_alloc(sock_net(parent), NULL,
BTPROTO_SCO, GFP_ATOMIC);
if (!sk) {
bh_unlock_sock(parent);
goto done;
}
sco_sock_init(sk, parent);
bacpy(&bt_sk(sk)->src, conn->src);
bacpy(&bt_sk(sk)->dst, conn->dst);
hci_conn_hold(conn->hcon);
__sco_chan_add(conn, sk, parent);
sk->sk_state = BT_CONNECTED;
/* Wake up parent */
parent->sk_data_ready(parent, 1);
bh_unlock_sock(parent);
}
done:
sco_conn_unlock(conn);
}
/* ----- SCO interface with lower layer (HCI) ----- */
static int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
{
register struct sock *sk;
struct hlist_node *node;
int lm = 0;
if (type != SCO_LINK && type != ESCO_LINK)
return -EINVAL;
BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
/* Find listening sockets */
read_lock(&sco_sk_list.lock);
sk_for_each(sk, node, &sco_sk_list.head) {
if (sk->sk_state != BT_LISTEN)
continue;
if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) ||
!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
lm |= HCI_LM_ACCEPT;
break;
}
}
read_unlock(&sco_sk_list.lock);
return lm;
}
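/*
 * The link-mode mask returned above tells the HCI core how to answer
 * the incoming connection request: a zero return means no SCO socket
 * is listening on this adapter, so the request gets rejected.
 */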
static int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
{
BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
return -EINVAL;
if (!status) {
struct sco_conn *conn;
conn = sco_conn_add(hcon, status);
if (conn)
sco_conn_ready(conn);
} else
sco_conn_del(hcon, bt_err(status));
return 0;
}
static int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
{
BT_DBG("hcon %p reason %d", hcon, reason);
if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
return -EINVAL;
sco_conn_del(hcon, bt_err(reason));
return 0;
}
static int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
{
struct sco_conn *conn = hcon->sco_data;
if (!conn)
goto drop;
BT_DBG("conn %p len %d", conn, skb->len);
if (skb->len) {
sco_recv_frame(conn, skb);
return 0;
}
drop:
kfree_skb(skb);
return 0;
}
static int sco_debugfs_show(struct seq_file *f, void *p)
{
struct sock *sk;
struct hlist_node *node;
read_lock_bh(&sco_sk_list.lock);
sk_for_each(sk, node, &sco_sk_list.head) {
seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src),
batostr(&bt_sk(sk)->dst), sk->sk_state);
}
read_unlock_bh(&sco_sk_list.lock);
return 0;
}
static int sco_debugfs_open(struct inode *inode, struct file *file)
{
return single_open(file, sco_debugfs_show, inode->i_private);
}
static const struct file_operations sco_debugfs_fops = {
.open = sco_debugfs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static struct dentry *sco_debugfs;
static const struct proto_ops sco_sock_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.release = sco_sock_release,
.bind = sco_sock_bind,
.connect = sco_sock_connect,
.listen = sco_sock_listen,
.accept = sco_sock_accept,
.getname = sco_sock_getname,
.sendmsg = sco_sock_sendmsg,
.recvmsg = bt_sock_recvmsg,
.poll = bt_sock_poll,
.ioctl = bt_sock_ioctl,
.mmap = sock_no_mmap,
.socketpair = sock_no_socketpair,
.shutdown = sco_sock_shutdown,
.setsockopt = sco_sock_setsockopt,
.getsockopt = sco_sock_getsockopt
};
static const struct net_proto_family sco_sock_family_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.create = sco_sock_create,
};
static struct hci_proto sco_hci_proto = {
.name = "SCO",
.id = HCI_PROTO_SCO,
.connect_ind = sco_connect_ind,
.connect_cfm = sco_connect_cfm,
.disconn_cfm = sco_disconn_cfm,
.recv_scodata = sco_recv_scodata
};
static int __init sco_init(void)
{
int err;
err = proto_register(&sco_proto, 0);
if (err < 0)
return err;
err = bt_sock_register(BTPROTO_SCO, &sco_sock_family_ops);
if (err < 0) {
BT_ERR("SCO socket registration failed");
goto error;
}
err = hci_register_proto(&sco_hci_proto);
if (err < 0) {
BT_ERR("SCO protocol registration failed");
bt_sock_unregister(BTPROTO_SCO);
goto error;
}
if (bt_debugfs) {
sco_debugfs = debugfs_create_file("sco", 0444,
bt_debugfs, NULL, &sco_debugfs_fops);
if (!sco_debugfs)
BT_ERR("Failed to create SCO debug file");
}
BT_INFO("SCO (Voice Link) ver %s", VERSION);
BT_INFO("SCO socket layer initialized");
return 0;
error:
proto_unregister(&sco_proto);
return err;
}
static void __exit sco_exit(void)
{
debugfs_remove(sco_debugfs);
if (bt_sock_unregister(BTPROTO_SCO) < 0)
BT_ERR("SCO socket unregistration failed");
if (hci_unregister_proto(&sco_hci_proto) < 0)
BT_ERR("SCO protocol unregistration failed");
proto_unregister(&sco_proto);
}
module_init(sco_init);
module_exit(sco_exit);
module_param(disable_esco, bool, 0644);
MODULE_PARM_DESC(disable_esco, "Disable eSCO connection creation");
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth SCO ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-2");
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_3441_0 |
crossvul-cpp_data_good_3270_0 | /*
* linux/fs/ext4/inode.c
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
*
* from
*
* linux/fs/minix/inode.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* 64-bit file support on 64-bit platforms by Jakub Jelinek
* (jj@sunsite.ms.mff.cuni.cz)
*
* Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
*/
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"
#include <trace/events/ext4.h>
#define MPAGE_DA_EXTENT_TAIL 0x01
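/*
 * Inode checksumming: the checksum is computed over the raw on-disk
 * inode with both checksum fields temporarily zeroed, seeded by
 * ei->i_csum_seed (crc32c via ext4_chksum() by default). The 32-bit
 * result is split between i_checksum_lo and, when the inode is large
 * enough, i_checksum_hi.
 */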
static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
struct ext4_inode_info *ei)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
__u16 csum_lo;
__u16 csum_hi = 0;
__u32 csum;
csum_lo = le16_to_cpu(raw->i_checksum_lo);
raw->i_checksum_lo = 0;
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
csum_hi = le16_to_cpu(raw->i_checksum_hi);
raw->i_checksum_hi = 0;
}
csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
EXT4_INODE_SIZE(inode->i_sb));
raw->i_checksum_lo = cpu_to_le16(csum_lo);
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
raw->i_checksum_hi = cpu_to_le16(csum_hi);
return csum;
}
static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
struct ext4_inode_info *ei)
{
__u32 provided, calculated;
if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
cpu_to_le32(EXT4_OS_LINUX) ||
!ext4_has_metadata_csum(inode->i_sb))
return 1;
provided = le16_to_cpu(raw->i_checksum_lo);
calculated = ext4_inode_csum(inode, raw, ei);
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
else
calculated &= 0xFFFF;
return provided == calculated;
}
static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
struct ext4_inode_info *ei)
{
__u32 csum;
if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
cpu_to_le32(EXT4_OS_LINUX) ||
!ext4_has_metadata_csum(inode->i_sb))
return;
csum = ext4_inode_csum(inode, raw, ei);
raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}
static inline int ext4_begin_ordered_truncate(struct inode *inode,
loff_t new_size)
{
trace_ext4_begin_ordered_truncate(inode, new_size);
/*
* If jinode is zero, then we never opened the file for
* writing, so there's no need to call
* jbd2_journal_begin_ordered_truncate() since there's no
* outstanding writes we need to flush.
*/
if (!EXT4_I(inode)->jinode)
return 0;
return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
EXT4_I(inode)->jinode,
new_size);
}
static void ext4_invalidatepage(struct page *page, unsigned int offset,
unsigned int length);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
int pextents);
/*
* Test whether an inode is a fast symlink.
*/
int ext4_inode_is_fast_symlink(struct inode *inode)
{
int ea_blocks = EXT4_I(inode)->i_file_acl ?
EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
if (ext4_has_inline_data(inode))
return 0;
return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}
/*
* Restart the transaction associated with *handle. This does a commit,
* so before we call here everything must be consistently dirtied against
* this transaction.
*/
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
int nblocks)
{
int ret;
/*
* Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
* moment, get_block can be called only for blocks inside i_size since
* the page cache has already been dropped and writes are blocked by
* i_mutex. So we can safely drop the i_data_sem here.
*/
BUG_ON(EXT4_JOURNAL(inode) == NULL);
jbd_debug(2, "restarting handle %p\n", handle);
up_write(&EXT4_I(inode)->i_data_sem);
ret = ext4_journal_restart(handle, nblocks);
down_write(&EXT4_I(inode)->i_data_sem);
ext4_discard_preallocations(inode);
return ret;
}
/*
* Called at the last iput() if i_nlink is zero.
*/
void ext4_evict_inode(struct inode *inode)
{
handle_t *handle;
int err;
trace_ext4_evict_inode(inode);
if (inode->i_nlink) {
/*
* When journalling data dirty buffers are tracked only in the
* journal. So although mm thinks everything is clean and
* ready for reaping the inode might still have some pages to
* write in the running transaction or waiting to be
* checkpointed. Thus calling jbd2_journal_invalidatepage()
* (via truncate_inode_pages()) to discard these buffers can
* cause data loss. Also even if we did not discard these
* buffers, we would have no way to find them after the inode
* is reaped and thus user could see stale data if he tries to
* read them before the transaction is checkpointed. So be
* careful and force everything to disk here... We use
* ei->i_datasync_tid to store the newest transaction
* containing inode's data.
*
* Note that directories do not have this problem because they
* don't use page cache.
*/
if (ext4_should_journal_data(inode) &&
(S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
inode->i_ino != EXT4_JOURNAL_INO) {
journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
jbd2_complete_transaction(journal, commit_tid);
filemap_write_and_wait(&inode->i_data);
}
truncate_inode_pages_final(&inode->i_data);
goto no_delete;
}
if (is_bad_inode(inode))
goto no_delete;
dquot_initialize(inode);
if (ext4_should_order_data(inode))
ext4_begin_ordered_truncate(inode, 0);
truncate_inode_pages_final(&inode->i_data);
/*
* Protect us against freezing - iput() caller didn't have to have any
* protection against it
*/
sb_start_intwrite(inode->i_sb);
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
ext4_blocks_for_truncate(inode)+3);
if (IS_ERR(handle)) {
ext4_std_error(inode->i_sb, PTR_ERR(handle));
/*
* If we're going to skip the normal cleanup, we still need to
* make sure that the in-core orphan linked list is properly
* cleaned up.
*/
ext4_orphan_del(NULL, inode);
sb_end_intwrite(inode->i_sb);
goto no_delete;
}
if (IS_SYNC(inode))
ext4_handle_sync(handle);
inode->i_size = 0;
err = ext4_mark_inode_dirty(handle, inode);
if (err) {
ext4_warning(inode->i_sb,
"couldn't mark inode dirty (err %d)", err);
goto stop_handle;
}
if (inode->i_blocks)
ext4_truncate(inode);
/*
* ext4_ext_truncate() doesn't reserve any slop when it
* restarts journal transactions; therefore there may not be
* enough credits left in the handle to remove the inode from
* the orphan list and set the dtime field.
*/
if (!ext4_handle_has_enough_credits(handle, 3)) {
err = ext4_journal_extend(handle, 3);
if (err > 0)
err = ext4_journal_restart(handle, 3);
if (err != 0) {
ext4_warning(inode->i_sb,
"couldn't extend journal (err %d)", err);
stop_handle:
ext4_journal_stop(handle);
ext4_orphan_del(NULL, inode);
sb_end_intwrite(inode->i_sb);
goto no_delete;
}
}
/*
* Kill off the orphan record which ext4_truncate created.
* AKPM: I think this can be inside the above `if'.
* Note that ext4_orphan_del() has to be able to cope with the
* deletion of a non-existent orphan - this is because we don't
* know if ext4_truncate() actually created an orphan record.
* (Well, we could do this if we need to, but heck - it works)
*/
ext4_orphan_del(handle, inode);
EXT4_I(inode)->i_dtime = get_seconds();
/*
* One subtle ordering requirement: if anything has gone wrong
* (transaction abort, IO errors, whatever), then we can still
* do these next steps (the fs will already have been marked as
* having errors), but we can't free the inode if the mark_dirty
* fails.
*/
if (ext4_mark_inode_dirty(handle, inode))
/* If that failed, just do the required in-core inode clear. */
ext4_clear_inode(inode);
else
ext4_free_inode(handle, inode);
ext4_journal_stop(handle);
sb_end_intwrite(inode->i_sb);
return;
no_delete:
ext4_clear_inode(inode); /* We must guarantee clearing of inode... */
}
#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
return &EXT4_I(inode)->i_reserved_quota;
}
#endif
/*
* Called with i_data_sem down, which is important since we can call
* ext4_discard_preallocations() from here.
*/
void ext4_da_update_reserve_space(struct inode *inode,
int used, int quota_claim)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
spin_lock(&ei->i_block_reservation_lock);
trace_ext4_da_update_reserve_space(inode, used, quota_claim);
if (unlikely(used > ei->i_reserved_data_blocks)) {
ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
"with only %d reserved data blocks",
__func__, inode->i_ino, used,
ei->i_reserved_data_blocks);
WARN_ON(1);
used = ei->i_reserved_data_blocks;
}
/* Update per-inode reservations */
ei->i_reserved_data_blocks -= used;
percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
/* Update quota subsystem for data blocks */
if (quota_claim)
dquot_claim_block(inode, EXT4_C2B(sbi, used));
else {
/*
* We did fallocate with an offset that is already delayed
* allocated. So on delayed allocated writeback we should
* not re-claim the quota for fallocated blocks.
*/
dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
}
/*
* If we have done all the pending block allocations and if
* there aren't any writers on the inode, we can discard the
* inode's preallocations.
*/
if ((ei->i_reserved_data_blocks == 0) &&
(atomic_read(&inode->i_writecount) == 0))
ext4_discard_preallocations(inode);
}
static int __check_block_validity(struct inode *inode, const char *func,
unsigned int line,
struct ext4_map_blocks *map)
{
if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
map->m_len)) {
ext4_error_inode(inode, func, line, map->m_pblk,
"lblock %lu mapped to illegal pblock "
"(length %d)", (unsigned long) map->m_lblk,
map->m_len);
return -EFSCORRUPTED;
}
return 0;
}
int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
ext4_lblk_t len)
{
int ret;
if (ext4_encrypted_inode(inode))
return ext4_encrypted_zeroout(inode, lblk, pblk, len);
ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
if (ret > 0)
ret = 0;
return ret;
}
#define check_block_validity(inode, map) \
__check_block_validity((inode), __func__, __LINE__, (map))
#ifdef ES_AGGRESSIVE_TEST
static void ext4_map_blocks_es_recheck(handle_t *handle,
struct inode *inode,
struct ext4_map_blocks *es_map,
struct ext4_map_blocks *map,
int flags)
{
int retval;
map->m_flags = 0;
/*
 * There is a race window in which the two results may differ,
 * e.g. xfstests #223 when dioread_nolock is enabled. The reason
 * is that we look up the block mapping in the extent status tree
 * without taking i_data_sem, so in the meantime the unwritten
 * extent could have been converted.
 */
down_read(&EXT4_I(inode)->i_data_sem);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
retval = ext4_ext_map_blocks(handle, inode, map, flags &
EXT4_GET_BLOCKS_KEEP_SIZE);
} else {
retval = ext4_ind_map_blocks(handle, inode, map, flags &
EXT4_GET_BLOCKS_KEEP_SIZE);
}
up_read((&EXT4_I(inode)->i_data_sem));
/*
 * We don't compare m_len because adjacent extents may have been
 * collapsed in the status tree, so the lengths need not match.
 */
if (es_map->m_lblk != map->m_lblk ||
es_map->m_flags != map->m_flags ||
es_map->m_pblk != map->m_pblk) {
printk("ES cache assertion failed for inode: %lu "
"es_cached ex [%d/%d/%llu/%x] != "
"found ex [%d/%d/%llu/%x] retval %d flags %x\n",
inode->i_ino, es_map->m_lblk, es_map->m_len,
es_map->m_pblk, es_map->m_flags, map->m_lblk,
map->m_len, map->m_pblk, map->m_flags,
retval, flags);
}
}
#endif /* ES_AGGRESSIVE_TEST */
/*
 * The ext4_map_blocks() function tries to look up the requested blocks
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
 * stores them in the result buffer head, and marks it mapped.
 *
 * If the file is extent based, it calls ext4_ext_map_blocks();
 * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapped
 * files.
 *
 * On success, it returns the number of blocks mapped or allocated. If
 * create==0 and the blocks are pre-allocated and unwritten, the
 * resulting @map is marked as unwritten. If create==1, it will mark
 * @map as mapped.
 *
 * It returns 0 if a plain lookup failed (the blocks have not been
 * allocated); in that case @map is returned as unmapped, but
 * map->m_len is still filled in to indicate the length of the hole
 * starting at map->m_lblk.
*
* It returns the error in case of allocation failure.
*/
int ext4_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags)
{
struct extent_status es;
int retval;
int ret = 0;
#ifdef ES_AGGRESSIVE_TEST
struct ext4_map_blocks orig_map;
memcpy(&orig_map, map, sizeof(*map));
#endif
map->m_flags = 0;
ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
"logical block %lu\n", inode->i_ino, flags, map->m_len,
(unsigned long) map->m_lblk);
/*
* ext4_map_blocks returns an int, and m_len is an unsigned int
*/
if (unlikely(map->m_len > INT_MAX))
map->m_len = INT_MAX;
/* We can only handle block numbers less than EXT_MAX_BLOCKS */
if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
return -EFSCORRUPTED;
/* Lookup extent status tree firstly */
if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
map->m_pblk = ext4_es_pblock(&es) +
map->m_lblk - es.es_lblk;
map->m_flags |= ext4_es_is_written(&es) ?
EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
retval = es.es_len - (map->m_lblk - es.es_lblk);
if (retval > map->m_len)
retval = map->m_len;
map->m_len = retval;
} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
map->m_pblk = 0;
retval = es.es_len - (map->m_lblk - es.es_lblk);
if (retval > map->m_len)
retval = map->m_len;
map->m_len = retval;
retval = 0;
} else {
BUG_ON(1);
}
#ifdef ES_AGGRESSIVE_TEST
ext4_map_blocks_es_recheck(handle, inode, map,
&orig_map, flags);
#endif
goto found;
}
/*
* Try to see if we can get the block without requesting a new
* file system block.
*/
down_read(&EXT4_I(inode)->i_data_sem);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
retval = ext4_ext_map_blocks(handle, inode, map, flags &
EXT4_GET_BLOCKS_KEEP_SIZE);
} else {
retval = ext4_ind_map_blocks(handle, inode, map, flags &
EXT4_GET_BLOCKS_KEEP_SIZE);
}
if (retval > 0) {
unsigned int status;
if (unlikely(retval != map->m_len)) {
ext4_warning(inode->i_sb,
"ES len assertion failed for inode "
"%lu: retval %d != map->m_len %d",
inode->i_ino, retval, map->m_len);
WARN_ON(1);
}
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
!(status & EXTENT_STATUS_WRITTEN) &&
ext4_find_delalloc_range(inode, map->m_lblk,
map->m_lblk + map->m_len - 1))
status |= EXTENT_STATUS_DELAYED;
ret = ext4_es_insert_extent(inode, map->m_lblk,
map->m_len, map->m_pblk, status);
if (ret < 0)
retval = ret;
}
up_read((&EXT4_I(inode)->i_data_sem));
found:
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
ret = check_block_validity(inode, map);
if (ret != 0)
return ret;
}
/* If it is only a block(s) look up */
if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
return retval;
/*
 * Return if the blocks have already been allocated.
 *
 * Note that if blocks have been preallocated,
 * ext4_ext_get_block() called with create == 0
 * returns with the buffer head unmapped.
 */
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
/*
* If we need to convert extent to unwritten
* we continue and do the actual work in
* ext4_ext_map_blocks()
*/
if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
return retval;
/*
* Here we clear m_flags because after allocating a new extent,
* it will be set again.
*/
map->m_flags &= ~EXT4_MAP_FLAGS;
/*
 * Allocating new blocks and/or writing to an unwritten extent
 * may result in i_data being updated, so we take the write
 * lock of i_data_sem and call get_block() with the
 * create == 1 flag.
*/
down_write(&EXT4_I(inode)->i_data_sem);
/*
 * We have to re-check the extents flag here because a migration
 * could have changed the inode's mapping type in between.
*/
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
retval = ext4_ext_map_blocks(handle, inode, map, flags);
} else {
retval = ext4_ind_map_blocks(handle, inode, map, flags);
if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
/*
* We allocated new blocks which will result in
* i_data's format changing. Force the migrate
* to fail by clearing migrate flags
*/
ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
}
/*
* Update reserved blocks/metadata blocks after successful
* block allocation which had been deferred till now. We don't
* support fallocate for non extent files. So we can update
* reserve space here.
*/
if ((retval > 0) &&
(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
ext4_da_update_reserve_space(inode, retval, 1);
}
if (retval > 0) {
unsigned int status;
if (unlikely(retval != map->m_len)) {
ext4_warning(inode->i_sb,
"ES len assertion failed for inode "
"%lu: retval %d != map->m_len %d",
inode->i_ino, retval, map->m_len);
WARN_ON(1);
}
/*
* We have to zeroout blocks before inserting them into extent
* status tree. Otherwise someone could look them up there and
* use them before they are really zeroed.
*/
if (flags & EXT4_GET_BLOCKS_ZERO &&
map->m_flags & EXT4_MAP_MAPPED &&
map->m_flags & EXT4_MAP_NEW) {
ret = ext4_issue_zeroout(inode, map->m_lblk,
map->m_pblk, map->m_len);
if (ret) {
retval = ret;
goto out_sem;
}
}
/*
* If the extent has been zeroed out, we don't need to update
* extent status tree.
*/
if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
if (ext4_es_is_written(&es))
goto out_sem;
}
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
!(status & EXTENT_STATUS_WRITTEN) &&
ext4_find_delalloc_range(inode, map->m_lblk,
map->m_lblk + map->m_len - 1))
status |= EXTENT_STATUS_DELAYED;
ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
map->m_pblk, status);
if (ret < 0) {
retval = ret;
goto out_sem;
}
}
out_sem:
up_write((&EXT4_I(inode)->i_data_sem));
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
ret = check_block_validity(inode, map);
if (ret != 0)
return ret;
/*
* Inodes with freshly allocated blocks where contents will be
* visible after transaction commit must be on transaction's
* ordered data list.
*/
if (map->m_flags & EXT4_MAP_NEW &&
!(map->m_flags & EXT4_MAP_UNWRITTEN) &&
!(flags & EXT4_GET_BLOCKS_ZERO) &&
!IS_NOQUOTA(inode) &&
ext4_should_order_data(inode)) {
ret = ext4_jbd2_file_inode(handle, inode);
if (ret)
return ret;
}
}
return retval;
}
/*
* Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
* we have to be careful as someone else may be manipulating b_state as well.
*/
static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
{
unsigned long old_state;
unsigned long new_state;
flags &= EXT4_MAP_FLAGS;
/* Dummy buffer_head? Set non-atomically. */
if (!bh->b_page) {
bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
return;
}
/*
* Someone else may be modifying b_state. Be careful! This is ugly but
* once we get rid of using bh as a container for mapping information
* to pass to / from get_block functions, this can go away.
*/
do {
old_state = READ_ONCE(bh->b_state);
new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
} while (unlikely(
cmpxchg(&bh->b_state, old_state, new_state) != old_state));
}
static int _ext4_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int flags)
{
struct ext4_map_blocks map;
int ret = 0;
if (ext4_has_inline_data(inode))
return -ERANGE;
map.m_lblk = iblock;
map.m_len = bh->b_size >> inode->i_blkbits;
ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
flags);
if (ret > 0) {
map_bh(bh, inode->i_sb, map.m_pblk);
ext4_update_bh_state(bh, map.m_flags);
bh->b_size = inode->i_sb->s_blocksize * map.m_len;
ret = 0;
}
return ret;
}
int ext4_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int create)
{
return _ext4_get_block(inode, iblock, bh,
create ? EXT4_GET_BLOCKS_CREATE : 0);
}
/*
* Get block function used when preparing for buffered write if we require
* creating an unwritten extent if blocks haven't been allocated. The extent
* will be converted to written after the IO is complete.
*/
int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
inode->i_ino, create);
return _ext4_get_block(inode, iblock, bh_result,
EXT4_GET_BLOCKS_IO_CREATE_EXT);
}
/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096
/*
* Get blocks function for the cases that need to start a transaction -
 * generally different cases of direct IO and DAX IO. It also handles retries
* in case of ENOSPC.
*/
static int ext4_get_block_trans(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int flags)
{
int dio_credits;
handle_t *handle;
int retries = 0;
int ret;
/* Trim mapping request to maximum we can map at once for DIO */
if (bh_result->b_size >> inode->i_blkbits > DIO_MAX_BLOCKS)
bh_result->b_size = DIO_MAX_BLOCKS << inode->i_blkbits;
dio_credits = ext4_chunk_trans_blocks(inode,
bh_result->b_size >> inode->i_blkbits);
retry:
handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
if (IS_ERR(handle))
return PTR_ERR(handle);
ret = _ext4_get_block(inode, iblock, bh_result, flags);
ext4_journal_stop(handle);
if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry;
return ret;
}
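/*
 * The retry loop above is the standard ext4 ENOSPC pattern: on a
 * failed allocation, ext4_should_retry_alloc() kicks a journal commit
 * to release reserved space and permits a bounded number of retries
 * before the error is finally propagated.
 */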
/* Get block function for DIO reads and writes to inodes without extents */
int ext4_dio_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int create)
{
/* We don't expect handle for direct IO */
WARN_ON_ONCE(ext4_journal_current_handle());
if (!create)
return _ext4_get_block(inode, iblock, bh, 0);
return ext4_get_block_trans(inode, iblock, bh, EXT4_GET_BLOCKS_CREATE);
}
/*
* Get block function for AIO DIO writes when we create unwritten extent if
* blocks are not allocated yet. The extent will be converted to written
* after IO is complete.
*/
static int ext4_dio_get_block_unwritten_async(struct inode *inode,
sector_t iblock, struct buffer_head *bh_result, int create)
{
int ret;
/* We don't expect handle for direct IO */
WARN_ON_ONCE(ext4_journal_current_handle());
ret = ext4_get_block_trans(inode, iblock, bh_result,
EXT4_GET_BLOCKS_IO_CREATE_EXT);
/*
* When doing DIO using unwritten extents, we need io_end to convert
* unwritten extents to written on IO completion. We allocate io_end
* once we spot unwritten extent and store it in b_private. Generic
* DIO code keeps b_private set and furthermore passes the value to
* our completion callback in 'private' argument.
*/
if (!ret && buffer_unwritten(bh_result)) {
if (!bh_result->b_private) {
ext4_io_end_t *io_end;
io_end = ext4_init_io_end(inode, GFP_KERNEL);
if (!io_end)
return -ENOMEM;
bh_result->b_private = io_end;
ext4_set_io_unwritten_flag(inode, io_end);
}
set_buffer_defer_completion(bh_result);
}
return ret;
}
/*
* Get block function for non-AIO DIO writes when we create unwritten extent if
* blocks are not allocated yet. The extent will be converted to written
* after IO is complete from ext4_ext_direct_IO() function.
*/
static int ext4_dio_get_block_unwritten_sync(struct inode *inode,
sector_t iblock, struct buffer_head *bh_result, int create)
{
int ret;
/* We don't expect handle for direct IO */
WARN_ON_ONCE(ext4_journal_current_handle());
ret = ext4_get_block_trans(inode, iblock, bh_result,
EXT4_GET_BLOCKS_IO_CREATE_EXT);
/*
* Mark inode as having pending DIO writes to unwritten extents.
* ext4_ext_direct_IO() checks this flag and converts extents to
* written.
*/
if (!ret && buffer_unwritten(bh_result))
ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
return ret;
}
static int ext4_dio_get_block_overwrite(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
int ret;
ext4_debug("ext4_dio_get_block_overwrite: inode %lu, create flag %d\n",
inode->i_ino, create);
/* We don't expect handle for direct IO */
WARN_ON_ONCE(ext4_journal_current_handle());
ret = _ext4_get_block(inode, iblock, bh_result, 0);
/*
* Blocks should have been preallocated! ext4_file_write_iter() checks
* that.
*/
WARN_ON_ONCE(!buffer_mapped(bh_result) || buffer_unwritten(bh_result));
return ret;
}
/*
* `handle' can be NULL if create is zero
*/
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
ext4_lblk_t block, int map_flags)
{
struct ext4_map_blocks map;
struct buffer_head *bh;
int create = map_flags & EXT4_GET_BLOCKS_CREATE;
int err;
J_ASSERT(handle != NULL || create == 0);
map.m_lblk = block;
map.m_len = 1;
err = ext4_map_blocks(handle, inode, &map, map_flags);
if (err == 0)
return create ? ERR_PTR(-ENOSPC) : NULL;
if (err < 0)
return ERR_PTR(err);
bh = sb_getblk(inode->i_sb, map.m_pblk);
if (unlikely(!bh))
return ERR_PTR(-ENOMEM);
if (map.m_flags & EXT4_MAP_NEW) {
J_ASSERT(create != 0);
J_ASSERT(handle != NULL);
/*
* Now that we do not always journal data, we should
* keep in mind whether this should always journal the
* new buffer as metadata. For now, regular file
* writes use ext4_get_block instead, so it's not a
* problem.
*/
lock_buffer(bh);
BUFFER_TRACE(bh, "call get_create_access");
err = ext4_journal_get_create_access(handle, bh);
if (unlikely(err)) {
unlock_buffer(bh);
goto errout;
}
if (!buffer_uptodate(bh)) {
memset(bh->b_data, 0, inode->i_sb->s_blocksize);
set_buffer_uptodate(bh);
}
unlock_buffer(bh);
BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
err = ext4_handle_dirty_metadata(handle, inode, bh);
if (unlikely(err))
goto errout;
} else
BUFFER_TRACE(bh, "not a new buffer");
return bh;
errout:
brelse(bh);
return ERR_PTR(err);
}
struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
ext4_lblk_t block, int map_flags)
{
struct buffer_head *bh;
bh = ext4_getblk(handle, inode, block, map_flags);
if (IS_ERR(bh))
return bh;
if (!bh || buffer_uptodate(bh))
return bh;
ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
wait_on_buffer(bh);
if (buffer_uptodate(bh))
return bh;
put_bh(bh);
return ERR_PTR(-EIO);
}
int ext4_walk_page_buffers(handle_t *handle,
struct buffer_head *head,
unsigned from,
unsigned to,
int *partial,
int (*fn)(handle_t *handle,
struct buffer_head *bh))
{
struct buffer_head *bh;
unsigned block_start, block_end;
unsigned blocksize = head->b_size;
int err, ret = 0;
struct buffer_head *next;
for (bh = head, block_start = 0;
ret == 0 && (bh != head || !block_start);
block_start = block_end, bh = next) {
next = bh->b_this_page;
block_end = block_start + blocksize;
if (block_end <= from || block_start >= to) {
if (partial && !buffer_uptodate(bh))
*partial = 1;
continue;
}
err = (*fn)(handle, bh);
if (!ret)
ret = err;
}
return ret;
}
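/*
 * ext4_walk_page_buffers() applies @fn to every buffer head that
 * overlaps the byte range [@from, @to) within the page. A typical
 * call, as made from ext4_write_begin() below:
 *
 *	ext4_walk_page_buffers(handle, page_buffers(page),
 *			       from, to, NULL,
 *			       do_journal_get_write_access);
 */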
/*
* To preserve ordering, it is essential that the hole instantiation and
* the data write be encapsulated in a single transaction. We cannot
* close off a transaction and start a new one between the ext4_get_block()
* and the commit_write(). So doing the jbd2_journal_start at the start of
* prepare_write() is the right place.
*
* Also, this function can nest inside ext4_writepage(). In that case, we
* *know* that ext4_writepage() has generated enough buffer credits to do the
* whole page. So we won't block on the journal in that case, which is good,
* because the caller may be PF_MEMALLOC.
*
* By accident, ext4 can be reentered when a transaction is open via
* quota file writes. If we were to commit the transaction while thus
* reentered, there can be a deadlock - we would be holding a quota
* lock, and the commit would never complete if another thread had a
* transaction open and was blocking on the quota lock - a ranking
* violation.
*
* So what we do is to rely on the fact that jbd2_journal_stop/journal_start
* will _not_ run commit under these circumstances because handle->h_ref
* is elevated. We'll still have enough credits for the tiny quotafile
* write.
*/
int do_journal_get_write_access(handle_t *handle,
struct buffer_head *bh)
{
int dirty = buffer_dirty(bh);
int ret;
if (!buffer_mapped(bh) || buffer_freed(bh))
return 0;
/*
* __block_write_begin() could have dirtied some buffers. Clean
* the dirty bit as jbd2_journal_get_write_access() could complain
* otherwise about fs integrity issues. Setting of the dirty bit
* by __block_write_begin() isn't a real problem here as we clear
* the bit before releasing a page lock and thus writeback cannot
* ever write the buffer.
*/
if (dirty)
clear_buffer_dirty(bh);
BUFFER_TRACE(bh, "get write access");
ret = ext4_journal_get_write_access(handle, bh);
if (!ret && dirty)
ret = ext4_handle_dirty_metadata(handle, NULL, bh);
return ret;
}
#ifdef CONFIG_EXT4_FS_ENCRYPTION
static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
get_block_t *get_block)
{
unsigned from = pos & (PAGE_SIZE - 1);
unsigned to = from + len;
struct inode *inode = page->mapping->host;
unsigned block_start, block_end;
sector_t block;
int err = 0;
unsigned blocksize = inode->i_sb->s_blocksize;
unsigned bbits;
struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
bool decrypt = false;
BUG_ON(!PageLocked(page));
BUG_ON(from > PAGE_SIZE);
BUG_ON(to > PAGE_SIZE);
BUG_ON(from > to);
if (!page_has_buffers(page))
create_empty_buffers(page, blocksize, 0);
head = page_buffers(page);
bbits = ilog2(blocksize);
block = (sector_t)page->index << (PAGE_SHIFT - bbits);
for (bh = head, block_start = 0; bh != head || !block_start;
block++, block_start = block_end, bh = bh->b_this_page) {
block_end = block_start + blocksize;
if (block_end <= from || block_start >= to) {
if (PageUptodate(page)) {
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
}
continue;
}
if (buffer_new(bh))
clear_buffer_new(bh);
if (!buffer_mapped(bh)) {
WARN_ON(bh->b_size != blocksize);
err = get_block(inode, block, bh, 1);
if (err)
break;
if (buffer_new(bh)) {
unmap_underlying_metadata(bh->b_bdev,
bh->b_blocknr);
if (PageUptodate(page)) {
clear_buffer_new(bh);
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
continue;
}
if (block_end > to || block_start < from)
zero_user_segments(page, to, block_end,
block_start, from);
continue;
}
}
if (PageUptodate(page)) {
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
continue;
}
if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
!buffer_unwritten(bh) &&
(block_start < from || block_end > to)) {
ll_rw_block(READ, 1, &bh);
*wait_bh++ = bh;
decrypt = ext4_encrypted_inode(inode) &&
S_ISREG(inode->i_mode);
}
}
/*
* If we issued read requests, let them complete.
*/
while (wait_bh > wait) {
wait_on_buffer(*--wait_bh);
if (!buffer_uptodate(*wait_bh))
err = -EIO;
}
if (unlikely(err))
page_zero_new_buffers(page, from, to);
else if (decrypt)
err = ext4_decrypt(page);
return err;
}
#endif
static int ext4_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
struct inode *inode = mapping->host;
int ret, needed_blocks;
handle_t *handle;
int retries = 0;
struct page *page;
pgoff_t index;
unsigned from, to;
trace_ext4_write_begin(inode, pos, len, flags);
/*
* Reserve one block more for addition to orphan list in case
* we allocate blocks but write fails for some reason
*/
needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
index = pos >> PAGE_SHIFT;
from = pos & (PAGE_SIZE - 1);
to = from + len;
if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
flags, pagep);
if (ret < 0)
return ret;
if (ret == 1)
return 0;
}
/*
* grab_cache_page_write_begin() can take a long time if the
* system is thrashing due to memory pressure, or if the page
* is being written back. So grab it first before we start
* the transaction handle. This also allows us to allocate
* the page (if needed) without using GFP_NOFS.
*/
retry_grab:
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page)
return -ENOMEM;
unlock_page(page);
retry_journal:
handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
if (IS_ERR(handle)) {
put_page(page);
return PTR_ERR(handle);
}
lock_page(page);
if (page->mapping != mapping) {
/* The page got truncated from under us */
unlock_page(page);
put_page(page);
ext4_journal_stop(handle);
goto retry_grab;
}
/* In case writeback began while the page was unlocked */
wait_for_stable_page(page);
#ifdef CONFIG_EXT4_FS_ENCRYPTION
if (ext4_should_dioread_nolock(inode))
ret = ext4_block_write_begin(page, pos, len,
ext4_get_block_unwritten);
else
ret = ext4_block_write_begin(page, pos, len,
ext4_get_block);
#else
if (ext4_should_dioread_nolock(inode))
ret = __block_write_begin(page, pos, len,
ext4_get_block_unwritten);
else
ret = __block_write_begin(page, pos, len, ext4_get_block);
#endif
if (!ret && ext4_should_journal_data(inode)) {
ret = ext4_walk_page_buffers(handle, page_buffers(page),
from, to, NULL,
do_journal_get_write_access);
}
if (ret) {
unlock_page(page);
/*
* __block_write_begin may have instantiated a few blocks
* outside i_size. Trim these off again. Don't need
* i_size_read because we hold i_mutex.
*
* Add inode to orphan list in case we crash before
* truncate finishes
*/
if (pos + len > inode->i_size && ext4_can_truncate(inode))
ext4_orphan_add(handle, inode);
ext4_journal_stop(handle);
if (pos + len > inode->i_size) {
ext4_truncate_failed_write(inode);
/*
* If truncate failed early the inode might
* still be on the orphan list; we need to
* make sure the inode is removed from the
* orphan list in that case.
*/
if (inode->i_nlink)
ext4_orphan_del(NULL, inode);
}
if (ret == -ENOSPC &&
ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry_journal;
put_page(page);
return ret;
}
*pagep = page;
return ret;
}
/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
int ret;
if (!buffer_mapped(bh) || buffer_freed(bh))
return 0;
set_buffer_uptodate(bh);
ret = ext4_handle_dirty_metadata(handle, NULL, bh);
clear_buffer_meta(bh);
clear_buffer_prio(bh);
return ret;
}
/*
 * We need to pick up the new inode size which generic_commit_write gave us.
* `file' can be NULL - eg, when called from page_symlink().
*
* ext4 never places buffers on inode->i_mapping->private_list. metadata
* buffers are managed internally.
*/
static int ext4_write_end(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
handle_t *handle = ext4_journal_current_handle();
struct inode *inode = mapping->host;
loff_t old_size = inode->i_size;
int ret = 0, ret2;
int i_size_changed = 0;
trace_ext4_write_end(inode, pos, len, copied);
if (ext4_has_inline_data(inode)) {
ret = ext4_write_inline_data_end(inode, pos, len,
copied, page);
if (ret < 0)
goto errout;
copied = ret;
} else
copied = block_write_end(file, mapping, pos,
len, copied, page, fsdata);
/*
* it's important to update i_size while still holding page lock:
* page writeout could otherwise come in and zero beyond i_size.
*/
i_size_changed = ext4_update_inode_size(inode, pos + copied);
unlock_page(page);
put_page(page);
if (old_size < pos)
pagecache_isize_extended(inode, old_size, pos);
/*
* Don't mark the inode dirty under page lock. First, it unnecessarily
* makes the holding time of page lock longer. Second, it forces lock
* ordering of page lock and transaction start for journaling
* filesystems.
*/
if (i_size_changed)
ext4_mark_inode_dirty(handle, inode);
if (pos + len > inode->i_size && ext4_can_truncate(inode))
/* If we have allocated more blocks than we copied,
 * we will have blocks allocated outside
 * inode->i_size; truncate them.
 */
ext4_orphan_add(handle, inode);
errout:
ret2 = ext4_journal_stop(handle);
if (!ret)
ret = ret2;
if (pos + len > inode->i_size) {
ext4_truncate_failed_write(inode);
/*
* If truncate failed early the inode might still be
* on the orphan list; we need to make sure the inode
* is removed from the orphan list in that case.
*/
if (inode->i_nlink)
ext4_orphan_del(NULL, inode);
}
return ret ? ret : copied;
}
/*
* This is a private version of page_zero_new_buffers() which doesn't
* set the buffer to be dirty, since in data=journalled mode we need
* to call ext4_handle_dirty_metadata() instead.
*/
static void zero_new_buffers(struct page *page, unsigned from, unsigned to)
{
unsigned int block_start = 0, block_end;
struct buffer_head *head, *bh;
bh = head = page_buffers(page);
do {
block_end = block_start + bh->b_size;
if (buffer_new(bh)) {
if (block_end > from && block_start < to) {
if (!PageUptodate(page)) {
unsigned start, size;
start = max(from, block_start);
size = min(to, block_end) - start;
zero_user(page, start, size);
set_buffer_uptodate(bh);
}
clear_buffer_new(bh);
}
}
block_start = block_end;
bh = bh->b_this_page;
} while (bh != head);
}
static int ext4_journalled_write_end(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
handle_t *handle = ext4_journal_current_handle();
struct inode *inode = mapping->host;
loff_t old_size = inode->i_size;
int ret = 0, ret2;
int partial = 0;
unsigned from, to;
int size_changed = 0;
trace_ext4_journalled_write_end(inode, pos, len, copied);
from = pos & (PAGE_SIZE - 1);
to = from + len;
BUG_ON(!ext4_handle_valid(handle));
if (ext4_has_inline_data(inode))
copied = ext4_write_inline_data_end(inode, pos, len,
copied, page);
else {
if (copied < len) {
if (!PageUptodate(page))
copied = 0;
zero_new_buffers(page, from+copied, to);
}
ret = ext4_walk_page_buffers(handle, page_buffers(page), from,
to, &partial, write_end_fn);
if (!partial)
SetPageUptodate(page);
}
size_changed = ext4_update_inode_size(inode, pos + copied);
ext4_set_inode_state(inode, EXT4_STATE_JDATA);
EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
unlock_page(page);
put_page(page);
if (old_size < pos)
pagecache_isize_extended(inode, old_size, pos);
if (size_changed) {
ret2 = ext4_mark_inode_dirty(handle, inode);
if (!ret)
ret = ret2;
}
if (pos + len > inode->i_size && ext4_can_truncate(inode))
/* If we have allocated more blocks than we copied,
 * we will have blocks allocated outside
 * inode->i_size; truncate them.
 */
ext4_orphan_add(handle, inode);
ret2 = ext4_journal_stop(handle);
if (!ret)
ret = ret2;
if (pos + len > inode->i_size) {
ext4_truncate_failed_write(inode);
/*
* If truncate failed early the inode might still be
* on the orphan list; we need to make sure the inode
* is removed from the orphan list in that case.
*/
if (inode->i_nlink)
ext4_orphan_del(NULL, inode);
}
return ret ? ret : copied;
}
/*
* Reserve space for a single cluster
*/
static int ext4_da_reserve_space(struct inode *inode)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
int ret;
/*
* We will charge metadata quota at writeout time; this saves
* us from metadata over-estimation, though we may go over by
* a small amount in the end. Here we just reserve for data.
*/
ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
if (ret)
return ret;
spin_lock(&ei->i_block_reservation_lock);
if (ext4_claim_free_clusters(sbi, 1, 0)) {
spin_unlock(&ei->i_block_reservation_lock);
dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
return -ENOSPC;
}
ei->i_reserved_data_blocks++;
trace_ext4_da_reserve_space(inode);
spin_unlock(&ei->i_block_reservation_lock);
return 0; /* success */
}
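/*
 * Note the ordering above: the quota reservation is taken first and
 * rolled back if the filesystem-level cluster claim fails, so on
 * success the two counters always agree.
 */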
static void ext4_da_release_space(struct inode *inode, int to_free)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
if (!to_free)
return; /* Nothing to release, exit */
spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
trace_ext4_da_release_space(inode, to_free);
if (unlikely(to_free > ei->i_reserved_data_blocks)) {
/*
 * If there aren't enough reserved blocks, then the
 * counter is messed up somewhere. Since this
 * function is called from the invalidatepage path,
 * it's harmless to return without any action.
*/
ext4_warning(inode->i_sb, "ext4_da_release_space: "
"ino %lu, to_free %d with only %d reserved "
"data blocks", inode->i_ino, to_free,
ei->i_reserved_data_blocks);
WARN_ON(1);
to_free = ei->i_reserved_data_blocks;
}
ei->i_reserved_data_blocks -= to_free;
/* update fs dirty data blocks counter */
percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
}
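/*
* Called on partial-page invalidation: clear the delayed bit on buffers in
* the invalidated range, drop the corresponding extent status entries, and
* release per-cluster reservations that are no longer backed by any
* delayed buffer.
*/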
static void ext4_da_page_release_reservation(struct page *page,
unsigned int offset,
unsigned int length)
{
int to_release = 0, contiguous_blks = 0;
struct buffer_head *head, *bh;
unsigned int curr_off = 0;
struct inode *inode = page->mapping->host;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
unsigned int stop = offset + length;
int num_clusters;
ext4_fsblk_t lblk;
BUG_ON(stop > PAGE_SIZE || stop < length);
head = page_buffers(page);
bh = head;
do {
unsigned int next_off = curr_off + bh->b_size;
if (next_off > stop)
break;
if ((offset <= curr_off) && (buffer_delay(bh))) {
to_release++;
contiguous_blks++;
clear_buffer_delay(bh);
} else if (contiguous_blks) {
lblk = page->index <<
(PAGE_SHIFT - inode->i_blkbits);
lblk += (curr_off >> inode->i_blkbits) -
contiguous_blks;
ext4_es_remove_extent(inode, lblk, contiguous_blks);
contiguous_blks = 0;
}
curr_off = next_off;
} while ((bh = bh->b_this_page) != head);
if (contiguous_blks) {
lblk = page->index << (PAGE_SHIFT - inode->i_blkbits);
lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
ext4_es_remove_extent(inode, lblk, contiguous_blks);
}
/* If we have released all the blocks belonging to a cluster, then we
* need to release the reserved space for that cluster. */
num_clusters = EXT4_NUM_B2C(sbi, to_release);
while (num_clusters > 0) {
lblk = (page->index << (PAGE_SHIFT - inode->i_blkbits)) +
((num_clusters - 1) << sbi->s_cluster_bits);
if (sbi->s_cluster_ratio == 1 ||
!ext4_find_delalloc_cluster(inode, lblk))
ext4_da_release_space(inode, 1);
num_clusters--;
}
}
/*
* Delayed allocation stuff
*/
struct mpage_da_data {
struct inode *inode;
struct writeback_control *wbc;
pgoff_t first_page; /* The first page to write */
pgoff_t next_page; /* Current page to examine */
pgoff_t last_page; /* Last page to examine */
/*
* Extent to map - this can be after first_page because that can be
* fully mapped. We somewhat abuse m_flags to store whether the extent
* is delalloc or unwritten.
*/
struct ext4_map_blocks map;
struct ext4_io_submit io_submit; /* IO submission data */
};
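/*
* Unlock, and optionally invalidate, the pages in [first_page, next_page)
* that were locked for writeback but will not be written out (e.g. after a
* block allocation failure).
*/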
static void mpage_release_unused_pages(struct mpage_da_data *mpd,
bool invalidate)
{
int nr_pages, i;
pgoff_t index, end;
struct pagevec pvec;
struct inode *inode = mpd->inode;
struct address_space *mapping = inode->i_mapping;
/* This is necessary when next_page == 0. */
if (mpd->first_page >= mpd->next_page)
return;
index = mpd->first_page;
end = mpd->next_page - 1;
if (invalidate) {
ext4_lblk_t start, last;
start = index << (PAGE_SHIFT - inode->i_blkbits);
last = end << (PAGE_SHIFT - inode->i_blkbits);
ext4_es_remove_extent(inode, start, last - start + 1);
}
pagevec_init(&pvec, 0);
while (index <= end) {
nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
if (nr_pages == 0)
break;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
if (page->index > end)
break;
BUG_ON(!PageLocked(page));
BUG_ON(PageWriteback(page));
if (invalidate) {
block_invalidatepage(page, 0, PAGE_SIZE);
ClearPageUptodate(page);
}
unlock_page(page);
}
index = pvec.pages[nr_pages - 1]->index + 1;
pagevec_release(&pvec);
}
}
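/*
* Dump the free/dirty cluster accounting to the log; used when delayed
* allocation unexpectedly fails with ENOSPC.
*/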
static void ext4_print_free_blocks(struct inode *inode)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct super_block *sb = inode->i_sb;
struct ext4_inode_info *ei = EXT4_I(inode);
ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
EXT4_C2B(EXT4_SB(inode->i_sb),
ext4_count_free_clusters(sb)));
ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
(long long) EXT4_C2B(EXT4_SB(sb),
percpu_counter_sum(&sbi->s_freeclusters_counter)));
ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
(long long) EXT4_C2B(EXT4_SB(sb),
percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
ext4_msg(sb, KERN_CRIT, "Block reservation details");
ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
ei->i_reserved_data_blocks);
return;
}
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
{
return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
}
/*
* This function grabs code from the very beginning of
* ext4_map_blocks, but assumes that the caller is in the delayed
* write path. This function looks up the requested blocks and sets the
* buffer delay bit under the protection of i_data_sem.
*/
static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
struct ext4_map_blocks *map,
struct buffer_head *bh)
{
struct extent_status es;
int retval;
sector_t invalid_block = ~((sector_t) 0xffff);
#ifdef ES_AGGRESSIVE_TEST
struct ext4_map_blocks orig_map;
memcpy(&orig_map, map, sizeof(*map));
#endif
if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
invalid_block = ~0;
map->m_flags = 0;
ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
"logical block %lu\n", inode->i_ino, map->m_len,
(unsigned long) map->m_lblk);
/* Lookup extent status tree firstly */
if (ext4_es_lookup_extent(inode, iblock, &es)) {
if (ext4_es_is_hole(&es)) {
retval = 0;
down_read(&EXT4_I(inode)->i_data_sem);
goto add_delayed;
}
/*
* Delayed extent could be allocated by fallocate.
* So we need to check it.
*/
if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
map_bh(bh, inode->i_sb, invalid_block);
set_buffer_new(bh);
set_buffer_delay(bh);
return 0;
}
map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
retval = es.es_len - (iblock - es.es_lblk);
if (retval > map->m_len)
retval = map->m_len;
map->m_len = retval;
if (ext4_es_is_written(&es))
map->m_flags |= EXT4_MAP_MAPPED;
else if (ext4_es_is_unwritten(&es))
map->m_flags |= EXT4_MAP_UNWRITTEN;
else
BUG_ON(1);
#ifdef ES_AGGRESSIVE_TEST
ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
#endif
return retval;
}
/*
* Try to see if we can get the block without requesting a new
* file system block.
*/
down_read(&EXT4_I(inode)->i_data_sem);
if (ext4_has_inline_data(inode))
retval = 0;
else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
retval = ext4_ext_map_blocks(NULL, inode, map, 0);
else
retval = ext4_ind_map_blocks(NULL, inode, map, 0);
add_delayed:
if (retval == 0) {
int ret;
/*
* XXX: __block_prepare_write() unmaps passed block,
* is it OK?
*/
/*
* If the block was allocated from previously allocated cluster,
* then we don't need to reserve it again. However we still need
* to reserve metadata for every block we're going to write.
*/
if (EXT4_SB(inode->i_sb)->s_cluster_ratio == 1 ||
!ext4_find_delalloc_cluster(inode, map->m_lblk)) {
ret = ext4_da_reserve_space(inode);
if (ret) {
/* not enough space to reserve */
retval = ret;
goto out_unlock;
}
}
ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
~0, EXTENT_STATUS_DELAYED);
if (ret) {
retval = ret;
goto out_unlock;
}
map_bh(bh, inode->i_sb, invalid_block);
set_buffer_new(bh);
set_buffer_delay(bh);
} else if (retval > 0) {
int ret;
unsigned int status;
if (unlikely(retval != map->m_len)) {
ext4_warning(inode->i_sb,
"ES len assertion failed for inode "
"%lu: retval %d != map->m_len %d",
inode->i_ino, retval, map->m_len);
WARN_ON(1);
}
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
map->m_pblk, status);
if (ret != 0)
retval = ret;
}
out_unlock:
up_read((&EXT4_I(inode)->i_data_sem));
return retval;
}
/*
* This is a special get_block_t callback which is used by
* ext4_da_write_begin(). It will either return a mapped block or
* reserve space for a single block.
*
* For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
* We also have b_blocknr = -1 and b_bdev initialized properly
*
* For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
* We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
* initialized properly.
*/
int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int create)
{
struct ext4_map_blocks map;
int ret = 0;
BUG_ON(create == 0);
BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
map.m_lblk = iblock;
map.m_len = 1;
/*
* First, we need to know whether the block is already allocated;
* preallocated blocks are unmapped but should be treated
* the same as allocated blocks.
*/
ret = ext4_da_map_blocks(inode, iblock, &map, bh);
if (ret <= 0)
return ret;
map_bh(bh, inode->i_sb, map.m_pblk);
ext4_update_bh_state(bh, map.m_flags);
if (buffer_unwritten(bh)) {
/* A delayed write to an unwritten bh should be marked
* new and mapped. Mapped ensures that we don't call
* get_block multiple times when we write to the same
* offset, and new ensures that we do a proper zero-out
* for a partial write.
*/
set_buffer_new(bh);
set_buffer_mapped(bh);
}
return 0;
}
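/* Buffer refcount helpers used as ext4_walk_page_buffers() callbacks. */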
static int bget_one(handle_t *handle, struct buffer_head *bh)
{
get_bh(bh);
return 0;
}
static int bput_one(handle_t *handle, struct buffer_head *bh)
{
put_bh(bh);
return 0;
}
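/*
* Write one page under data=journal: pin the page buffers (or fetch the
* inline-data buffer), retake the page lock under a journal handle, and
* journal every buffer via do_journal_get_write_access()/write_end_fn.
*/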
static int __ext4_journalled_writepage(struct page *page,
unsigned int len)
{
struct address_space *mapping = page->mapping;
struct inode *inode = mapping->host;
struct buffer_head *page_bufs = NULL;
handle_t *handle = NULL;
int ret = 0, err = 0;
int inline_data = ext4_has_inline_data(inode);
struct buffer_head *inode_bh = NULL;
ClearPageChecked(page);
if (inline_data) {
BUG_ON(page->index != 0);
BUG_ON(len > ext4_get_max_inline_size(inode));
inode_bh = ext4_journalled_write_inline_data(inode, len, page);
if (inode_bh == NULL)
goto out;
} else {
page_bufs = page_buffers(page);
if (!page_bufs) {
BUG();
goto out;
}
ext4_walk_page_buffers(handle, page_bufs, 0, len,
NULL, bget_one);
}
/*
* We need to release the page lock before we start the
* journal, so grab a reference so the page won't disappear
* out from under us.
*/
get_page(page);
unlock_page(page);
handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
ext4_writepage_trans_blocks(inode));
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
put_page(page);
goto out_no_pagelock;
}
BUG_ON(!ext4_handle_valid(handle));
lock_page(page);
put_page(page);
if (page->mapping != mapping) {
/* The page got truncated from under us */
ext4_journal_stop(handle);
ret = 0;
goto out;
}
if (inline_data) {
BUFFER_TRACE(inode_bh, "get write access");
ret = ext4_journal_get_write_access(handle, inode_bh);
err = ext4_handle_dirty_metadata(handle, inode, inode_bh);
} else {
ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
do_journal_get_write_access);
err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
write_end_fn);
}
if (ret == 0)
ret = err;
EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
err = ext4_journal_stop(handle);
if (!ret)
ret = err;
if (!ext4_has_inline_data(inode))
ext4_walk_page_buffers(NULL, page_bufs, 0, len,
NULL, bput_one);
ext4_set_inode_state(inode, EXT4_STATE_JDATA);
out:
unlock_page(page);
out_no_pagelock:
brelse(inode_bh);
return ret;
}
/*
* Note that we don't need to start a transaction unless we're journaling data
* because we should have holes filled from ext4_page_mkwrite(). We don't even
* need to add the inode to the transaction's list in ordered mode because if
* we are writing back data added by write(), the inode is already there and if
* we are writing back data modified via mmap(), no one guarantees in which
* transaction the data will hit the disk. In case we are journaling data, we
* cannot start transaction directly because transaction start ranks above page
* lock so we have to do some magic.
*
* This function can get called via...
* - ext4_writepages after taking page lock (have journal handle)
* - journal_submit_inode_data_buffers (no journal handle)
* - shrink_page_list via the kswapd/direct reclaim (no journal handle)
* - grab_page_cache when doing write_begin (have journal handle)
*
* We don't do any block allocation in this function. If we have a page with
* multiple blocks we need to write those buffer_heads that are mapped. This
* is important for mmap-based writes. So if, with blocksize 1K, we do
* truncate(f, 1024);
* a = mmap(f, 0, 4096);
* a[0] = 'a';
* truncate(f, 4096);
* then the first buffer_head in the page is mapped via the page_mkwrite
* callback, but the other buffer_heads would be unmapped yet dirty (dirtied
* via do_wp_page). So writepage should write the first block. If we modify
* the mmap area beyond 1024 we will again get a page_fault and the
* page_mkwrite callback will do the block allocation and mark the
* buffer_heads mapped.
*
* We redirty the page if it has any buffer_heads that are either delayed or
* unwritten.
*
* We can get recursively called as shown below.
*
* ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
* ext4_writepage()
*
* But since we don't do any block allocation we should not deadlock.
* The page also has the dirty flag cleared, so we don't get a recursive page_lock.
*/
static int ext4_writepage(struct page *page,
struct writeback_control *wbc)
{
int ret = 0;
loff_t size;
unsigned int len;
struct buffer_head *page_bufs = NULL;
struct inode *inode = page->mapping->host;
struct ext4_io_submit io_submit;
bool keep_towrite = false;
trace_ext4_writepage(page);
size = i_size_read(inode);
if (page->index == size >> PAGE_SHIFT)
len = size & ~PAGE_MASK;
else
len = PAGE_SIZE;
page_bufs = page_buffers(page);
/*
* We cannot do block allocation or other extent handling in this
* function. If there are buffers needing that, we have to redirty
* the page. But we may reach here when we do a journal commit via
* journal_submit_inode_data_buffers() and in that case we must write
* allocated buffers to achieve data=ordered mode guarantees.
*
* Also, if there is only one buffer per page (the fs block
* size == the page size), if one buffer needs block
* allocation or needs to modify the extent tree to clear the
* unwritten flag, we know that the page can't be written at
* all, so we might as well refuse the write immediately.
* Unfortunately if the block size != page size, we can't as
* easily detect this case using ext4_walk_page_buffers(), but
* for the extremely common case, this is an optimization that
* skips a useless round trip through ext4_bio_write_page().
*/
if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
ext4_bh_delay_or_unwritten)) {
redirty_page_for_writepage(wbc, page);
if ((current->flags & PF_MEMALLOC) ||
(inode->i_sb->s_blocksize == PAGE_SIZE)) {
/*
* For memory cleaning there's no point in writing only
* some buffers. So just bail out. Warn if we came here
* from direct reclaim.
*/
WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD))
== PF_MEMALLOC);
unlock_page(page);
return 0;
}
keep_towrite = true;
}
if (PageChecked(page) && ext4_should_journal_data(inode))
/*
* It's mmapped pagecache. Add buffers and journal it. There
* doesn't seem to be much point in redirtying the page here.
*/
return __ext4_journalled_writepage(page, len);
ext4_io_submit_init(&io_submit, wbc);
io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
if (!io_submit.io_end) {
redirty_page_for_writepage(wbc, page);
unlock_page(page);
return -ENOMEM;
}
ret = ext4_bio_write_page(&io_submit, page, len, wbc, keep_towrite);
ext4_io_submit(&io_submit);
/* Drop io_end reference we got from init */
ext4_put_io_end_defer(io_submit.io_end);
return ret;
}
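/*
* Submit a single fully-mapped page for IO and charge it against the
* writeback control's nr_to_write budget.
*/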
static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
{
int len;
loff_t size = i_size_read(mpd->inode);
int err;
BUG_ON(page->index != mpd->first_page);
if (page->index == size >> PAGE_SHIFT)
len = size & ~PAGE_MASK;
else
len = PAGE_SIZE;
clear_page_dirty_for_io(page);
err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
if (!err)
mpd->wbc->nr_to_write--;
mpd->first_page++;
return err;
}
#define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay))
/*
* mballoc gives us at most this number of blocks...
* XXX: That seems to be only a limitation of ext4_mb_normalize_request().
* The rest of mballoc seems to handle chunks up to full group size.
*/
#define MAX_WRITEPAGES_EXTENT_LEN 2048
/*
* mpage_add_bh_to_extent - try to add bh to extent of blocks to map
*
* @mpd - extent of blocks
* @lblk - logical number of the block in the file
* @bh - buffer head we want to add to the extent
*
* The function is used to collect contiguous blocks in the same state. If the
* buffer doesn't require mapping for writeback and we haven't started the
* extent of buffers to map yet, the function returns 'true' immediately - the
* caller can write the buffer right away. Otherwise the function returns true
* if the block has been added to the extent, false if the block couldn't be
* added.
*/
static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
struct buffer_head *bh)
{
struct ext4_map_blocks *map = &mpd->map;
/* Buffer that doesn't need mapping for writeback? */
if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
(!buffer_delay(bh) && !buffer_unwritten(bh))) {
/* So far no extent to map => we write the buffer right away */
if (map->m_len == 0)
return true;
return false;
}
/* First block in the extent? */
if (map->m_len == 0) {
map->m_lblk = lblk;
map->m_len = 1;
map->m_flags = bh->b_state & BH_FLAGS;
return true;
}
/* Don't go larger than mballoc is willing to allocate */
if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
return false;
/* Can we merge the block to our big extent? */
if (lblk == map->m_lblk + map->m_len &&
(bh->b_state & BH_FLAGS) == map->m_flags) {
map->m_len++;
return true;
}
return false;
}
/*
* mpage_process_page_bufs - submit page buffers for IO or add them to extent
*
* @mpd - extent of blocks for mapping
* @head - the first buffer in the page
* @bh - buffer we should start processing from
* @lblk - logical number of the block in the file corresponding to @bh
*
* Walk through page buffers from @bh up to @head (exclusive) and either submit
* the page for IO if all buffers in this page were mapped and there's no
* accumulated extent of buffers to map or add buffers in the page to the
* extent of buffers to map. The function returns 1 if the caller can continue
* by processing the next page, 0 if it should stop adding buffers to the
* extent to map because we cannot extend it anymore. It can also return a
* value < 0 in case of an error during IO submission.
*/
static int mpage_process_page_bufs(struct mpage_da_data *mpd,
struct buffer_head *head,
struct buffer_head *bh,
ext4_lblk_t lblk)
{
struct inode *inode = mpd->inode;
int err;
ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
>> inode->i_blkbits;
do {
BUG_ON(buffer_locked(bh));
if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
/* Found extent to map? */
if (mpd->map.m_len)
return 0;
/* Everything mapped so far and we hit EOF */
break;
}
} while (lblk++, (bh = bh->b_this_page) != head);
/* So far everything mapped? Submit the page for IO. */
if (mpd->map.m_len == 0) {
err = mpage_submit_page(mpd, head->b_page);
if (err < 0)
return err;
}
return lblk < blocks;
}
/*
* mpage_map_and_submit_buffers - update buffers corresponding to changed extent and
* submit fully mapped pages for IO
*
* @mpd - description of extent to map, on return next extent to map
*
* Scan buffers corresponding to changed extent (we expect corresponding pages
* to be already locked) and update buffer state according to new extent state.
* We map delalloc buffers to their physical location, clear unwritten bits,
* and mark buffers as uninit when we perform writes to unwritten extents
* and do extent conversion after IO is finished. If the last page is not fully
* mapped, we update @map to the next extent in the last page that needs
* mapping. Otherwise we submit the page for IO.
*/
static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
{
struct pagevec pvec;
int nr_pages, i;
struct inode *inode = mpd->inode;
struct buffer_head *head, *bh;
int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
pgoff_t start, end;
ext4_lblk_t lblk;
sector_t pblock;
int err;
start = mpd->map.m_lblk >> bpp_bits;
end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
lblk = start << bpp_bits;
pblock = mpd->map.m_pblk;
pagevec_init(&pvec, 0);
while (start <= end) {
nr_pages = pagevec_lookup(&pvec, inode->i_mapping, start,
PAGEVEC_SIZE);
if (nr_pages == 0)
break;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
if (page->index > end)
break;
/* Up to 'end' pages must be contiguous */
BUG_ON(page->index != start);
bh = head = page_buffers(page);
do {
if (lblk < mpd->map.m_lblk)
continue;
if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
/*
* Buffer after end of mapped extent.
* Find next buffer in the page to map.
*/
mpd->map.m_len = 0;
mpd->map.m_flags = 0;
/*
* FIXME: If dioread_nolock supports
* blocksize < pagesize, we need to make
* sure we add size mapped so far to
* io_end->size as the following call
* can submit the page for IO.
*/
err = mpage_process_page_bufs(mpd, head,
bh, lblk);
pagevec_release(&pvec);
if (err > 0)
err = 0;
return err;
}
if (buffer_delay(bh)) {
clear_buffer_delay(bh);
bh->b_blocknr = pblock++;
}
clear_buffer_unwritten(bh);
} while (lblk++, (bh = bh->b_this_page) != head);
/*
* FIXME: This is going to break if dioread_nolock
* supports blocksize < pagesize as we will try to
* convert potentially unmapped parts of inode.
*/
mpd->io_submit.io_end->size += PAGE_SIZE;
/* Page fully mapped - let IO run! */
err = mpage_submit_page(mpd, page);
if (err < 0) {
pagevec_release(&pvec);
return err;
}
start++;
}
pagevec_release(&pvec);
}
/* Extent fully mapped and matches with page boundary. We are done. */
mpd->map.m_len = 0;
mpd->map.m_flags = 0;
return 0;
}
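/*
* Map the extent described by mpd->map: allocate delalloc blocks (creating
* unwritten extents under dioread_nolock and handing the reserved handle to
* the io_end for later conversion), and unmap any stale metadata buffers
* aliasing newly allocated blocks.
*/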
static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
{
struct inode *inode = mpd->inode;
struct ext4_map_blocks *map = &mpd->map;
int get_blocks_flags;
int err, dioread_nolock;
trace_ext4_da_write_pages_extent(inode, map);
/*
* Call ext4_map_blocks() to allocate any delayed allocation blocks, or
* to convert an unwritten extent to be initialized (in the case
* where we have written into one or more preallocated blocks). It is
* possible that we're going to need more metadata blocks than
* previously reserved. However we must not fail because we're in
* writeback and there is nothing we can do about it so it might result
* in data loss. So use reserved blocks to allocate metadata if
* possible.
*
* We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if
* the blocks in question are delalloc blocks. This indicates
* that the blocks and quotas have already been checked when
* the data was copied into the page cache.
*/
get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
EXT4_GET_BLOCKS_METADATA_NOFAIL;
dioread_nolock = ext4_should_dioread_nolock(inode);
if (dioread_nolock)
get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
if (map->m_flags & (1 << BH_Delay))
get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
if (err < 0)
return err;
if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) {
if (!mpd->io_submit.io_end->handle &&
ext4_handle_valid(handle)) {
mpd->io_submit.io_end->handle = handle->h_rsv_handle;
handle->h_rsv_handle = NULL;
}
ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
}
BUG_ON(map->m_len == 0);
if (map->m_flags & EXT4_MAP_NEW) {
struct block_device *bdev = inode->i_sb->s_bdev;
int i;
for (i = 0; i < map->m_len; i++)
unmap_underlying_metadata(bdev, map->m_pblk + i);
}
return 0;
}
/*
* mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
* mpd->len and submit pages underlying it for IO
*
* @handle - handle for journal operations
* @mpd - extent to map
* @give_up_on_write - we set this to true iff there is a fatal error and there
* is no hope of writing the data. The caller should discard
* dirty pages to avoid infinite loops.
*
* The function maps extent starting at mpd->lblk of length mpd->len. If it is
* delayed, blocks are allocated, if it is unwritten, we may need to convert
* them to initialized or split the described range from larger unwritten
* extent. Note that we need not map all the described range since allocation
* can return fewer blocks or the range is covered by more unwritten extents. We
* cannot map more because we are limited by reserved transaction credits. On
* the other hand we always make sure that the last touched page is fully
* mapped so that it can be written out (and thus forward progress is
* guaranteed). After mapping we submit all mapped pages for IO.
*/
static int mpage_map_and_submit_extent(handle_t *handle,
struct mpage_da_data *mpd,
bool *give_up_on_write)
{
struct inode *inode = mpd->inode;
struct ext4_map_blocks *map = &mpd->map;
int err;
loff_t disksize;
int progress = 0;
mpd->io_submit.io_end->offset =
((loff_t)map->m_lblk) << inode->i_blkbits;
do {
err = mpage_map_one_extent(handle, mpd);
if (err < 0) {
struct super_block *sb = inode->i_sb;
if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
goto invalidate_dirty_pages;
/*
* Let the upper layers retry transient errors.
* In the case of ENOSPC, if ext4_count_free_clusters()
* is non-zero, a commit should free up blocks.
*/
if ((err == -ENOMEM) ||
(err == -ENOSPC && ext4_count_free_clusters(sb))) {
if (progress)
goto update_disksize;
return err;
}
ext4_msg(sb, KERN_CRIT,
"Delayed block allocation failed for "
"inode %lu at logical offset %llu with"
" max blocks %u with error %d",
inode->i_ino,
(unsigned long long)map->m_lblk,
(unsigned)map->m_len, -err);
ext4_msg(sb, KERN_CRIT,
"This should not happen!! Data will "
"be lost\n");
if (err == -ENOSPC)
ext4_print_free_blocks(inode);
invalidate_dirty_pages:
*give_up_on_write = true;
return err;
}
progress = 1;
/*
* Update buffer state, submit mapped pages, and get us new
* extent to map
*/
err = mpage_map_and_submit_buffers(mpd);
if (err < 0)
goto update_disksize;
} while (map->m_len);
update_disksize:
/*
* Update on-disk size after IO is submitted. Races with
* truncate are avoided by checking i_size under i_data_sem.
*/
disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
if (disksize > EXT4_I(inode)->i_disksize) {
int err2;
loff_t i_size;
down_write(&EXT4_I(inode)->i_data_sem);
i_size = i_size_read(inode);
if (disksize > i_size)
disksize = i_size;
if (disksize > EXT4_I(inode)->i_disksize)
EXT4_I(inode)->i_disksize = disksize;
err2 = ext4_mark_inode_dirty(handle, inode);
up_write(&EXT4_I(inode)->i_data_sem);
if (err2)
ext4_error(inode->i_sb,
"Failed to mark inode %lu dirty",
inode->i_ino);
if (!err)
err = err2;
}
return err;
}
/*
* Calculate the total number of credits to reserve for one writepages
* iteration. This is called from ext4_writepages(). We map an extent of
* up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
* the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
* bpp - 1 blocks in bpp different extents.
*/
static int ext4_da_writepages_trans_blocks(struct inode *inode)
{
int bpp = ext4_journal_blocks_per_page(inode);
return ext4_meta_trans_blocks(inode,
MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp);
}
/*
* mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages
* and underlying extent to map
*
* @mpd - where to look for pages
*
* Walk dirty pages in the mapping. If they are fully mapped, submit them for
* IO immediately. When we find a page which isn't mapped we start accumulating
* extent of buffers underlying these pages that needs mapping (formed by
* either delayed or unwritten buffers). We also lock the pages containing
* these buffers. The extent found is returned in @mpd structure (starting at
* mpd->lblk with length mpd->len blocks).
*
* Note that this function can attach bios to one io_end structure which are
* neither logically nor physically contiguous. Although it may seem like an
* unnecessary complication, it is actually inevitable in blocksize < pagesize
* case as we need to track IO to all buffers underlying a page in one io_end.
*/
static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
{
struct address_space *mapping = mpd->inode->i_mapping;
struct pagevec pvec;
unsigned int nr_pages;
long left = mpd->wbc->nr_to_write;
pgoff_t index = mpd->first_page;
pgoff_t end = mpd->last_page;
int tag;
int i, err = 0;
int blkbits = mpd->inode->i_blkbits;
ext4_lblk_t lblk;
struct buffer_head *head;
if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages)
tag = PAGECACHE_TAG_TOWRITE;
else
tag = PAGECACHE_TAG_DIRTY;
pagevec_init(&pvec, 0);
mpd->map.m_len = 0;
mpd->next_page = index;
while (index <= end) {
nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
if (nr_pages == 0)
goto out;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
/*
* At this point, the page may be truncated or
* invalidated (changing page->mapping to NULL), or
* even swizzled back from swapper_space to tmpfs file
* mapping. However, page->index will not change
* because we have a reference on the page.
*/
if (page->index > end)
goto out;
/*
* Accumulated enough dirty pages? This doesn't apply
* to WB_SYNC_ALL mode. For integrity sync we have to
* keep going because someone may be concurrently
* dirtying pages, and we might have synced a lot of
* newly appeared dirty pages, but have not synced all
* of the old dirty pages.
*/
if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0)
goto out;
/* If we can't merge this page, we are done. */
if (mpd->map.m_len > 0 && mpd->next_page != page->index)
goto out;
lock_page(page);
/*
* If the page is no longer dirty, or its mapping no
* longer corresponds to inode we are writing (which
* means it has been truncated or invalidated), or the
* page is already under writeback and we are not doing
* a data integrity writeback, skip the page
*/
if (!PageDirty(page) ||
(PageWriteback(page) &&
(mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
unlikely(page->mapping != mapping)) {
unlock_page(page);
continue;
}
wait_on_page_writeback(page);
BUG_ON(PageWriteback(page));
if (mpd->map.m_len == 0)
mpd->first_page = page->index;
mpd->next_page = page->index + 1;
/* Add all dirty buffers to mpd */
lblk = ((ext4_lblk_t)page->index) <<
(PAGE_SHIFT - blkbits);
head = page_buffers(page);
err = mpage_process_page_bufs(mpd, head, head, lblk);
if (err <= 0)
goto out;
err = 0;
left--;
}
pagevec_release(&pvec);
cond_resched();
}
return 0;
out:
pagevec_release(&pvec);
return err;
}
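/* write_cache_pages() callback used for the data=journal writeback path. */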
static int __writepage(struct page *page, struct writeback_control *wbc,
void *data)
{
struct address_space *mapping = data;
int ret = ext4_writepage(page, wbc);
mapping_set_error(mapping, ret);
return ret;
}
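/*
* Main delalloc writeback entry point: repeatedly find a range of dirty
* pages needing mapping, allocate blocks for it inside a transaction, and
* submit the mapped pages for IO, retrying after a forced commit on
* ENOSPC.
*/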
static int ext4_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
pgoff_t writeback_index = 0;
long nr_to_write = wbc->nr_to_write;
int range_whole = 0;
int cycled = 1;
handle_t *handle = NULL;
struct mpage_da_data mpd;
struct inode *inode = mapping->host;
int needed_blocks, rsv_blocks = 0, ret = 0;
struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
bool done;
struct blk_plug plug;
bool give_up_on_write = false;
trace_ext4_writepages(inode, wbc);
if (dax_mapping(mapping))
return dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev,
wbc);
/*
* No pages to write? This is mainly a kludge to avoid starting
* a transaction for special inodes like journal inode on last iput()
* because that could violate lock ordering on umount
*/
if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
goto out_writepages;
if (ext4_should_journal_data(inode)) {
struct blk_plug plug;
blk_start_plug(&plug);
ret = write_cache_pages(mapping, wbc, __writepage, mapping);
blk_finish_plug(&plug);
goto out_writepages;
}
/*
* If the filesystem has aborted, it is read-only, so return
* right away instead of dumping stack traces later on that
* will obscure the real source of the problem. We test
* EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
* the latter could be true if the filesystem is mounted
* read-only, and in that case, ext4_writepages should
* *never* be called, so if that ever happens, we would want
* the stack trace.
*/
if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) {
ret = -EROFS;
goto out_writepages;
}
if (ext4_should_dioread_nolock(inode)) {
/*
* We may need to convert up to one extent per block in
* the page and we may dirty the inode.
*/
rsv_blocks = 1 + (PAGE_SIZE >> inode->i_blkbits);
}
/*
* If we have inline data and arrive here, it means that
* we will soon create the block for the 1st page, so
* we'd better clear the inline data here.
*/
if (ext4_has_inline_data(inode)) {
/* Just inode will be modified... */
handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
goto out_writepages;
}
BUG_ON(ext4_test_inode_state(inode,
EXT4_STATE_MAY_INLINE_DATA));
ext4_destroy_inline_data(handle, inode);
ext4_journal_stop(handle);
}
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = 1;
if (wbc->range_cyclic) {
writeback_index = mapping->writeback_index;
if (writeback_index)
cycled = 0;
mpd.first_page = writeback_index;
mpd.last_page = -1;
} else {
mpd.first_page = wbc->range_start >> PAGE_SHIFT;
mpd.last_page = wbc->range_end >> PAGE_SHIFT;
}
mpd.inode = inode;
mpd.wbc = wbc;
ext4_io_submit_init(&mpd.io_submit, wbc);
retry:
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page);
done = false;
blk_start_plug(&plug);
while (!done && mpd.first_page <= mpd.last_page) {
/* For each extent of pages we use new io_end */
mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
if (!mpd.io_submit.io_end) {
ret = -ENOMEM;
break;
}
/*
* We have two constraints: We find one extent to map and we
* must always write out the whole page (makes a difference when
* blocksize < pagesize) so that we don't block on IO when we
* try to write out the rest of the page. Journalled mode is
* not supported by delalloc.
*/
BUG_ON(ext4_should_journal_data(inode));
needed_blocks = ext4_da_writepages_trans_blocks(inode);
/* start a new transaction */
handle = ext4_journal_start_with_reserve(inode,
EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
"%ld pages, ino %lu; err %d", __func__,
wbc->nr_to_write, inode->i_ino, ret);
/* Release allocated io_end */
ext4_put_io_end(mpd.io_submit.io_end);
break;
}
trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc);
ret = mpage_prepare_extent_to_map(&mpd);
if (!ret) {
if (mpd.map.m_len)
ret = mpage_map_and_submit_extent(handle, &mpd,
&give_up_on_write);
else {
/*
* We scanned the whole range (or exhausted
* nr_to_write), submitted what was mapped and
* didn't find anything needing mapping. We are
* done.
*/
done = true;
}
}
ext4_journal_stop(handle);
/* Submit prepared bio */
ext4_io_submit(&mpd.io_submit);
/* Unlock pages we didn't use */
mpage_release_unused_pages(&mpd, give_up_on_write);
/* Drop our io_end reference we got from init */
ext4_put_io_end(mpd.io_submit.io_end);
if (ret == -ENOSPC && sbi->s_journal) {
/*
* Commit the transaction which would
* free blocks released in the transaction
* and try again
*/
jbd2_journal_force_commit_nested(sbi->s_journal);
ret = 0;
continue;
}
/* Fatal error - ENOMEM, EIO... */
if (ret)
break;
}
blk_finish_plug(&plug);
if (!ret && !cycled && wbc->nr_to_write > 0) {
cycled = 1;
mpd.last_page = writeback_index - 1;
mpd.first_page = 0;
goto retry;
}
/* Update index */
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
/*
* Set the writeback_index so that range_cyclic
* mode will write it back later
*/
mapping->writeback_index = mpd.first_page;
out_writepages:
trace_ext4_writepages_result(inode, wbc, ret,
nr_to_write - wbc->nr_to_write);
return ret;
}
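/*
* Decide whether writes should fall back from delayed allocation to
* immediate allocation because the filesystem is close to running out of
* free clusters.
*/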
static int ext4_nonda_switch(struct super_block *sb)
{
s64 free_clusters, dirty_clusters;
struct ext4_sb_info *sbi = EXT4_SB(sb);
/*
* Switch to non-delalloc mode if we are running low
* on free blocks. The free block accounting via percpu
* counters can get slightly wrong with percpu_counter_batch getting
* accumulated on each CPU without updating global counters, and
* delalloc needs accurate free block accounting. So switch
* to non-delalloc when we are near the error range.
*/
free_clusters =
percpu_counter_read_positive(&sbi->s_freeclusters_counter);
dirty_clusters =
percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
/*
* Start pushing delalloc when 1/2 of free blocks are dirty.
*/
if (dirty_clusters && (free_clusters < 2 * dirty_clusters))
try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
if (2 * free_clusters < 3 * dirty_clusters ||
free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) {
/*
* The free block count is less than 150% of the dirty blocks,
* or free blocks are below the watermark.
*/
return 1;
}
return 0;
}
/* We always reserve for an inode update; the superblock could be there too */
static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len)
{
if (likely(ext4_has_feature_large_file(inode->i_sb)))
return 1;
if (pos + len <= 0x7fffffffULL)
return 1;
/* We might need to update the superblock to set LARGE_FILE */
return 2;
}
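/*
* write_begin for delayed allocation: prepare the page using
* ext4_da_get_block_prep(), which reserves a cluster instead of
* allocating blocks right away.
*/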
static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
int ret, retries = 0;
struct page *page;
pgoff_t index;
struct inode *inode = mapping->host;
handle_t *handle;
index = pos >> PAGE_SHIFT;
if (ext4_nonda_switch(inode->i_sb)) {
*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
return ext4_write_begin(file, mapping, pos,
len, flags, pagep, fsdata);
}
*fsdata = (void *)0;
trace_ext4_da_write_begin(inode, pos, len, flags);
if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
ret = ext4_da_write_inline_data_begin(mapping, inode,
pos, len, flags,
pagep, fsdata);
if (ret < 0)
return ret;
if (ret == 1)
return 0;
}
/*
* grab_cache_page_write_begin() can take a long time if the
* system is thrashing due to memory pressure, or if the page
* is being written back. So grab it first before we start
* the transaction handle. This also allows us to allocate
* the page (if needed) without using GFP_NOFS.
*/
retry_grab:
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page)
return -ENOMEM;
unlock_page(page);
/*
* With delayed allocation, we don't log the i_disksize update
* if there is delayed block allocation. But we still need
* to journal the i_disksize update if we write to the end
* of a file which has an already mapped buffer.
*/
retry_journal:
handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
ext4_da_write_credits(inode, pos, len));
if (IS_ERR(handle)) {
put_page(page);
return PTR_ERR(handle);
}
lock_page(page);
if (page->mapping != mapping) {
/* The page got truncated from under us */
unlock_page(page);
put_page(page);
ext4_journal_stop(handle);
goto retry_grab;
}
/* In case writeback began while the page was unlocked */
wait_for_stable_page(page);
#ifdef CONFIG_EXT4_FS_ENCRYPTION
ret = ext4_block_write_begin(page, pos, len,
ext4_da_get_block_prep);
#else
ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
#endif
if (ret < 0) {
unlock_page(page);
ext4_journal_stop(handle);
/*
* block_write_begin may have instantiated a few blocks
* outside i_size. Trim these off again. Don't need
* i_size_read because we hold i_mutex.
*/
if (pos + len > inode->i_size)
ext4_truncate_failed_write(inode);
if (ret == -ENOSPC &&
ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry_journal;
put_page(page);
return ret;
}
*pagep = page;
return ret;
}
/*
* Check if we should update i_disksize
* when writing to the end of the file without requiring block allocation
*/
static int ext4_da_should_update_i_disksize(struct page *page,
unsigned long offset)
{
struct buffer_head *bh;
struct inode *inode = page->mapping->host;
unsigned int idx;
int i;
bh = page_buffers(page);
idx = offset >> inode->i_blkbits;
for (i = 0; i < idx; i++)
bh = bh->b_this_page;
if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
return 0;
return 1;
}
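/*
* write_end for delayed allocation: update i_disksize when the write
* extended the file over already-mapped blocks, then finish through the
* inline-data path or generic_write_end().
*/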
static int ext4_da_write_end(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
struct inode *inode = mapping->host;
int ret = 0, ret2;
handle_t *handle = ext4_journal_current_handle();
loff_t new_i_size;
unsigned long start, end;
int write_mode = (int)(unsigned long)fsdata;
if (write_mode == FALL_BACK_TO_NONDELALLOC)
return ext4_write_end(file, mapping, pos,
len, copied, page, fsdata);
trace_ext4_da_write_end(inode, pos, len, copied);
start = pos & (PAGE_SIZE - 1);
end = start + copied - 1;
/*
* generic_write_end() will run mark_inode_dirty() if i_size
* changes. So let's piggyback the i_disksize mark_inode_dirty
* into that.
*/
new_i_size = pos + copied;
if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
if (ext4_has_inline_data(inode) ||
ext4_da_should_update_i_disksize(page, end)) {
ext4_update_i_disksize(inode, new_i_size);
/* We need to mark the inode dirty even if
* new_i_size is less than inode->i_size
* but greater than i_disksize (hint: delalloc).
*/
ext4_mark_inode_dirty(handle, inode);
}
}
if (write_mode != CONVERT_INLINE_DATA &&
ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
ext4_has_inline_data(inode))
ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied,
page);
else
ret2 = generic_write_end(file, mapping, pos, len, copied,
page, fsdata);
copied = ret2;
if (ret2 < 0)
ret = ret2;
ret2 = ext4_journal_stop(handle);
if (!ret)
ret = ret2;
return ret ? ret : copied;
}
static void ext4_da_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
{
/*
* Drop reserved blocks
*/
BUG_ON(!PageLocked(page));
if (!page_has_buffers(page))
goto out;
ext4_da_page_release_reservation(page, offset, length);
out:
ext4_invalidatepage(page, offset, length);
return;
}
/*
* Force all delayed allocation blocks to be allocated for a given inode.
*/
int ext4_alloc_da_blocks(struct inode *inode)
{
trace_ext4_alloc_da_blocks(inode);
if (!EXT4_I(inode)->i_reserved_data_blocks)
return 0;
/*
* We do something simple for now. The filemap_flush() will
* also start triggering a write of the data blocks, which is
* not strictly speaking necessary (and for users of
* laptop_mode, not even desirable). However, to do otherwise
* would require replicating code paths in:
*
* ext4_writepages() ->
* write_cache_pages() ---> (via passed in callback function)
* __mpage_da_writepage() -->
* mpage_add_bh_to_extent()
* mpage_da_map_blocks()
*
* The problem is that write_cache_pages(), located in
* mm/page-writeback.c, marks pages clean in preparation for
* doing I/O, which is not desirable if we're not planning on
* doing I/O at all.
*
* We could call write_cache_pages(), and then redirty all of
* the pages by calling redirty_page_for_writepage() but that
* would be ugly in the extreme. So instead we would need to
* replicate parts of the code in the above functions,
* simplifying them because we wouldn't actually intend to
* write out the pages, but rather only collect contiguous
* logical block extents, call the multi-block allocator, and
* then update the buffer heads with the block allocations.
*
* For now, though, we'll cheat by calling filemap_flush(),
* which will map the blocks, and start the I/O, but not
* actually wait for the I/O to complete.
*/
return filemap_flush(inode->i_mapping);
}
/*
* bmap() is special. It gets used by applications such as lilo and by
* the swapper to find the on-disk block of a specific piece of data.
*
* Naturally, this is dangerous if the block concerned is still in the
* journal. If somebody makes a swapfile on an ext4 data-journaling
* filesystem and enables swap, then they may get a nasty shock when the
* data getting swapped to that swapfile suddenly gets overwritten by
* the original zero's written out previously to the journal and
* awaiting writeback in the kernel's buffer cache.
*
* So, if we see any bmap calls here on a modified, data-journaled file,
* take extra steps to flush any blocks which might be in the cache.
*/
static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
{
struct inode *inode = mapping->host;
journal_t *journal;
int err;
/*
* We can get here for an inline file via the FIBMAP ioctl
*/
if (ext4_has_inline_data(inode))
return 0;
if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
test_opt(inode->i_sb, DELALLOC)) {
/*
* With delalloc we want to sync the file
* so that we can make sure we allocate
* blocks for the file.
*/
filemap_write_and_wait(mapping);
}
if (EXT4_JOURNAL(inode) &&
ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
/*
* This is a REALLY heavyweight approach, but the use of
* bmap on dirty files is expected to be extremely rare:
* only if we run lilo or swapon on a freshly made file
* do we expect this to happen.
*
* (bmap requires CAP_SYS_RAWIO so this does not
* represent an unprivileged user DOS attack --- we'd be
* in trouble if mortal users could trigger this path at
* will.)
*
* NB. EXT4_STATE_JDATA is not set on files other than
* regular files. If somebody wants to bmap a directory
* or symlink and gets confused because the buffer
* hasn't yet been flushed to disk, they deserve
* everything they get.
*/
ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
journal = EXT4_JOURNAL(inode);
jbd2_journal_lock_updates(journal);
err = jbd2_journal_flush(journal);
jbd2_journal_unlock_updates(journal);
if (err)
return 0;
}
return generic_block_bmap(mapping, block, ext4_get_block);
}
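/* ->readpage: try the inline-data path first, else do an mpage read. */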
static int ext4_readpage(struct file *file, struct page *page)
{
int ret = -EAGAIN;
struct inode *inode = page->mapping->host;
trace_ext4_readpage(page);
if (ext4_has_inline_data(inode))
ret = ext4_readpage_inline(inode, page);
if (ret == -EAGAIN)
return ext4_mpage_readpages(page->mapping, NULL, page, 1);
return ret;
}
static int
ext4_readpages(struct file *file, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages)
{
struct inode *inode = mapping->host;
/* If the file has inline data, no need to do readpages. */
if (ext4_has_inline_data(inode))
return 0;
return ext4_mpage_readpages(mapping, pages, NULL, nr_pages);
}
static void ext4_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
{
trace_ext4_invalidatepage(page, offset, length);
/* No journalling happens on data buffers when this function is used */
WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));
block_invalidatepage(page, offset, length);
}
static int __ext4_journalled_invalidatepage(struct page *page,
unsigned int offset,
unsigned int length)
{
journal_t *journal = EXT4_JOURNAL(page->mapping->host);
trace_ext4_journalled_invalidatepage(page, offset, length);
/*
* If it's a full truncate we just forget about the pending dirtying
*/
if (offset == 0 && length == PAGE_SIZE)
ClearPageChecked(page);
return jbd2_journal_invalidatepage(journal, page, offset, length);
}
/* Wrapper for aops... */
static void ext4_journalled_invalidatepage(struct page *page,
unsigned int offset,
unsigned int length)
{
WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0);
}
static int ext4_releasepage(struct page *page, gfp_t wait)
{
journal_t *journal = EXT4_JOURNAL(page->mapping->host);
trace_ext4_releasepage(page);
/* Page has dirty journalled data -> cannot release */
if (PageChecked(page))
return 0;
if (journal)
return jbd2_journal_try_to_free_buffers(journal, page, wait);
else
return try_to_free_buffers(page);
}
#ifdef CONFIG_FS_DAX
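/*
* get_block callback for DAX faults: allocate pre-zeroed blocks when
* @create is set and convert unwritten extents to written before handing
* them to DAX, which cannot cope with unwritten extents.
*/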
int ext4_dax_mmap_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
int ret, err;
int credits;
struct ext4_map_blocks map;
handle_t *handle = NULL;
int flags = 0;
ext4_debug("ext4_dax_mmap_get_block: inode %lu, create flag %d\n",
inode->i_ino, create);
map.m_lblk = iblock;
map.m_len = bh_result->b_size >> inode->i_blkbits;
credits = ext4_chunk_trans_blocks(inode, map.m_len);
if (create) {
flags |= EXT4_GET_BLOCKS_PRE_IO | EXT4_GET_BLOCKS_CREATE_ZERO;
handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
return ret;
}
}
ret = ext4_map_blocks(handle, inode, &map, flags);
if (create) {
err = ext4_journal_stop(handle);
if (ret >= 0 && err < 0)
ret = err;
}
if (ret <= 0)
goto out;
if (map.m_flags & EXT4_MAP_UNWRITTEN) {
int err2;
/*
* We are protected by i_mmap_sem so we know block cannot go
* away from under us even though we dropped i_data_sem.
* Convert extent to written and write zeros there.
*
* Note: We may get here even when create == 0.
*/
handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
goto out;
}
err = ext4_map_blocks(handle, inode, &map,
EXT4_GET_BLOCKS_CONVERT | EXT4_GET_BLOCKS_CREATE_ZERO);
if (err < 0)
ret = err;
err2 = ext4_journal_stop(handle);
if (err2 < 0 && ret > 0)
ret = err2;
}
out:
WARN_ON_ONCE(ret == 0 && create);
if (ret > 0) {
map_bh(bh_result, inode->i_sb, map.m_pblk);
/*
* At least for now we have to clear BH_New so that DAX code
* doesn't attempt to zero blocks again in a racy way.
*/
map.m_flags &= ~EXT4_MAP_NEW;
ext4_update_bh_state(bh_result, map.m_flags);
bh_result->b_size = map.m_len << inode->i_blkbits;
ret = 0;
}
return ret;
}
#endif
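/*
* Completion callback for async direct IO: record the completed range in
* the io_end and drop our reference, after which any needed unwritten
* extent conversion can proceed.
*/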
static int ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
ssize_t size, void *private)
{
ext4_io_end_t *io_end = private;
/* if not async direct IO just return */
if (!io_end)
return 0;
ext_debug("ext4_end_io_dio(): io_end 0x%p "
"for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
io_end, io_end->inode->i_ino, iocb, offset, size);
/*
* Error during AIO DIO. We cannot convert unwritten extents as the
* data was not written. Just clear the unwritten flag and drop io_end.
*/
if (size <= 0) {
ext4_clear_io_unwritten_flag(io_end);
size = 0;
}
io_end->offset = offset;
io_end->size = size;
ext4_put_io_end(io_end);
return 0;
}
/*
* For ext4 extent files, ext4 will do direct-io writes to holes,
* preallocated extents, and writes that extend the file; no need to
* fall back to buffered IO.
*
* For holes, we fallocate those blocks and mark them as unwritten.
* If those blocks were preallocated, we make sure they are split, but
* still keep the range to write as unwritten.
*
* The unwritten extents will be converted to written when DIO is completed.
* For async direct IO, since the IO may still be pending when we return, we
* set up an end_io callback function, which will do the conversion
* when the async direct IO is completed.
*
* If the O_DIRECT write will extend the file then add this inode to the
* orphan list. So recovery will truncate it back to the original size
* if the machine crashes during the write.
*
*/
static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
loff_t offset)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
ssize_t ret;
size_t count = iov_iter_count(iter);
int overwrite = 0;
get_block_t *get_block_func = NULL;
int dio_flags = 0;
loff_t final_size = offset + count;
/* Use the old path for reads and writes beyond i_size. */
if (iov_iter_rw(iter) != WRITE || final_size > inode->i_size)
return ext4_ind_direct_IO(iocb, iter, offset);
BUG_ON(iocb->private == NULL);
/*
* Make all waiters for direct IO properly wait also for extent
* conversion. This also disallows race between truncate() and
* overwrite DIO as i_dio_count needs to be incremented under i_mutex.
*/
if (iov_iter_rw(iter) == WRITE)
inode_dio_begin(inode);
/* If we do a overwrite dio, i_mutex locking can be released */
overwrite = *((int *)iocb->private);
if (overwrite)
inode_unlock(inode);
/*
* We can do direct writes to holes and to fallocated extents.
*
* Blocks allocated to fill a hole are marked as unwritten to prevent
* a parallel buffered read from exposing the stale data before DIO
* completes the data IO.
*
* As to previously fallocated extents, ext4 get_block will simply
* mark the buffer mapped but still keep the extents unwritten.
*
* For the non-AIO case, we will convert those unwritten extents to written
* after returning from blockdev_direct_IO. That saves us from
* allocating an io_end structure and also the overhead of offloading
* the extent conversion to a workqueue.
*
* For async DIO, the conversion needs to be deferred when the
* IO is completed. The ext4 end_io callback function will be
* called to take care of the conversion work. Here for async
* case, we allocate an io_end structure to hook to the iocb.
*/
iocb->private = NULL;
if (overwrite)
get_block_func = ext4_dio_get_block_overwrite;
else if (is_sync_kiocb(iocb)) {
get_block_func = ext4_dio_get_block_unwritten_sync;
dio_flags = DIO_LOCKING;
} else {
get_block_func = ext4_dio_get_block_unwritten_async;
dio_flags = DIO_LOCKING;
}
#ifdef CONFIG_EXT4_FS_ENCRYPTION
BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
#endif
if (IS_DAX(inode))
ret = dax_do_io(iocb, inode, iter, offset, get_block_func,
ext4_end_io_dio, dio_flags);
else
ret = __blockdev_direct_IO(iocb, inode,
inode->i_sb->s_bdev, iter, offset,
get_block_func,
ext4_end_io_dio, NULL, dio_flags);
if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
EXT4_STATE_DIO_UNWRITTEN)) {
int err;
/*
* For the non-AIO case, since the IO is already
* completed, we can do the conversion right here.
*/
err = ext4_convert_unwritten_extents(NULL, inode,
offset, ret);
if (err < 0)
ret = err;
ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
}
if (iov_iter_rw(iter) == WRITE)
inode_dio_end(inode);
/* take i_mutex locking again if we did an overwrite dio */
if (overwrite)
inode_lock(inode);
return ret;
}
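/*
* Top-level ->direct_IO: fall back to buffered IO (by returning 0) for
* encrypted, data-journalled, or inline-data inodes, then dispatch to the
* extent-based or indirect DIO implementation.
*/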
static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
loff_t offset)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
size_t count = iov_iter_count(iter);
ssize_t ret;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode))
return 0;
#endif
/*
* If we are doing data journalling we don't support O_DIRECT
*/
if (ext4_should_journal_data(inode))
return 0;
/* Let buffer I/O handle the inline data case. */
if (ext4_has_inline_data(inode))
return 0;
trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
ret = ext4_ext_direct_IO(iocb, iter, offset);
else
ret = ext4_ind_direct_IO(iocb, iter, offset);
trace_ext4_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret);
return ret;
}
/*
* Pages can be marked dirty completely asynchronously from ext4's journalling
* activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do
* much here because ->set_page_dirty is called under VFS locks. The page is
* not necessarily locked.
*
* We cannot just dirty the page and leave attached buffers clean, because the
* buffers' dirty state is "definitive". We cannot just set the buffers dirty
* or jbddirty because all the journalling code will explode.
*
* So what we do is to mark the page "pending dirty" and next time writepage
* is called, propagate that into the buffers appropriately.
*/
static int ext4_journalled_set_page_dirty(struct page *page)
{
SetPageChecked(page);
return __set_page_dirty_nobuffers(page);
}
static const struct address_space_operations ext4_aops = {
.readpage = ext4_readpage,
.readpages = ext4_readpages,
.writepage = ext4_writepage,
.writepages = ext4_writepages,
.write_begin = ext4_write_begin,
.write_end = ext4_write_end,
.bmap = ext4_bmap,
.invalidatepage = ext4_invalidatepage,
.releasepage = ext4_releasepage,
.direct_IO = ext4_direct_IO,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
};
static const struct address_space_operations ext4_journalled_aops = {
.readpage = ext4_readpage,
.readpages = ext4_readpages,
.writepage = ext4_writepage,
.writepages = ext4_writepages,
.write_begin = ext4_write_begin,
.write_end = ext4_journalled_write_end,
.set_page_dirty = ext4_journalled_set_page_dirty,
.bmap = ext4_bmap,
.invalidatepage = ext4_journalled_invalidatepage,
.releasepage = ext4_releasepage,
.direct_IO = ext4_direct_IO,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
};
static const struct address_space_operations ext4_da_aops = {
.readpage = ext4_readpage,
.readpages = ext4_readpages,
.writepage = ext4_writepage,
.writepages = ext4_writepages,
.write_begin = ext4_da_write_begin,
.write_end = ext4_da_write_end,
.bmap = ext4_bmap,
.invalidatepage = ext4_da_invalidatepage,
.releasepage = ext4_releasepage,
.direct_IO = ext4_direct_IO,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
};
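/*
* Choose the address_space operations for an inode based on its journal
* data mode and the delalloc mount option.
*/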
void ext4_set_aops(struct inode *inode)
{
switch (ext4_inode_journal_mode(inode)) {
case EXT4_INODE_ORDERED_DATA_MODE:
ext4_set_inode_state(inode, EXT4_STATE_ORDERED_MODE);
break;
case EXT4_INODE_WRITEBACK_DATA_MODE:
ext4_clear_inode_state(inode, EXT4_STATE_ORDERED_MODE);
break;
case EXT4_INODE_JOURNAL_DATA_MODE:
inode->i_mapping->a_ops = &ext4_journalled_aops;
return;
default:
BUG();
}
if (test_opt(inode->i_sb, DELALLOC))
inode->i_mapping->a_ops = &ext4_da_aops;
else
inode->i_mapping->a_ops = &ext4_aops;
}
static int __ext4_block_zero_page_range(handle_t *handle,
struct address_space *mapping, loff_t from, loff_t length)
{
ext4_fsblk_t index = from >> PAGE_SHIFT;
unsigned offset = from & (PAGE_SIZE-1);
unsigned blocksize, pos;
ext4_lblk_t iblock;
struct inode *inode = mapping->host;
struct buffer_head *bh;
struct page *page;
int err = 0;
page = find_or_create_page(mapping, from >> PAGE_SHIFT,
mapping_gfp_constraint(mapping, ~__GFP_FS));
if (!page)
return -ENOMEM;
blocksize = inode->i_sb->s_blocksize;
iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
if (!page_has_buffers(page))
create_empty_buffers(page, blocksize, 0);
/* Find the buffer that contains "offset" */
bh = page_buffers(page);
pos = blocksize;
while (offset >= pos) {
bh = bh->b_this_page;
iblock++;
pos += blocksize;
}
if (buffer_freed(bh)) {
BUFFER_TRACE(bh, "freed: skip");
goto unlock;
}
if (!buffer_mapped(bh)) {
BUFFER_TRACE(bh, "unmapped");
ext4_get_block(inode, iblock, bh, 0);
/* unmapped? It's a hole - nothing to do */
if (!buffer_mapped(bh)) {
BUFFER_TRACE(bh, "still unmapped");
goto unlock;
}
}
/* Ok, it's mapped. Make sure it's up-to-date */
if (PageUptodate(page))
set_buffer_uptodate(bh);
if (!buffer_uptodate(bh)) {
err = -EIO;
ll_rw_block(READ, 1, &bh);
wait_on_buffer(bh);
/* Uhhuh. Read error. Complain and punt. */
if (!buffer_uptodate(bh))
goto unlock;
if (S_ISREG(inode->i_mode) &&
ext4_encrypted_inode(inode)) {
/* We expect the key to be set. */
BUG_ON(!ext4_has_encryption_key(inode));
BUG_ON(blocksize != PAGE_SIZE);
WARN_ON_ONCE(ext4_decrypt(page));
}
}
if (ext4_should_journal_data(inode)) {
BUFFER_TRACE(bh, "get write access");
err = ext4_journal_get_write_access(handle, bh);
if (err)
goto unlock;
}
zero_user(page, offset, length);
BUFFER_TRACE(bh, "zeroed end of block");
if (ext4_should_journal_data(inode)) {
err = ext4_handle_dirty_metadata(handle, inode, bh);
} else {
err = 0;
mark_buffer_dirty(bh);
if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE))
err = ext4_jbd2_file_inode(handle, inode);
}
unlock:
unlock_page(page);
put_page(page);
return err;
}
/*
 * ext4_block_zero_page_range() zeros out a mapping of length 'length'
 * starting from file offset 'from'. The range to be zeroed must be
 * contained within one block. If the specified range exceeds the end
 * of the block, it will be shortened to the end of the block that
 * corresponds to 'from'.
 */
static int ext4_block_zero_page_range(handle_t *handle,
struct address_space *mapping, loff_t from, loff_t length)
{
struct inode *inode = mapping->host;
unsigned offset = from & (PAGE_SIZE-1);
unsigned blocksize = inode->i_sb->s_blocksize;
unsigned max = blocksize - (offset & (blocksize - 1));
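/*
 * Illustrative example: with a 1k block size and from = 5000,
 * offset = 5000 & 4095 = 904 and max = 1024 - (904 & 1023) = 120,
 * i.e. 120 bytes remain in the block containing 'from'.
 */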
/*
* correct length if it does not fall between
* 'from' and the end of the block
*/
if (length > max || length < 0)
length = max;
if (IS_DAX(inode))
return dax_zero_page_range(inode, from, length, ext4_get_block);
return __ext4_block_zero_page_range(handle, mapping, from, length);
}
/*
* ext4_block_truncate_page() zeroes out a mapping from file offset `from'
* up to the end of the block which corresponds to `from'.
* This is required during truncate. We need to physically zero the tail end
* of that block so it doesn't yield old data if the file is later grown.
*/
static int ext4_block_truncate_page(handle_t *handle,
struct address_space *mapping, loff_t from)
{
unsigned offset = from & (PAGE_SIZE-1);
unsigned length;
unsigned blocksize;
struct inode *inode = mapping->host;
blocksize = inode->i_sb->s_blocksize;
length = blocksize - (offset & (blocksize - 1));
return ext4_block_zero_page_range(handle, mapping, from, length);
}
int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
loff_t lstart, loff_t length)
{
struct super_block *sb = inode->i_sb;
struct address_space *mapping = inode->i_mapping;
unsigned partial_start, partial_end;
ext4_fsblk_t start, end;
loff_t byte_end = (lstart + length - 1);
int err = 0;
partial_start = lstart & (sb->s_blocksize - 1);
partial_end = byte_end & (sb->s_blocksize - 1);
start = lstart >> sb->s_blocksize_bits;
end = byte_end >> sb->s_blocksize_bits;
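/*
 * Illustrative example: lstart = 1000, length = 3000 with a 4k block
 * size gives byte_end = 3999, start = end = 0, partial_start = 1000
 * and partial_end = 3999, so the single-block case below zeroes
 * bytes 1000..3999.
 */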
/* Handle partial zero within the single block */
if (start == end &&
(partial_start || (partial_end != sb->s_blocksize - 1))) {
err = ext4_block_zero_page_range(handle, mapping,
lstart, length);
return err;
}
/* Handle partial zero out on the start of the range */
if (partial_start) {
err = ext4_block_zero_page_range(handle, mapping,
lstart, sb->s_blocksize);
if (err)
return err;
}
/* Handle partial zero out on the end of the range */
if (partial_end != sb->s_blocksize - 1)
err = ext4_block_zero_page_range(handle, mapping,
byte_end - partial_end,
partial_end + 1);
return err;
}
int ext4_can_truncate(struct inode *inode)
{
if (S_ISREG(inode->i_mode))
return 1;
if (S_ISDIR(inode->i_mode))
return 1;
if (S_ISLNK(inode->i_mode))
return !ext4_inode_is_fast_symlink(inode);
return 0;
}
/*
* We have to make sure i_disksize gets properly updated before we truncate
* page cache due to hole punching or zero range. Otherwise i_disksize update
* can get lost as it may have been postponed to submission of writeback but
* that will never happen after we truncate page cache.
*/
int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
loff_t len)
{
handle_t *handle;
loff_t size = i_size_read(inode);
WARN_ON(!inode_is_locked(inode));
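/*
 * If the punched range starts beyond i_size or ends before it,
 * truncating the page cache cannot lose a pending i_disksize update,
 * so there is nothing to do.
 */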
if (offset > size || offset + len < size)
return 0;
if (EXT4_I(inode)->i_disksize >= size)
return 0;
handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
if (IS_ERR(handle))
return PTR_ERR(handle);
ext4_update_i_disksize(inode, size);
ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);
return 0;
}
/*
* ext4_punch_hole: punches a hole in a file by releasing the blocks
* associated with the given offset and length
*
* @inode: File inode
* @offset: The offset where the hole will begin
* @len: The length of the hole
*
* Returns: 0 on success or negative on failure
*/
int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
{
struct super_block *sb = inode->i_sb;
ext4_lblk_t first_block, stop_block;
struct address_space *mapping = inode->i_mapping;
loff_t first_block_offset, last_block_offset;
handle_t *handle;
unsigned int credits;
int ret = 0;
if (!S_ISREG(inode->i_mode))
return -EOPNOTSUPP;
trace_ext4_punch_hole(inode, offset, length, 0);
/*
* Write out all dirty pages to avoid race conditions
* Then release them.
*/
if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
ret = filemap_write_and_wait_range(mapping, offset,
offset + length - 1);
if (ret)
return ret;
}
inode_lock(inode);
/* No need to punch hole beyond i_size */
if (offset >= inode->i_size)
goto out_mutex;
/*
* If the hole extends beyond i_size, set the hole
* to end after the page that contains i_size
*/
if (offset + length > inode->i_size) {
length = inode->i_size +
PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
offset;
}
if (offset & (sb->s_blocksize - 1) ||
(offset + length) & (sb->s_blocksize - 1)) {
/*
* Attach jinode to inode for jbd2 if we do any zeroing of
* partial block
*/
ret = ext4_inode_attach_jinode(inode);
if (ret < 0)
goto out_mutex;
}
/* Wait for all existing dio workers, newcomers will block on i_mutex */
ext4_inode_block_unlocked_dio(inode);
inode_dio_wait(inode);
/*
* Prevent page faults from reinstantiating pages we have released from
* page cache.
*/
down_write(&EXT4_I(inode)->i_mmap_sem);
first_block_offset = round_up(offset, sb->s_blocksize);
last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
/* Now release the pages and zero the block-aligned parts of the pages */
if (last_block_offset > first_block_offset) {
ret = ext4_update_disksize_before_punch(inode, offset, length);
if (ret)
goto out_dio;
truncate_pagecache_range(inode, first_block_offset,
last_block_offset);
}
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
credits = ext4_writepage_trans_blocks(inode);
else
credits = ext4_blocks_for_truncate(inode);
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
ext4_std_error(sb, ret);
goto out_dio;
}
ret = ext4_zero_partial_blocks(handle, inode, offset,
length);
if (ret)
goto out_stop;
first_block = (offset + sb->s_blocksize - 1) >>
EXT4_BLOCK_SIZE_BITS(sb);
stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
/* If there are no blocks to remove, return now */
if (first_block >= stop_block)
goto out_stop;
down_write(&EXT4_I(inode)->i_data_sem);
ext4_discard_preallocations(inode);
ret = ext4_es_remove_extent(inode, first_block,
stop_block - first_block);
if (ret) {
up_write(&EXT4_I(inode)->i_data_sem);
goto out_stop;
}
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
ret = ext4_ext_remove_space(inode, first_block,
stop_block - 1);
else
ret = ext4_ind_remove_space(handle, inode, first_block,
stop_block);
up_write(&EXT4_I(inode)->i_data_sem);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
out_stop:
ext4_journal_stop(handle);
out_dio:
up_write(&EXT4_I(inode)->i_mmap_sem);
ext4_inode_resume_unlocked_dio(inode);
out_mutex:
inode_unlock(inode);
return ret;
}
int ext4_inode_attach_jinode(struct inode *inode)
{
struct ext4_inode_info *ei = EXT4_I(inode);
struct jbd2_inode *jinode;
if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
return 0;
jinode = jbd2_alloc_inode(GFP_KERNEL);
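/*
 * Allocate before taking i_lock (the allocation may sleep), then
 * re-check under the lock in case another task attached a jinode
 * first; an unused allocation is freed after the lock is dropped.
 */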
spin_lock(&inode->i_lock);
if (!ei->jinode) {
if (!jinode) {
spin_unlock(&inode->i_lock);
return -ENOMEM;
}
ei->jinode = jinode;
jbd2_journal_init_jbd_inode(ei->jinode, inode);
jinode = NULL;
}
spin_unlock(&inode->i_lock);
if (unlikely(jinode != NULL))
jbd2_free_inode(jinode);
return 0;
}
/*
* ext4_truncate()
*
* We block out ext4_get_block() block instantiations across the entire
* transaction, and VFS/VM ensures that ext4_truncate() cannot run
* simultaneously on behalf of the same inode.
*
* As we work through the truncate and commit bits of it to the journal there
* is one core, guiding principle: the file's tree must always be consistent on
* disk. We must be able to restart the truncate after a crash.
*
* The file's tree may be transiently inconsistent in memory (although it
* probably isn't), but whenever we close off and commit a journal transaction,
* the contents of (the filesystem + the journal) must be consistent and
* restartable. It's pretty simple, really: bottom up, right to left (although
* left-to-right works OK too).
*
* Note that at recovery time, journal replay occurs *before* the restart of
* truncate against the orphan inode list.
*
* The committed inode has the new, desired i_size (which is the same as
* i_disksize in this case). After a crash, ext4_orphan_cleanup() will see
* that this inode's truncate did not complete and it will again call
* ext4_truncate() to have another go. So there will be instantiated blocks
* to the right of the truncation point in a crashed ext4 filesystem. But
* that's fine - as long as they are linked from the inode, the post-crash
* ext4_truncate() run will find them and release them.
*/
void ext4_truncate(struct inode *inode)
{
struct ext4_inode_info *ei = EXT4_I(inode);
unsigned int credits;
handle_t *handle;
struct address_space *mapping = inode->i_mapping;
/*
* There is a possibility that we're either freeing the inode
* or it's a completely new inode. In those cases we might not
* have i_mutex locked because it's not necessary.
*/
if (!(inode->i_state & (I_NEW|I_FREEING)))
WARN_ON(!inode_is_locked(inode));
trace_ext4_truncate_enter(inode);
if (!ext4_can_truncate(inode))
return;
ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
if (ext4_has_inline_data(inode)) {
int has_inline = 1;
ext4_inline_data_truncate(inode, &has_inline);
if (has_inline)
return;
}
/* If we zero out the tail of the page, we have to create a jinode for jbd2 */
if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
if (ext4_inode_attach_jinode(inode) < 0)
return;
}
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
credits = ext4_writepage_trans_blocks(inode);
else
credits = ext4_blocks_for_truncate(inode);
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
if (IS_ERR(handle)) {
ext4_std_error(inode->i_sb, PTR_ERR(handle));
return;
}
if (inode->i_size & (inode->i_sb->s_blocksize - 1))
ext4_block_truncate_page(handle, mapping, inode->i_size);
/*
* We add the inode to the orphan list, so that if this
* truncate spans multiple transactions, and we crash, we will
* resume the truncate when the filesystem recovers. It also
* marks the inode dirty, to catch the new size.
*
* Implication: the file must always be in a sane, consistent
* truncatable state while each transaction commits.
*/
if (ext4_orphan_add(handle, inode))
goto out_stop;
down_write(&EXT4_I(inode)->i_data_sem);
ext4_discard_preallocations(inode);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
ext4_ext_truncate(handle, inode);
else
ext4_ind_truncate(handle, inode);
up_write(&ei->i_data_sem);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
out_stop:
/*
* If this was a simple ftruncate() and the file will remain alive,
* then we need to clear up the orphan record which we created above.
* However, if this was a real unlink then we were called by
* ext4_evict_inode(), and we allow that function to clean up the
* orphan info for us.
*/
if (inode->i_nlink)
ext4_orphan_del(handle, inode);
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);
trace_ext4_truncate_exit(inode);
}
/*
* ext4_get_inode_loc returns with an extra refcount against the inode's
* underlying buffer_head on success. If 'in_mem' is true, we have all
* data in memory that is needed to recreate the on-disk version of this
* inode.
*/
static int __ext4_get_inode_loc(struct inode *inode,
struct ext4_iloc *iloc, int in_mem)
{
struct ext4_group_desc *gdp;
struct buffer_head *bh;
struct super_block *sb = inode->i_sb;
ext4_fsblk_t block;
int inodes_per_block, inode_offset;
iloc->bh = NULL;
if (!ext4_valid_inum(sb, inode->i_ino))
return -EFSCORRUPTED;
iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
if (!gdp)
return -EIO;
/*
* Figure out the offset within the block group inode table
*/
inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
inode_offset = ((inode->i_ino - 1) %
EXT4_INODES_PER_GROUP(sb));
block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
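/*
 * Illustrative example: with 256-byte inodes in a 4k block,
 * inodes_per_block = 16, so inode_offset 17 lands in the second
 * inode table block at byte offset (17 % 16) * 256 = 256.
 */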
bh = sb_getblk(sb, block);
if (unlikely(!bh))
return -ENOMEM;
if (!buffer_uptodate(bh)) {
lock_buffer(bh);
/*
* If the buffer has the write error flag, we have failed
* to write out another inode in the same block. In this
* case, we don't have to read the block because we may
* read the old inode data successfully.
*/
if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
set_buffer_uptodate(bh);
if (buffer_uptodate(bh)) {
/* someone brought it uptodate while we waited */
unlock_buffer(bh);
goto has_buffer;
}
/*
* If we have all information of the inode in memory and this
* is the only valid inode in the block, we need not read the
* block.
*/
if (in_mem) {
struct buffer_head *bitmap_bh;
int i, start;
start = inode_offset & ~(inodes_per_block - 1);
/* Is the inode bitmap in cache? */
bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
if (unlikely(!bitmap_bh))
goto make_io;
/*
* If the inode bitmap isn't in cache then the
* optimisation may end up performing two reads instead
* of one, so skip it.
*/
if (!buffer_uptodate(bitmap_bh)) {
brelse(bitmap_bh);
goto make_io;
}
for (i = start; i < start + inodes_per_block; i++) {
if (i == inode_offset)
continue;
if (ext4_test_bit(i, bitmap_bh->b_data))
break;
}
brelse(bitmap_bh);
if (i == start + inodes_per_block) {
/* all other inodes are free, so skip I/O */
memset(bh->b_data, 0, bh->b_size);
set_buffer_uptodate(bh);
unlock_buffer(bh);
goto has_buffer;
}
}
make_io:
/*
* If we need to do any I/O, try to pre-readahead extra
* blocks from the inode table.
*/
if (EXT4_SB(sb)->s_inode_readahead_blks) {
ext4_fsblk_t b, end, table;
unsigned num;
__u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks;
table = ext4_inode_table(sb, gdp);
/* s_inode_readahead_blks is always a power of 2 */
b = block & ~((ext4_fsblk_t) ra_blks - 1);
if (table > b)
b = table;
end = b + ra_blks;
num = EXT4_INODES_PER_GROUP(sb);
if (ext4_has_group_desc_csum(sb))
num -= ext4_itable_unused_count(sb, gdp);
table += num / inodes_per_block;
if (end > table)
end = table;
while (b <= end)
sb_breadahead(sb, b++);
}
/*
* There are other valid inodes in the buffer, this inode
* has in-inode xattrs, or we don't have this inode in memory.
* Read the block from disk.
*/
trace_ext4_load_inode(inode);
get_bh(bh);
bh->b_end_io = end_buffer_read_sync;
submit_bh(READ | REQ_META | REQ_PRIO, bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
EXT4_ERROR_INODE_BLOCK(inode, block,
"unable to read itable block");
brelse(bh);
return -EIO;
}
}
has_buffer:
iloc->bh = bh;
return 0;
}
int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
{
/* We have all inode data except xattrs in memory here. */
return __ext4_get_inode_loc(inode, iloc,
!ext4_test_inode_state(inode, EXT4_STATE_XATTR));
}
void ext4_set_inode_flags(struct inode *inode)
{
unsigned int flags = EXT4_I(inode)->i_flags;
unsigned int new_fl = 0;
if (flags & EXT4_SYNC_FL)
new_fl |= S_SYNC;
if (flags & EXT4_APPEND_FL)
new_fl |= S_APPEND;
if (flags & EXT4_IMMUTABLE_FL)
new_fl |= S_IMMUTABLE;
if (flags & EXT4_NOATIME_FL)
new_fl |= S_NOATIME;
if (flags & EXT4_DIRSYNC_FL)
new_fl |= S_DIRSYNC;
if (test_opt(inode->i_sb, DAX) && S_ISREG(inode->i_mode))
new_fl |= S_DAX;
inode_set_flags(inode, new_fl,
S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX);
}
/* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
void ext4_get_inode_flags(struct ext4_inode_info *ei)
{
unsigned int vfs_fl;
unsigned long old_fl, new_fl;
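/*
 * Lock-free update: recompute the flag word from a snapshot and let
 * cmpxchg() retry the loop if another task changed ei->i_flags in
 * the meantime.
 */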
do {
vfs_fl = ei->vfs_inode.i_flags;
old_fl = ei->i_flags;
new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|
EXT4_DIRSYNC_FL);
if (vfs_fl & S_SYNC)
new_fl |= EXT4_SYNC_FL;
if (vfs_fl & S_APPEND)
new_fl |= EXT4_APPEND_FL;
if (vfs_fl & S_IMMUTABLE)
new_fl |= EXT4_IMMUTABLE_FL;
if (vfs_fl & S_NOATIME)
new_fl |= EXT4_NOATIME_FL;
if (vfs_fl & S_DIRSYNC)
new_fl |= EXT4_DIRSYNC_FL;
} while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl);
}
static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
struct ext4_inode_info *ei)
{
blkcnt_t i_blocks ;
struct inode *inode = &(ei->vfs_inode);
struct super_block *sb = inode->i_sb;
if (ext4_has_feature_huge_file(sb)) {
/* we are using combined 48 bit field */
i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
le32_to_cpu(raw_inode->i_blocks_lo);
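/*
 * Illustrative arithmetic: i_blocks_high = 1 and i_blocks_lo = 0
 * combine to 1 << 32 = 4294967296 units of 512 bytes (2 TiB)
 * before any HUGE_FILE scaling.
 */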
if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
/* i_blocks represent file system block size */
return i_blocks << (inode->i_blkbits - 9);
} else {
return i_blocks;
}
} else {
return le32_to_cpu(raw_inode->i_blocks_lo);
}
}
static inline void ext4_iget_extra_inode(struct inode *inode,
struct ext4_inode *raw_inode,
struct ext4_inode_info *ei)
{
__le32 *magic = (void *)raw_inode +
EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
ext4_set_inode_state(inode, EXT4_STATE_XATTR);
ext4_find_inline_data_nolock(inode);
} else
EXT4_I(inode)->i_inline_off = 0;
}
int ext4_get_projid(struct inode *inode, kprojid_t *projid)
{
if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_RO_COMPAT_PROJECT))
return -EOPNOTSUPP;
*projid = EXT4_I(inode)->i_projid;
return 0;
}
struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
{
struct ext4_iloc iloc;
struct ext4_inode *raw_inode;
struct ext4_inode_info *ei;
struct inode *inode;
journal_t *journal = EXT4_SB(sb)->s_journal;
long ret;
int block;
uid_t i_uid;
gid_t i_gid;
projid_t i_projid;
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
return inode;
ei = EXT4_I(inode);
iloc.bh = NULL;
ret = __ext4_get_inode_loc(inode, &iloc, 0);
if (ret < 0)
goto bad_inode;
raw_inode = ext4_raw_inode(&iloc);
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
EXT4_INODE_SIZE(inode->i_sb)) {
EXT4_ERROR_INODE(inode, "bad extra_isize (%u != %u)",
EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize,
EXT4_INODE_SIZE(inode->i_sb));
ret = -EFSCORRUPTED;
goto bad_inode;
}
} else
ei->i_extra_isize = 0;
/* Precompute checksum seed for inode metadata */
if (ext4_has_metadata_csum(sb)) {
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
__u32 csum;
__le32 inum = cpu_to_le32(inode->i_ino);
__le32 gen = raw_inode->i_generation;
csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
sizeof(inum));
ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
sizeof(gen));
}
if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
EXT4_ERROR_INODE(inode, "checksum invalid");
ret = -EFSBADCRC;
goto bad_inode;
}
inode->i_mode = le16_to_cpu(raw_inode->i_mode);
i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_PROJECT) &&
EXT4_INODE_SIZE(sb) > EXT4_GOOD_OLD_INODE_SIZE &&
EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
i_projid = (projid_t)le32_to_cpu(raw_inode->i_projid);
else
i_projid = EXT4_DEF_PROJID;
if (!(test_opt(inode->i_sb, NO_UID32))) {
i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
}
i_uid_write(inode, i_uid);
i_gid_write(inode, i_gid);
ei->i_projid = make_kprojid(&init_user_ns, i_projid);
set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
ei->i_inline_off = 0;
ei->i_dir_start_lookup = 0;
ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
/* We now have enough fields to check if the inode was active or not.
* This is needed because nfsd might try to access dead inodes;
* the test is the same one that e2fsck uses.
* NeilBrown 1999oct15
*/
if (inode->i_nlink == 0) {
if ((inode->i_mode == 0 ||
!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) &&
ino != EXT4_BOOT_LOADER_INO) {
/* this inode is deleted */
ret = -ESTALE;
goto bad_inode;
}
/* The only unlinked inodes we let through here have
* valid i_mode and are being read by the orphan
* recovery code: that's fine, we're about to complete
* the process of deleting those.
* OR it is the EXT4_BOOT_LOADER_INO which is
* not initialized on a new filesystem. */
}
ei->i_flags = le32_to_cpu(raw_inode->i_flags);
inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
if (ext4_has_feature_64bit(sb))
ei->i_file_acl |=
((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
inode->i_size = ext4_isize(raw_inode);
ei->i_disksize = inode->i_size;
#ifdef CONFIG_QUOTA
ei->i_reserved_quota = 0;
#endif
inode->i_generation = le32_to_cpu(raw_inode->i_generation);
ei->i_block_group = iloc.block_group;
ei->i_last_alloc_group = ~0;
/*
* NOTE! The in-memory inode i_data array is in little-endian order
* even on big-endian machines: we do NOT byteswap the block numbers!
*/
for (block = 0; block < EXT4_N_BLOCKS; block++)
ei->i_data[block] = raw_inode->i_block[block];
INIT_LIST_HEAD(&ei->i_orphan);
/*
* Set transaction id's of transactions that have to be committed
* to finish f[data]sync. We set them to currently running transaction
* as we cannot be sure that the inode or some of its metadata isn't
* part of the transaction - the inode could have been reclaimed and
* now it is reread from disk.
*/
if (journal) {
transaction_t *transaction;
tid_t tid;
read_lock(&journal->j_state_lock);
if (journal->j_running_transaction)
transaction = journal->j_running_transaction;
else
transaction = journal->j_committing_transaction;
if (transaction)
tid = transaction->t_tid;
else
tid = journal->j_commit_sequence;
read_unlock(&journal->j_state_lock);
ei->i_sync_tid = tid;
ei->i_datasync_tid = tid;
}
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
if (ei->i_extra_isize == 0) {
/* The extra space is currently unused. Use it. */
ei->i_extra_isize = sizeof(struct ext4_inode) -
EXT4_GOOD_OLD_INODE_SIZE;
} else {
ext4_iget_extra_inode(inode, raw_inode, ei);
}
}
EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
inode->i_version |=
(__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
}
}
ret = 0;
if (ei->i_file_acl &&
!ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
ei->i_file_acl);
ret = -EFSCORRUPTED;
goto bad_inode;
} else if (!ext4_has_inline_data(inode)) {
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
(S_ISLNK(inode->i_mode) &&
!ext4_inode_is_fast_symlink(inode))))
/* Validate extent which is part of inode */
ret = ext4_ext_check_inode(inode);
} else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
(S_ISLNK(inode->i_mode) &&
!ext4_inode_is_fast_symlink(inode))) {
/* Validate block references which are part of inode */
ret = ext4_ind_check_inode(inode);
}
}
if (ret)
goto bad_inode;
if (S_ISREG(inode->i_mode)) {
inode->i_op = &ext4_file_inode_operations;
inode->i_fop = &ext4_file_operations;
ext4_set_aops(inode);
} else if (S_ISDIR(inode->i_mode)) {
inode->i_op = &ext4_dir_inode_operations;
inode->i_fop = &ext4_dir_operations;
} else if (S_ISLNK(inode->i_mode)) {
if (ext4_encrypted_inode(inode)) {
inode->i_op = &ext4_encrypted_symlink_inode_operations;
ext4_set_aops(inode);
} else if (ext4_inode_is_fast_symlink(inode)) {
inode->i_link = (char *)ei->i_data;
inode->i_op = &ext4_fast_symlink_inode_operations;
nd_terminate_link(ei->i_data, inode->i_size,
sizeof(ei->i_data) - 1);
} else {
inode->i_op = &ext4_symlink_inode_operations;
ext4_set_aops(inode);
}
inode_nohighmem(inode);
} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
inode->i_op = &ext4_special_inode_operations;
if (raw_inode->i_block[0])
init_special_inode(inode, inode->i_mode,
old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
else
init_special_inode(inode, inode->i_mode,
new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
} else if (ino == EXT4_BOOT_LOADER_INO) {
make_bad_inode(inode);
} else {
ret = -EFSCORRUPTED;
EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
goto bad_inode;
}
brelse(iloc.bh);
ext4_set_inode_flags(inode);
unlock_new_inode(inode);
return inode;
bad_inode:
brelse(iloc.bh);
iget_failed(inode);
return ERR_PTR(ret);
}
struct inode *ext4_iget_normal(struct super_block *sb, unsigned long ino)
{
if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
return ERR_PTR(-EFSCORRUPTED);
return ext4_iget(sb, ino);
}
static int ext4_inode_blocks_set(handle_t *handle,
struct ext4_inode *raw_inode,
struct ext4_inode_info *ei)
{
struct inode *inode = &(ei->vfs_inode);
u64 i_blocks = inode->i_blocks;
struct super_block *sb = inode->i_sb;
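/*
 * Three encodings are tried, smallest first: 32 bits in 512-byte
 * units, 48 bits in 512-byte units (huge_file feature), and 48 bits
 * in filesystem-block units (huge_file plus the HUGE_FILE inode
 * flag).
 */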
if (i_blocks <= ~0U) {
/*
* i_blocks can be represented in a 32 bit variable
* as multiple of 512 bytes
*/
raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
raw_inode->i_blocks_high = 0;
ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
return 0;
}
if (!ext4_has_feature_huge_file(sb))
return -EFBIG;
if (i_blocks <= 0xffffffffffffULL) {
/*
* i_blocks can be represented in a 48 bit variable
* as multiple of 512 bytes
*/
raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
} else {
ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
/* i_block is stored in file system block size */
i_blocks = i_blocks >> (inode->i_blkbits - 9);
raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
}
return 0;
}
struct other_inode {
unsigned long orig_ino;
struct ext4_inode *raw_inode;
};
static int other_inode_match(struct inode * inode, unsigned long ino,
void *data)
{
struct other_inode *oi = (struct other_inode *) data;
if ((inode->i_ino != ino) ||
(inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW |
I_DIRTY_SYNC | I_DIRTY_DATASYNC)) ||
((inode->i_state & I_DIRTY_TIME) == 0))
return 0;
spin_lock(&inode->i_lock);
if (((inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW |
I_DIRTY_SYNC | I_DIRTY_DATASYNC)) == 0) &&
(inode->i_state & I_DIRTY_TIME)) {
struct ext4_inode_info *ei = EXT4_I(inode);
inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED);
spin_unlock(&inode->i_lock);
spin_lock(&ei->i_raw_lock);
EXT4_INODE_SET_XTIME(i_ctime, inode, oi->raw_inode);
EXT4_INODE_SET_XTIME(i_mtime, inode, oi->raw_inode);
EXT4_INODE_SET_XTIME(i_atime, inode, oi->raw_inode);
ext4_inode_csum_set(inode, oi->raw_inode, ei);
spin_unlock(&ei->i_raw_lock);
trace_ext4_other_inode_update_time(inode, oi->orig_ino);
return -1;
}
spin_unlock(&inode->i_lock);
return -1;
}
/*
* Opportunistically update the other time fields for other inodes in
* the same inode table block.
*/
static void ext4_update_other_inodes_time(struct super_block *sb,
unsigned long orig_ino, char *buf)
{
struct other_inode oi;
unsigned long ino;
int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
int inode_size = EXT4_INODE_SIZE(sb);
oi.orig_ino = orig_ino;
/*
* Calculate the first inode in the inode table block. Inode
* numbers are one-based. That is, the first inode in a block
* (assuming 4k blocks and 256 byte inodes) is (n*16 + 1).
*/
ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;
for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
if (ino == orig_ino)
continue;
oi.raw_inode = (struct ext4_inode *) buf;
(void) find_inode_nowait(sb, ino, other_inode_match, &oi);
}
}
/*
* Post the struct inode info into an on-disk inode location in the
* buffer-cache. This gobbles the caller's reference to the
* buffer_head in the inode location struct.
*
* The caller must have write access to iloc->bh.
*/
static int ext4_do_update_inode(handle_t *handle,
struct inode *inode,
struct ext4_iloc *iloc)
{
struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
struct ext4_inode_info *ei = EXT4_I(inode);
struct buffer_head *bh = iloc->bh;
struct super_block *sb = inode->i_sb;
int err = 0, rc, block;
int need_datasync = 0, set_large_file = 0;
uid_t i_uid;
gid_t i_gid;
projid_t i_projid;
spin_lock(&ei->i_raw_lock);
/* For fields not tracked in the in-memory inode,
* initialise them to zero for new inodes. */
if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
ext4_get_inode_flags(ei);
raw_inode->i_mode = cpu_to_le16(inode->i_mode);
i_uid = i_uid_read(inode);
i_gid = i_gid_read(inode);
i_projid = from_kprojid(&init_user_ns, ei->i_projid);
if (!(test_opt(inode->i_sb, NO_UID32))) {
raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
/*
* Fix up interoperability with old kernels. Otherwise, old inodes get
* re-used with the upper 16 bits of the uid/gid intact
*/
if (!ei->i_dtime) {
raw_inode->i_uid_high =
cpu_to_le16(high_16_bits(i_uid));
raw_inode->i_gid_high =
cpu_to_le16(high_16_bits(i_gid));
} else {
raw_inode->i_uid_high = 0;
raw_inode->i_gid_high = 0;
}
} else {
raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
raw_inode->i_uid_high = 0;
raw_inode->i_gid_high = 0;
}
raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
err = ext4_inode_blocks_set(handle, raw_inode, ei);
if (err) {
spin_unlock(&ei->i_raw_lock);
goto out_brelse;
}
raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
if (likely(!test_opt2(inode->i_sb, HURD_COMPAT)))
raw_inode->i_file_acl_high =
cpu_to_le16(ei->i_file_acl >> 32);
raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
if (ei->i_disksize != ext4_isize(raw_inode)) {
ext4_isize_set(raw_inode, ei->i_disksize);
need_datasync = 1;
}
if (ei->i_disksize > 0x7fffffffULL) {
if (!ext4_has_feature_large_file(sb) ||
EXT4_SB(sb)->s_es->s_rev_level ==
cpu_to_le32(EXT4_GOOD_OLD_REV))
set_large_file = 1;
}
raw_inode->i_generation = cpu_to_le32(inode->i_generation);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
if (old_valid_dev(inode->i_rdev)) {
raw_inode->i_block[0] =
cpu_to_le32(old_encode_dev(inode->i_rdev));
raw_inode->i_block[1] = 0;
} else {
raw_inode->i_block[0] = 0;
raw_inode->i_block[1] =
cpu_to_le32(new_encode_dev(inode->i_rdev));
raw_inode->i_block[2] = 0;
}
} else if (!ext4_has_inline_data(inode)) {
for (block = 0; block < EXT4_N_BLOCKS; block++)
raw_inode->i_block[block] = ei->i_data[block];
}
if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
if (ei->i_extra_isize) {
if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
raw_inode->i_version_hi =
cpu_to_le32(inode->i_version >> 32);
raw_inode->i_extra_isize =
cpu_to_le16(ei->i_extra_isize);
}
}
BUG_ON(!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
EXT4_FEATURE_RO_COMPAT_PROJECT) &&
i_projid != EXT4_DEF_PROJID);
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
raw_inode->i_projid = cpu_to_le32(i_projid);
ext4_inode_csum_set(inode, raw_inode, ei);
spin_unlock(&ei->i_raw_lock);
if (inode->i_sb->s_flags & MS_LAZYTIME)
ext4_update_other_inodes_time(inode->i_sb, inode->i_ino,
bh->b_data);
BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
rc = ext4_handle_dirty_metadata(handle, NULL, bh);
if (!err)
err = rc;
ext4_clear_inode_state(inode, EXT4_STATE_NEW);
if (set_large_file) {
BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access");
err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
if (err)
goto out_brelse;
ext4_update_dynamic_rev(sb);
ext4_set_feature_large_file(sb);
ext4_handle_sync(handle);
err = ext4_handle_dirty_super(handle, sb);
}
ext4_update_inode_fsync_trans(handle, inode, need_datasync);
out_brelse:
brelse(bh);
ext4_std_error(inode->i_sb, err);
return err;
}
/*
* ext4_write_inode()
*
* We are called from a few places:
*
* - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files.
* Here, there will be no transaction running. We wait for any running
* transaction to commit.
*
* - Within flush work (sys_sync(), kupdate and such).
* We wait on commit, if told to.
*
* - Within iput_final() -> write_inode_now()
* We wait on commit, if told to.
*
* In all cases it is actually safe for us to return without doing anything,
* because the inode has been copied into a raw inode buffer in
* ext4_mark_inode_dirty(). This is a correctness thing for WB_SYNC_ALL
* writeback.
*
* Note that we are absolutely dependent upon all inode dirtiers doing the
* right thing: they *must* call mark_inode_dirty() after dirtying info in
* which we are interested.
*
* It would be a bug for them to not do this. The code:
*
* mark_inode_dirty(inode)
* stuff();
* inode->i_size = expr;
*
* is in error because write_inode() could occur while `stuff()' is running,
* and the new i_size will be lost. Plus the inode will no longer be on the
* superblock's dirty inode list.
*/
int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
{
int err;
if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
return 0;
if (EXT4_SB(inode->i_sb)->s_journal) {
if (ext4_journal_current_handle()) {
jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
dump_stack();
return -EIO;
}
/*
* No need to force transaction in WB_SYNC_NONE mode. Also
* ext4_sync_fs() will force the commit after everything is
* written.
*/
if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
return 0;
err = ext4_force_commit(inode->i_sb);
} else {
struct ext4_iloc iloc;
err = __ext4_get_inode_loc(inode, &iloc, 0);
if (err)
return err;
/*
* sync(2) will flush the whole buffer cache. No need to do
* it here separately for each inode.
*/
if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
sync_dirty_buffer(iloc.bh);
if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
"IO error syncing inode");
err = -EIO;
}
brelse(iloc.bh);
}
return err;
}
/*
* In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate
* buffers that are attached to a page straddling i_size and are undergoing
* commit. In that case we have to wait for commit to finish and try again.
*/
static void ext4_wait_for_tail_page_commit(struct inode *inode)
{
struct page *page;
unsigned offset;
journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
tid_t commit_tid = 0;
int ret;
offset = inode->i_size & (PAGE_SIZE - 1);
/*
* All buffers in the last page remain valid? Then there's nothing to
* do. We do the check mainly to optimize the common PAGE_SIZE ==
* blocksize case
*/
if (offset > PAGE_SIZE - (1 << inode->i_blkbits))
return;
while (1) {
page = find_lock_page(inode->i_mapping,
inode->i_size >> PAGE_SHIFT);
if (!page)
return;
ret = __ext4_journalled_invalidatepage(page, offset,
PAGE_SIZE - offset);
unlock_page(page);
put_page(page);
if (ret != -EBUSY)
return;
commit_tid = 0;
read_lock(&journal->j_state_lock);
if (journal->j_committing_transaction)
commit_tid = journal->j_committing_transaction->t_tid;
read_unlock(&journal->j_state_lock);
if (commit_tid)
jbd2_log_wait_commit(journal, commit_tid);
}
}
/*
* ext4_setattr()
*
* Called from notify_change.
*
* We want to trap VFS attempts to truncate the file as soon as
* possible. In particular, we want to make sure that when the VFS
* shrinks i_size, we put the inode on the orphan list and modify
* i_disksize immediately, so that during the subsequent flushing of
* dirty pages and freeing of disk blocks, we can guarantee that any
* commit will leave the blocks being flushed in an unused state on
* disk. (On recovery, the inode will get truncated and the blocks will
* be freed, so we have a strong guarantee that no future commit will
* leave these blocks visible to the user.)
*
* Another thing we have to assure is that if we are in ordered mode
* and the inode is still attached to the committing transaction, we
* must start writeout of all the dirty pages which are being truncated.
* This way we are sure that all the data written in the previous
* transaction are already on disk (truncate waits for pages under
* writeback).
*
* Called with inode->i_mutex down.
*/
int ext4_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
int error, rc = 0;
int orphan = 0;
const unsigned int ia_valid = attr->ia_valid;
error = inode_change_ok(inode, attr);
if (error)
return error;
if (is_quota_modification(inode, attr)) {
error = dquot_initialize(inode);
if (error)
return error;
}
if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
(ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
handle_t *handle;
/* (user+group)*(old+new) structure, inode write (sb,
* inode block, ? - but truncate inode update has it) */
handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
(EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
if (IS_ERR(handle)) {
error = PTR_ERR(handle);
goto err_out;
}
error = dquot_transfer(inode, attr);
if (error) {
ext4_journal_stop(handle);
return error;
}
/* Update corresponding info in inode so that everything is in
* one transaction */
if (attr->ia_valid & ATTR_UID)
inode->i_uid = attr->ia_uid;
if (attr->ia_valid & ATTR_GID)
inode->i_gid = attr->ia_gid;
error = ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);
}
if (attr->ia_valid & ATTR_SIZE) {
handle_t *handle;
loff_t oldsize = inode->i_size;
int shrink = (attr->ia_size <= inode->i_size);
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
if (attr->ia_size > sbi->s_bitmap_maxbytes)
return -EFBIG;
}
if (!S_ISREG(inode->i_mode))
return -EINVAL;
if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size)
inode_inc_iversion(inode);
if (ext4_should_order_data(inode) &&
(attr->ia_size < inode->i_size)) {
error = ext4_begin_ordered_truncate(inode,
attr->ia_size);
if (error)
goto err_out;
}
if (attr->ia_size != inode->i_size) {
handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
if (IS_ERR(handle)) {
error = PTR_ERR(handle);
goto err_out;
}
if (ext4_handle_valid(handle) && shrink) {
error = ext4_orphan_add(handle, inode);
orphan = 1;
}
/*
* Update c/mtime on truncate up, ext4_truncate() will
* update c/mtime in shrink case below
*/
if (!shrink) {
inode->i_mtime = ext4_current_time(inode);
inode->i_ctime = inode->i_mtime;
}
down_write(&EXT4_I(inode)->i_data_sem);
EXT4_I(inode)->i_disksize = attr->ia_size;
rc = ext4_mark_inode_dirty(handle, inode);
if (!error)
error = rc;
/*
* We have to update i_size under i_data_sem together
* with i_disksize to avoid races with writeback code
* running ext4_wb_update_i_disksize().
*/
if (!error)
i_size_write(inode, attr->ia_size);
up_write(&EXT4_I(inode)->i_data_sem);
ext4_journal_stop(handle);
if (error) {
if (orphan)
ext4_orphan_del(NULL, inode);
goto err_out;
}
}
if (!shrink)
pagecache_isize_extended(inode, oldsize, inode->i_size);
/*
* Blocks are going to be removed from the inode. Wait
* for dio in flight. Temporarily disable
* dioread_nolock to prevent livelock.
*/
if (orphan) {
if (!ext4_should_journal_data(inode)) {
ext4_inode_block_unlocked_dio(inode);
inode_dio_wait(inode);
ext4_inode_resume_unlocked_dio(inode);
} else
ext4_wait_for_tail_page_commit(inode);
}
down_write(&EXT4_I(inode)->i_mmap_sem);
/*
* Truncate pagecache after we've waited for commit
* in data=journal mode to make pages freeable.
*/
truncate_pagecache(inode, inode->i_size);
if (shrink)
ext4_truncate(inode);
up_write(&EXT4_I(inode)->i_mmap_sem);
}
if (!rc) {
setattr_copy(inode, attr);
mark_inode_dirty(inode);
}
/*
* If the call to ext4_truncate failed to get a transaction handle at
* all, we need to clean up the in-core orphan list manually.
*/
if (orphan && inode->i_nlink)
ext4_orphan_del(NULL, inode);
if (!rc && (ia_valid & ATTR_MODE))
rc = posix_acl_chmod(inode, inode->i_mode);
err_out:
ext4_std_error(inode->i_sb, error);
if (!error)
error = rc;
return error;
}
int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
struct inode *inode;
unsigned long long delalloc_blocks;
inode = d_inode(dentry);
generic_fillattr(inode, stat);
/*
* If there is inline data in the inode, the inode will normally not
* have data blocks allocated (it may have an external xattr block).
* Report at least one sector for such files, so tools like tar, rsync and
* others don't incorrectly think the file is completely sparse.
*/
if (unlikely(ext4_has_inline_data(inode)))
stat->blocks += (stat->size + 511) >> 9;
/*
* We can't update i_blocks if the block allocation is delayed
* otherwise in the case of system crash before the real block
* allocation is done, we will have i_blocks inconsistent with
* on-disk file blocks.
* We always keep i_blocks updated together with real
* allocation. But so as not to confuse userspace, stat
* returns the blocks that include the delayed allocation
* blocks for this file.
*/
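/*
 * i_reserved_data_blocks counts clusters; EXT4_C2B() converts that to
 * blocks, and the shift below converts blocks to the 512-byte units
 * used by stat->blocks.
 */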
delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
EXT4_I(inode)->i_reserved_data_blocks);
stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
return 0;
}
static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
int pextents)
{
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
return ext4_ind_trans_blocks(inode, lblocks);
return ext4_ext_index_trans_blocks(inode, pextents);
}
/*
 * Account for index blocks, block group bitmaps and block group
 * descriptor blocks if we modify data blocks and index blocks. In the
 * worst case, the index blocks are spread over different block groups.
 *
 * If the data blocks are discontiguous, they may spread over different
 * block groups too. Even if they are contiguous, with flexbg they
 * could still cross a block group boundary.
 *
 * Also account for superblock, inode, quota and xattr blocks.
 */
static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
int pextents)
{
ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
int gdpblocks;
int idxblocks;
int ret = 0;
/*
* How many index blocks do we need to touch to map @lblocks logical blocks
* to @pextents physical extents?
*/
idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);
ret = idxblocks;
/*
* Now let's see how many group bitmaps and group descriptors we need
* to account for.
*/
groups = idxblocks + pextents;
gdpblocks = groups;
if (groups > ngroups)
groups = ngroups;
if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
/* bitmaps and block group descriptor blocks */
ret += groups + gdpblocks;
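/*
 * Illustrative example: idxblocks = 4 and pextents = 4 give
 * groups = 8; assuming ngroups and s_gdb_count are at least 8,
 * ret = 4 + 8 + 8 + EXT4_META_TRANS_BLOCKS(sb).
 */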
/* Blocks for super block, inode, quota and xattr blocks */
ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
return ret;
}
/*
 * Calculate the total number of credits to reserve to fit
 * the modification of a single page into a single transaction,
 * which may include multiple chunks of block allocations.
 *
 * This could be called via ext4_write_begin().
 *
 * We need to consider the worst case, when
 * one new block is allocated per extent.
 */
int ext4_writepage_trans_blocks(struct inode *inode)
{
int bpp = ext4_journal_blocks_per_page(inode);
int ret;
ret = ext4_meta_trans_blocks(inode, bpp, bpp);
/* Account for data blocks for journalled mode */
if (ext4_should_journal_data(inode))
ret += bpp;
return ret;
}
/*
* Calculate the journal credits for a chunk of data modification.
*
* This is called from DIO, fallocate or any other caller of
* ext4_map_blocks() that maps/allocates a chunk of contiguous disk blocks.
*
* Journal buffers for data blocks are not included here, as DIO
* and fallocate do not need to journal data buffers.
*/
int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
{
return ext4_meta_trans_blocks(inode, nrblocks, 1);
}
/*
* The caller must have previously called ext4_reserve_inode_write().
* Given this, we know that the caller already has write access to iloc->bh.
*/
int ext4_mark_iloc_dirty(handle_t *handle,
struct inode *inode, struct ext4_iloc *iloc)
{
int err = 0;
if (IS_I_VERSION(inode))
inode_inc_iversion(inode);
/* the do_update_inode consumes one bh->b_count */
get_bh(iloc->bh);
/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
err = ext4_do_update_inode(handle, inode, iloc);
put_bh(iloc->bh);
return err;
}
/*
* On success, we end up with an outstanding reference count against
* iloc->bh. This _must_ be cleaned up later.
*/
int
ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
struct ext4_iloc *iloc)
{
int err;
err = ext4_get_inode_loc(inode, iloc);
if (!err) {
BUFFER_TRACE(iloc->bh, "get_write_access");
err = ext4_journal_get_write_access(handle, iloc->bh);
if (err) {
brelse(iloc->bh);
iloc->bh = NULL;
}
}
ext4_std_error(inode->i_sb, err);
return err;
}
/*
* Expand an inode by new_extra_isize bytes.
* Returns 0 on success or negative error number on failure.
*/
static int ext4_expand_extra_isize(struct inode *inode,
unsigned int new_extra_isize,
struct ext4_iloc iloc,
handle_t *handle)
{
struct ext4_inode *raw_inode;
struct ext4_xattr_ibody_header *header;
if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
return 0;
raw_inode = ext4_raw_inode(&iloc);
header = IHDR(inode, raw_inode);
/* No extended attributes present */
if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
new_extra_isize);
EXT4_I(inode)->i_extra_isize = new_extra_isize;
return 0;
}
/* try to expand with EAs present */
return ext4_expand_extra_isize_ea(inode, new_extra_isize,
raw_inode, handle);
}
/*
* What we do here is to mark the in-core inode as clean with respect to inode
* dirtiness (it may still be data-dirty).
* This means that the in-core inode may be reaped by prune_icache
* without having to perform any I/O. This is a very good thing,
* because *any* task may call prune_icache - even ones which
* have a transaction open against a different journal.
*
* Is this cheating? Not really. Sure, we haven't written the
* inode out, but prune_icache isn't a user-visible syncing function.
* Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
* we start and wait on commits.
*/
int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
{
struct ext4_iloc iloc;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
static unsigned int mnt_count;
int err, ret;
might_sleep();
trace_ext4_mark_inode_dirty(inode, _RET_IP_);
err = ext4_reserve_inode_write(handle, inode, &iloc);
if (err)
return err;
if (ext4_handle_valid(handle) &&
EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
!ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
/*
* We need extra buffer credits since we may write into EA block
* with this same handle. If journal_extend fails, then it will
* only result in a minor loss of functionality for that inode.
* If this is felt to be critical, then e2fsck should be run to
* force a large enough s_min_extra_isize.
*/
if ((jbd2_journal_extend(handle,
EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
ret = ext4_expand_extra_isize(inode,
sbi->s_want_extra_isize,
iloc, handle);
if (ret) {
ext4_set_inode_state(inode,
EXT4_STATE_NO_EXPAND);
if (mnt_count !=
le16_to_cpu(sbi->s_es->s_mnt_count)) {
ext4_warning(inode->i_sb,
"Unable to expand inode %lu. Delete"
" some EAs or run e2fsck.",
inode->i_ino);
mnt_count =
le16_to_cpu(sbi->s_es->s_mnt_count);
}
}
}
}
return ext4_mark_iloc_dirty(handle, inode, &iloc);
}
/*
* ext4_dirty_inode() is called from __mark_inode_dirty()
*
* We're really interested in the case where a file is being extended.
* i_size has been changed by generic_commit_write() and we thus need
* to include the updated inode in the current transaction.
*
* Also, dquot_alloc_block() will always dirty the inode when blocks
* are allocated to the file.
*
* If the inode is marked synchronous, we don't honour that here - doing
* so would cause a commit on atime updates, which we don't bother doing.
* We handle synchronous inodes at the highest possible level.
*
* If only the I_DIRTY_TIME flag is set, we can skip everything. If
* I_DIRTY_TIME and I_DIRTY_SYNC are set, the only inode fields we need
* to copy into the on-disk inode structure are the timestamp fields.
*/
void ext4_dirty_inode(struct inode *inode, int flags)
{
handle_t *handle;
if (flags == I_DIRTY_TIME)
return;
handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
if (IS_ERR(handle))
goto out;
ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);
out:
return;
}
#if 0
/*
* Bind an inode's backing buffer_head into this transaction, to prevent
* it from being flushed to disk early. Unlike
* ext4_reserve_inode_write, this leaves behind no bh reference and
* returns no iloc structure, so the caller needs to repeat the iloc
* lookup to mark the inode dirty later.
*/
static int ext4_pin_inode(handle_t *handle, struct inode *inode)
{
struct ext4_iloc iloc;
int err = 0;
if (handle) {
err = ext4_get_inode_loc(inode, &iloc);
if (!err) {
BUFFER_TRACE(iloc.bh, "get_write_access");
err = jbd2_journal_get_write_access(handle, iloc.bh);
if (!err)
err = ext4_handle_dirty_metadata(handle,
NULL,
iloc.bh);
brelse(iloc.bh);
}
}
ext4_std_error(inode->i_sb, err);
return err;
}
#endif
int ext4_change_inode_journal_flag(struct inode *inode, int val)
{
journal_t *journal;
handle_t *handle;
int err;
/*
* We have to be very careful here: changing a data block's
* journaling status dynamically is dangerous. If we write a
* data block to the journal, change the status and then delete
* that block, we risk forgetting to revoke the old log record
* from the journal and so a subsequent replay can corrupt data.
* So, first we make sure that the journal is empty and that
* nobody is changing anything.
*/
journal = EXT4_JOURNAL(inode);
if (!journal)
return 0;
if (is_journal_aborted(journal))
return -EROFS;
/* We have to allocate physical blocks for delalloc blocks
 * before flushing the journal, otherwise delalloc blocks cannot
 * be allocated any more. Even worse, a truncate on delalloc blocks
 * could trigger a BUG by flushing delalloc blocks in the journal.
 * There are no delalloc blocks in non-journal data mode.
 */
if (val && test_opt(inode->i_sb, DELALLOC)) {
err = ext4_alloc_da_blocks(inode);
if (err < 0)
return err;
}
/* Wait for all existing dio workers */
ext4_inode_block_unlocked_dio(inode);
inode_dio_wait(inode);
jbd2_journal_lock_updates(journal);
/*
* OK, there are no updates running now, and all cached data is
* synced to disk. We are now in a completely consistent state
* which doesn't have anything in the journal, and we know that
* no filesystem updates are running, so it is safe to modify
* the inode's in-core data-journaling state flag now.
*/
if (val)
ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
else {
err = jbd2_journal_flush(journal);
if (err < 0) {
jbd2_journal_unlock_updates(journal);
ext4_inode_resume_unlocked_dio(inode);
return err;
}
ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
}
ext4_set_aops(inode);
jbd2_journal_unlock_updates(journal);
ext4_inode_resume_unlocked_dio(inode);
/* Finally we can mark the inode as dirty. */
handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
if (IS_ERR(handle))
return PTR_ERR(handle);
err = ext4_mark_inode_dirty(handle, inode);
ext4_handle_sync(handle);
ext4_journal_stop(handle);
ext4_std_error(inode->i_sb, err);
return err;
}
static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
{
return !buffer_mapped(bh);
}
int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page = vmf->page;
loff_t size;
unsigned long len;
int ret;
struct file *file = vma->vm_file;
struct inode *inode = file_inode(file);
struct address_space *mapping = inode->i_mapping;
handle_t *handle;
get_block_t *get_block;
int retries = 0;
sb_start_pagefault(inode->i_sb);
file_update_time(vma->vm_file);
down_read(&EXT4_I(inode)->i_mmap_sem);
/* Delalloc case is easy... */
if (test_opt(inode->i_sb, DELALLOC) &&
!ext4_should_journal_data(inode) &&
!ext4_nonda_switch(inode->i_sb)) {
do {
ret = block_page_mkwrite(vma, vmf,
ext4_da_get_block_prep);
} while (ret == -ENOSPC &&
ext4_should_retry_alloc(inode->i_sb, &retries));
goto out_ret;
}
lock_page(page);
size = i_size_read(inode);
/* Page got truncated from under us? */
if (page->mapping != mapping || page_offset(page) > size) {
unlock_page(page);
ret = VM_FAULT_NOPAGE;
goto out;
}
if (page->index == size >> PAGE_SHIFT)
len = size & ~PAGE_MASK;
else
len = PAGE_SIZE;
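/*
 * Only the part of the last page that lies below i_size needs its
 * buffers checked; pages wholly inside the file use the full
 * PAGE_SIZE.
 */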
/*
* Return if we have all the buffers mapped. This avoids the need to do
* journal_start/journal_stop which can block and take a long time
*/
if (page_has_buffers(page)) {
if (!ext4_walk_page_buffers(NULL, page_buffers(page),
0, len, NULL,
ext4_bh_unmapped)) {
/* Wait so that we don't change page under IO */
wait_for_stable_page(page);
ret = VM_FAULT_LOCKED;
goto out;
}
}
unlock_page(page);
/* OK, we need to fill the hole... */
if (ext4_should_dioread_nolock(inode))
get_block = ext4_get_block_unwritten;
else
get_block = ext4_get_block;
retry_alloc:
handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
ext4_writepage_trans_blocks(inode));
if (IS_ERR(handle)) {
ret = VM_FAULT_SIGBUS;
goto out;
}
ret = block_page_mkwrite(vma, vmf, get_block);
if (!ret && ext4_should_journal_data(inode)) {
if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
PAGE_SIZE, NULL, do_journal_get_write_access)) {
unlock_page(page);
ret = VM_FAULT_SIGBUS;
ext4_journal_stop(handle);
goto out;
}
ext4_set_inode_state(inode, EXT4_STATE_JDATA);
}
ext4_journal_stop(handle);
if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry_alloc;
out_ret:
ret = block_page_mkwrite_return(ret);
out:
up_read(&EXT4_I(inode)->i_mmap_sem);
sb_end_pagefault(inode->i_sb);
return ret;
}
int ext4_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct inode *inode = file_inode(vma->vm_file);
int err;
down_read(&EXT4_I(inode)->i_mmap_sem);
err = filemap_fault(vma, vmf);
up_read(&EXT4_I(inode)->i_mmap_sem);
return err;
}
/*
* Find the first extent at or after @lblk in an inode that is not a hole.
* Search for @map_len blocks at most. The extent is returned in @result.
*
* The function returns 1 if we found an extent. The function returns 0 in
* case there is no extent at or after @lblk and in that case also sets
* @result->es_len to 0. In case of error, the error code is returned.
*/
int ext4_get_next_extent(struct inode *inode, ext4_lblk_t lblk,
unsigned int map_len, struct extent_status *result)
{
struct ext4_map_blocks map;
struct extent_status es = {};
int ret;
map.m_lblk = lblk;
map.m_len = map_len;
/*
* For non-extent-based files this loop may iterate several times, since
* we do not determine the full hole size.
*/
while (map.m_len > 0) {
ret = ext4_map_blocks(NULL, inode, &map, 0);
if (ret < 0)
return ret;
/* Is there an extent covering m_lblk? If so, just return it. */
if (ret > 0) {
int status;
ext4_es_store_pblock(result, map.m_pblk);
result->es_lblk = map.m_lblk;
result->es_len = map.m_len;
if (map.m_flags & EXT4_MAP_UNWRITTEN)
status = EXTENT_STATUS_UNWRITTEN;
else
status = EXTENT_STATUS_WRITTEN;
ext4_es_store_status(result, status);
return 1;
}
ext4_es_find_delayed_extent_range(inode, map.m_lblk,
map.m_lblk + map.m_len - 1,
&es);
/* Is there delalloc data before the next block in the extent tree? */
if (es.es_len && es.es_lblk < map.m_lblk + map.m_len) {
ext4_lblk_t offset = 0;
if (es.es_lblk < lblk)
offset = lblk - es.es_lblk;
result->es_lblk = es.es_lblk + offset;
ext4_es_store_pblock(result,
ext4_es_pblock(&es) + offset);
result->es_len = es.es_len - offset;
ext4_es_store_status(result, ext4_es_status(&es));
return 1;
}
/* There's a hole at m_lblk, advance past it */
map.m_lblk += map.m_len;
map_len -= map.m_len;
map.m_len = map_len;
cond_resched();
}
result->es_len = 0;
return 0;
}
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_3270_0 |
crossvul-cpp_data_bad_3822_1 | /* xfrm_user.c: User interface to configure xfrm engine.
*
* Copyright (C) 2002 David S. Miller (davem@redhat.com)
*
* Changes:
* Mitsuru KANDA @USAGI
* Kazunori MIYAZAWA @USAGI
* Kunihiro Ishiguro <kunihiro@ipinfusion.com>
* IPv6 support
*
*/
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/init.h>
#include <linux/security.h>
#include <net/sock.h>
#include <net/xfrm.h>
#include <net/netlink.h>
#include <net/ah.h>
#include <asm/uaccess.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/in6.h>
#endif
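/*
 * Payload size of an AEAD algorithm attribute: the struct itself plus
 * the key, whose length is carried in bits and rounded up to whole
 * bytes, e.g. a 160-bit key occupies (160 + 7) / 8 = 20 bytes.
 */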
static inline int aead_len(struct xfrm_algo_aead *alg)
{
return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
}
static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
{
struct nlattr *rt = attrs[type];
struct xfrm_algo *algp;
if (!rt)
return 0;
algp = nla_data(rt);
if (nla_len(rt) < xfrm_alg_len(algp))
return -EINVAL;
switch (type) {
case XFRMA_ALG_AUTH:
case XFRMA_ALG_CRYPT:
case XFRMA_ALG_COMP:
break;
default:
return -EINVAL;
}
algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
return 0;
}
static int verify_auth_trunc(struct nlattr **attrs)
{
struct nlattr *rt = attrs[XFRMA_ALG_AUTH_TRUNC];
struct xfrm_algo_auth *algp;
if (!rt)
return 0;
algp = nla_data(rt);
if (nla_len(rt) < xfrm_alg_auth_len(algp))
return -EINVAL;
algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
return 0;
}
static int verify_aead(struct nlattr **attrs)
{
struct nlattr *rt = attrs[XFRMA_ALG_AEAD];
struct xfrm_algo_aead *algp;
if (!rt)
return 0;
algp = nla_data(rt);
if (nla_len(rt) < aead_len(algp))
return -EINVAL;
algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
return 0;
}
static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type,
xfrm_address_t **addrp)
{
struct nlattr *rt = attrs[type];
if (rt && addrp)
*addrp = nla_data(rt);
}
static inline int verify_sec_ctx_len(struct nlattr **attrs)
{
struct nlattr *rt = attrs[XFRMA_SEC_CTX];
struct xfrm_user_sec_ctx *uctx;
if (!rt)
return 0;
uctx = nla_data(rt);
if (uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
return -EINVAL;
return 0;
}
static inline int verify_replay(struct xfrm_usersa_info *p,
struct nlattr **attrs)
{
struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
if ((p->flags & XFRM_STATE_ESN) && !rt)
return -EINVAL;
if (!rt)
return 0;
if (p->id.proto != IPPROTO_ESP)
return -EINVAL;
if (p->replay_window != 0)
return -EINVAL;
return 0;
}
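/*
 * Sanity-check a new SA request: the address family must be supported
 * and the attribute set must match the protocol (AH needs an auth
 * algorithm, ESP at least one of auth/crypt/AEAD, IPCOMP a compression
 * algorithm, DSTOPTS/ROUTING a care-of address). The individual
 * attributes and the mode are then validated.
 */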
static int verify_newsa_info(struct xfrm_usersa_info *p,
struct nlattr **attrs)
{
int err;
err = -EINVAL;
switch (p->family) {
case AF_INET:
break;
case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
break;
#else
err = -EAFNOSUPPORT;
goto out;
#endif
default:
goto out;
}
err = -EINVAL;
switch (p->id.proto) {
case IPPROTO_AH:
if ((!attrs[XFRMA_ALG_AUTH] &&
!attrs[XFRMA_ALG_AUTH_TRUNC]) ||
attrs[XFRMA_ALG_AEAD] ||
attrs[XFRMA_ALG_CRYPT] ||
attrs[XFRMA_ALG_COMP] ||
attrs[XFRMA_TFCPAD])
goto out;
break;
case IPPROTO_ESP:
if (attrs[XFRMA_ALG_COMP])
goto out;
if (!attrs[XFRMA_ALG_AUTH] &&
!attrs[XFRMA_ALG_AUTH_TRUNC] &&
!attrs[XFRMA_ALG_CRYPT] &&
!attrs[XFRMA_ALG_AEAD])
goto out;
if ((attrs[XFRMA_ALG_AUTH] ||
attrs[XFRMA_ALG_AUTH_TRUNC] ||
attrs[XFRMA_ALG_CRYPT]) &&
attrs[XFRMA_ALG_AEAD])
goto out;
if (attrs[XFRMA_TFCPAD] &&
p->mode != XFRM_MODE_TUNNEL)
goto out;
break;
case IPPROTO_COMP:
if (!attrs[XFRMA_ALG_COMP] ||
attrs[XFRMA_ALG_AEAD] ||
attrs[XFRMA_ALG_AUTH] ||
attrs[XFRMA_ALG_AUTH_TRUNC] ||
attrs[XFRMA_ALG_CRYPT] ||
attrs[XFRMA_TFCPAD])
goto out;
break;
#if IS_ENABLED(CONFIG_IPV6)
case IPPROTO_DSTOPTS:
case IPPROTO_ROUTING:
if (attrs[XFRMA_ALG_COMP] ||
attrs[XFRMA_ALG_AUTH] ||
attrs[XFRMA_ALG_AUTH_TRUNC] ||
attrs[XFRMA_ALG_AEAD] ||
attrs[XFRMA_ALG_CRYPT] ||
attrs[XFRMA_ENCAP] ||
attrs[XFRMA_SEC_CTX] ||
attrs[XFRMA_TFCPAD] ||
!attrs[XFRMA_COADDR])
goto out;
break;
#endif
default:
goto out;
}
if ((err = verify_aead(attrs)))
goto out;
if ((err = verify_auth_trunc(attrs)))
goto out;
if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH)))
goto out;
if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT)))
goto out;
if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP)))
goto out;
if ((err = verify_sec_ctx_len(attrs)))
goto out;
if ((err = verify_replay(p, attrs)))
goto out;
err = -EINVAL;
switch (p->mode) {
case XFRM_MODE_TRANSPORT:
case XFRM_MODE_TUNNEL:
case XFRM_MODE_ROUTEOPTIMIZATION:
case XFRM_MODE_BEET:
break;
default:
goto out;
}
err = 0;
out:
return err;
}
static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
struct xfrm_algo_desc *(*get_byname)(const char *, int),
struct nlattr *rta)
{
struct xfrm_algo *p, *ualg;
struct xfrm_algo_desc *algo;
if (!rta)
return 0;
ualg = nla_data(rta);
algo = get_byname(ualg->alg_name, 1);
if (!algo)
return -ENOSYS;
*props = algo->desc.sadb_alg_id;
p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
if (!p)
return -ENOMEM;
strcpy(p->alg_name, algo->name);
*algpp = p;
return 0;
}
static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props,
struct nlattr *rta)
{
struct xfrm_algo *ualg;
struct xfrm_algo_auth *p;
struct xfrm_algo_desc *algo;
if (!rta)
return 0;
ualg = nla_data(rta);
algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
if (!algo)
return -ENOSYS;
*props = algo->desc.sadb_alg_id;
p = kmalloc(sizeof(*p) + (ualg->alg_key_len + 7) / 8, GFP_KERNEL);
if (!p)
return -ENOMEM;
strcpy(p->alg_name, algo->name);
p->alg_key_len = ualg->alg_key_len;
p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
memcpy(p->alg_key, ualg->alg_key, (ualg->alg_key_len + 7) / 8);
*algpp = p;
return 0;
}
static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
struct nlattr *rta)
{
struct xfrm_algo_auth *p, *ualg;
struct xfrm_algo_desc *algo;
if (!rta)
return 0;
ualg = nla_data(rta);
algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
if (!algo)
return -ENOSYS;
if ((ualg->alg_trunc_len / 8) > MAX_AH_AUTH_LEN ||
ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
return -EINVAL;
*props = algo->desc.sadb_alg_id;
p = kmemdup(ualg, xfrm_alg_auth_len(ualg), GFP_KERNEL);
if (!p)
return -ENOMEM;
strcpy(p->alg_name, algo->name);
if (!p->alg_trunc_len)
p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
*algpp = p;
return 0;
}
static int attach_aead(struct xfrm_algo_aead **algpp, u8 *props,
struct nlattr *rta)
{
struct xfrm_algo_aead *p, *ualg;
struct xfrm_algo_desc *algo;
if (!rta)
return 0;
ualg = nla_data(rta);
algo = xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1);
if (!algo)
return -ENOSYS;
*props = algo->desc.sadb_alg_id;
p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL);
if (!p)
return -ENOMEM;
strcpy(p->alg_name, algo->name);
*algpp = p;
return 0;
}
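/*
 * When ESN is in use, an update may only replace the replay window with
 * one of exactly the same size; any mismatch is rejected with -EINVAL.
 */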
static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_esn,
struct nlattr *rp)
{
struct xfrm_replay_state_esn *up;
if (!replay_esn || !rp)
return 0;
up = nla_data(rp);
if (xfrm_replay_state_esn_len(replay_esn) !=
xfrm_replay_state_esn_len(up))
return -EINVAL;
return 0;
}
static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn,
struct xfrm_replay_state_esn **preplay_esn,
struct nlattr *rta)
{
struct xfrm_replay_state_esn *p, *pp, *up;
if (!rta)
return 0;
up = nla_data(rta);
p = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL);
if (!p)
return -ENOMEM;
pp = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL);
if (!pp) {
kfree(p);
return -ENOMEM;
}
*replay_esn = p;
*preplay_esn = pp;
return 0;
}
static inline int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx)
{
int len = 0;
if (xfrm_ctx) {
len += sizeof(struct xfrm_user_sec_ctx);
len += xfrm_ctx->ctx_len;
}
return len;
}
static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
{
memcpy(&x->id, &p->id, sizeof(x->id));
memcpy(&x->sel, &p->sel, sizeof(x->sel));
memcpy(&x->lft, &p->lft, sizeof(x->lft));
x->props.mode = p->mode;
x->props.replay_window = p->replay_window;
x->props.reqid = p->reqid;
x->props.family = p->family;
memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
x->props.flags = p->flags;
if (!x->sel.family && !(p->flags & XFRM_STATE_AF_UNSPEC))
x->sel.family = p->family;
}
/*
 * Someday, when pfkey also has support, this code could be made
 * shareable and moved to xfrm_state.c. - JHS
 */
static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs)
{
struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
if (re) {
struct xfrm_replay_state_esn *replay_esn;
replay_esn = nla_data(re);
memcpy(x->replay_esn, replay_esn,
xfrm_replay_state_esn_len(replay_esn));
memcpy(x->preplay_esn, replay_esn,
xfrm_replay_state_esn_len(replay_esn));
}
if (rp) {
struct xfrm_replay_state *replay;
replay = nla_data(rp);
memcpy(&x->replay, replay, sizeof(*replay));
memcpy(&x->preplay, replay, sizeof(*replay));
}
if (lt) {
struct xfrm_lifetime_cur *ltime;
ltime = nla_data(lt);
x->curlft.bytes = ltime->bytes;
x->curlft.packets = ltime->packets;
x->curlft.add_time = ltime->add_time;
x->curlft.use_time = ltime->use_time;
}
if (et)
x->replay_maxage = nla_get_u32(et);
if (rt)
x->replay_maxdiff = nla_get_u32(rt);
}
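/*
 * Build a kernel xfrm_state from a userspace SA message: copy the
 * fixed-size fields, attach any algorithm/encap/coaddr attributes,
 * initialise the state and its replay machinery, then let optional AE
 * attributes override the defaults. On failure the half-built state is
 * marked dead and released.
 */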
static struct xfrm_state *xfrm_state_construct(struct net *net,
struct xfrm_usersa_info *p,
struct nlattr **attrs,
int *errp)
{
struct xfrm_state *x = xfrm_state_alloc(net);
int err = -ENOMEM;
if (!x)
goto error_no_put;
copy_from_user_state(x, p);
if ((err = attach_aead(&x->aead, &x->props.ealgo,
attrs[XFRMA_ALG_AEAD])))
goto error;
if ((err = attach_auth_trunc(&x->aalg, &x->props.aalgo,
attrs[XFRMA_ALG_AUTH_TRUNC])))
goto error;
if (!x->props.aalgo) {
if ((err = attach_auth(&x->aalg, &x->props.aalgo,
attrs[XFRMA_ALG_AUTH])))
goto error;
}
if ((err = attach_one_algo(&x->ealg, &x->props.ealgo,
xfrm_ealg_get_byname,
attrs[XFRMA_ALG_CRYPT])))
goto error;
if ((err = attach_one_algo(&x->calg, &x->props.calgo,
xfrm_calg_get_byname,
attrs[XFRMA_ALG_COMP])))
goto error;
if (attrs[XFRMA_ENCAP]) {
x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
sizeof(*x->encap), GFP_KERNEL);
if (x->encap == NULL)
goto error;
}
if (attrs[XFRMA_TFCPAD])
x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]);
if (attrs[XFRMA_COADDR]) {
x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
sizeof(*x->coaddr), GFP_KERNEL);
if (x->coaddr == NULL)
goto error;
}
xfrm_mark_get(attrs, &x->mark);
err = __xfrm_init_state(x, false);
if (err)
goto error;
if (attrs[XFRMA_SEC_CTX] &&
security_xfrm_state_alloc(x, nla_data(attrs[XFRMA_SEC_CTX])))
goto error;
if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
attrs[XFRMA_REPLAY_ESN_VAL])))
goto error;
x->km.seq = p->seq;
x->replay_maxdiff = net->xfrm.sysctl_aevent_rseqth;
/* sysctl_xfrm_aevent_etime is in 100ms units */
x->replay_maxage = (net->xfrm.sysctl_aevent_etime*HZ)/XFRM_AE_ETH_M;
if ((err = xfrm_init_replay(x)))
goto error;
/* override default values from above */
xfrm_update_ae_params(x, attrs);
return x;
error:
x->km.state = XFRM_STATE_DEAD;
xfrm_state_put(x);
error_no_put:
*errp = err;
return NULL;
}
static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct xfrm_usersa_info *p = nlmsg_data(nlh);
struct xfrm_state *x;
int err;
struct km_event c;
uid_t loginuid = audit_get_loginuid(current);
u32 sessionid = audit_get_sessionid(current);
u32 sid;
err = verify_newsa_info(p, attrs);
if (err)
return err;
x = xfrm_state_construct(net, p, attrs, &err);
if (!x)
return err;
xfrm_state_hold(x);
if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
err = xfrm_state_add(x);
else
err = xfrm_state_update(x);
security_task_getsecid(current, &sid);
xfrm_audit_state_add(x, err ? 0 : 1, loginuid, sessionid, sid);
if (err < 0) {
x->km.state = XFRM_STATE_DEAD;
__xfrm_state_put(x);
goto out;
}
c.seq = nlh->nlmsg_seq;
c.pid = nlh->nlmsg_pid;
c.event = nlh->nlmsg_type;
km_state_notify(x, &c);
out:
xfrm_state_put(x);
return err;
}
static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
struct xfrm_usersa_id *p,
struct nlattr **attrs,
int *errp)
{
struct xfrm_state *x = NULL;
struct xfrm_mark m;
int err;
u32 mark = xfrm_mark_get(attrs, &m);
if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
err = -ESRCH;
x = xfrm_state_lookup(net, mark, &p->daddr, p->spi, p->proto, p->family);
} else {
xfrm_address_t *saddr = NULL;
verify_one_addr(attrs, XFRMA_SRCADDR, &saddr);
if (!saddr) {
err = -EINVAL;
goto out;
}
err = -ESRCH;
x = xfrm_state_lookup_byaddr(net, mark,
&p->daddr, saddr,
p->proto, p->family);
}
out:
if (!x && errp)
*errp = err;
return x;
}
static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct xfrm_state *x;
int err = -ESRCH;
struct km_event c;
struct xfrm_usersa_id *p = nlmsg_data(nlh);
uid_t loginuid = audit_get_loginuid(current);
u32 sessionid = audit_get_sessionid(current);
u32 sid;
x = xfrm_user_state_lookup(net, p, attrs, &err);
if (x == NULL)
return err;
if ((err = security_xfrm_state_delete(x)) != 0)
goto out;
if (xfrm_state_kern(x)) {
err = -EPERM;
goto out;
}
err = xfrm_state_delete(x);
if (err < 0)
goto out;
c.seq = nlh->nlmsg_seq;
c.pid = nlh->nlmsg_pid;
c.event = nlh->nlmsg_type;
km_state_notify(x, &c);
out:
security_task_getsecid(current, &sid);
xfrm_audit_state_delete(x, err ? 0 : 1, loginuid, sessionid, sid);
xfrm_state_put(x);
return err;
}
static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
{
memset(p, 0, sizeof(*p));
memcpy(&p->id, &x->id, sizeof(p->id));
memcpy(&p->sel, &x->sel, sizeof(p->sel));
memcpy(&p->lft, &x->lft, sizeof(p->lft));
memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
memcpy(&p->stats, &x->stats, sizeof(p->stats));
memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
p->mode = x->props.mode;
p->replay_window = x->props.replay_window;
p->reqid = x->props.reqid;
p->family = x->props.family;
p->flags = x->props.flags;
p->seq = x->km.seq;
}
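/*
 * Context passed through the state/policy walkers: the request skb
 * (source of the netlink pid), the skb being filled, and the sequence
 * number and flags stamped on each emitted message.
 */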
struct xfrm_dump_info {
struct sk_buff *in_skb;
struct sk_buff *out_skb;
u32 nlmsg_seq;
u16 nlmsg_flags;
};
static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
{
struct xfrm_user_sec_ctx *uctx;
struct nlattr *attr;
int ctx_size = sizeof(*uctx) + s->ctx_len;
attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size);
if (attr == NULL)
return -EMSGSIZE;
uctx = nla_data(attr);
uctx->exttype = XFRMA_SEC_CTX;
uctx->len = ctx_size;
uctx->ctx_doi = s->ctx_doi;
uctx->ctx_alg = s->ctx_alg;
uctx->ctx_len = s->ctx_len;
memcpy(uctx + 1, s->ctx_str, s->ctx_len);
return 0;
}
static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
{
struct xfrm_algo *algo;
struct nlattr *nla;
nla = nla_reserve(skb, XFRMA_ALG_AUTH,
sizeof(*algo) + (auth->alg_key_len + 7) / 8);
if (!nla)
return -EMSGSIZE;
algo = nla_data(nla);
strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name));
memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8);
algo->alg_key_len = auth->alg_key_len;
return 0;
}
/* Don't change this without updating xfrm_sa_len! */
static int copy_to_user_state_extra(struct xfrm_state *x,
struct xfrm_usersa_info *p,
struct sk_buff *skb)
{
int ret = 0;
copy_to_user_state(x, p);
if (x->coaddr) {
ret = nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
if (ret)
goto out;
}
if (x->lastused) {
ret = nla_put_u64(skb, XFRMA_LASTUSED, x->lastused);
if (ret)
goto out;
}
if (x->aead) {
ret = nla_put(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead);
if (ret)
goto out;
}
if (x->aalg) {
ret = copy_to_user_auth(x->aalg, skb);
if (!ret)
ret = nla_put(skb, XFRMA_ALG_AUTH_TRUNC,
xfrm_alg_auth_len(x->aalg), x->aalg);
if (ret)
goto out;
}
if (x->ealg) {
ret = nla_put(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg);
if (ret)
goto out;
}
if (x->calg) {
ret = nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
if (ret)
goto out;
}
if (x->encap) {
ret = nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
if (ret)
goto out;
}
if (x->tfcpad) {
ret = nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad);
if (ret)
goto out;
}
ret = xfrm_mark_put(skb, &x->mark);
if (ret)
goto out;
if (x->replay_esn) {
ret = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
xfrm_replay_state_esn_len(x->replay_esn),
x->replay_esn);
if (ret)
goto out;
}
if (x->security)
ret = copy_sec_ctx(x->security, skb);
out:
return ret;
}
static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
{
struct xfrm_dump_info *sp = ptr;
struct sk_buff *in_skb = sp->in_skb;
struct sk_buff *skb = sp->out_skb;
struct xfrm_usersa_info *p;
struct nlmsghdr *nlh;
int err;
nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
if (nlh == NULL)
return -EMSGSIZE;
p = nlmsg_data(nlh);
err = copy_to_user_state_extra(x, p, skb);
if (err) {
nlmsg_cancel(skb, nlh);
return err;
}
nlmsg_end(skb, nlh);
return 0;
}
static int xfrm_dump_sa_done(struct netlink_callback *cb)
{
struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
xfrm_state_walk_done(walk);
return 0;
}
static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
struct xfrm_dump_info info;
BUILD_BUG_ON(sizeof(struct xfrm_state_walk) >
sizeof(cb->args) - sizeof(cb->args[0]));
info.in_skb = cb->skb;
info.out_skb = skb;
info.nlmsg_seq = cb->nlh->nlmsg_seq;
info.nlmsg_flags = NLM_F_MULTI;
if (!cb->args[0]) {
cb->args[0] = 1;
xfrm_state_walk_init(walk, 0);
}
(void) xfrm_state_walk(net, walk, dump_one_state, &info);
return skb->len;
}
static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
struct xfrm_state *x, u32 seq)
{
struct xfrm_dump_info info;
struct sk_buff *skb;
int err;
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!skb)
return ERR_PTR(-ENOMEM);
info.in_skb = in_skb;
info.out_skb = skb;
info.nlmsg_seq = seq;
info.nlmsg_flags = 0;
err = dump_one_state(x, 0, &info);
if (err) {
kfree_skb(skb);
return ERR_PTR(err);
}
return skb;
}
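/*
 * Upper bound on the SPD info reply so that nlmsg_new() can allocate
 * the whole message up front; build_spdinfo() is expected to fit,
 * hence the BUG() in the caller if it ever does not.
 */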
static inline size_t xfrm_spdinfo_msgsize(void)
{
return NLMSG_ALIGN(4)
+ nla_total_size(sizeof(struct xfrmu_spdinfo))
+ nla_total_size(sizeof(struct xfrmu_spdhinfo));
}
static int build_spdinfo(struct sk_buff *skb, struct net *net,
u32 pid, u32 seq, u32 flags)
{
struct xfrmk_spdinfo si;
struct xfrmu_spdinfo spc;
struct xfrmu_spdhinfo sph;
struct nlmsghdr *nlh;
int err;
u32 *f;
nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
if (nlh == NULL) /* shouldn't really happen ... */
return -EMSGSIZE;
f = nlmsg_data(nlh);
*f = flags;
xfrm_spd_getinfo(net, &si);
spc.incnt = si.incnt;
spc.outcnt = si.outcnt;
spc.fwdcnt = si.fwdcnt;
spc.inscnt = si.inscnt;
spc.outscnt = si.outscnt;
spc.fwdscnt = si.fwdscnt;
sph.spdhcnt = si.spdhcnt;
sph.spdhmcnt = si.spdhmcnt;
err = nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
if (!err)
err = nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
if (err) {
nlmsg_cancel(skb, nlh);
return err;
}
return nlmsg_end(skb, nlh);
}
static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct sk_buff *r_skb;
u32 *flags = nlmsg_data(nlh);
u32 spid = NETLINK_CB(skb).pid;
u32 seq = nlh->nlmsg_seq;
r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
if (r_skb == NULL)
return -ENOMEM;
if (build_spdinfo(r_skb, net, spid, seq, *flags) < 0)
BUG();
return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);
}
static inline size_t xfrm_sadinfo_msgsize(void)
{
return NLMSG_ALIGN(4)
+ nla_total_size(sizeof(struct xfrmu_sadhinfo))
+ nla_total_size(4); /* XFRMA_SAD_CNT */
}
static int build_sadinfo(struct sk_buff *skb, struct net *net,
u32 pid, u32 seq, u32 flags)
{
struct xfrmk_sadinfo si;
struct xfrmu_sadhinfo sh;
struct nlmsghdr *nlh;
int err;
u32 *f;
nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
if (nlh == NULL) /* shouldn't really happen ... */
return -EMSGSIZE;
f = nlmsg_data(nlh);
*f = flags;
xfrm_sad_getinfo(net, &si);
sh.sadhmcnt = si.sadhmcnt;
sh.sadhcnt = si.sadhcnt;
err = nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt);
if (!err)
err = nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
if (err) {
nlmsg_cancel(skb, nlh);
return err;
}
return nlmsg_end(skb, nlh);
}
static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct sk_buff *r_skb;
u32 *flags = nlmsg_data(nlh);
u32 spid = NETLINK_CB(skb).pid;
u32 seq = nlh->nlmsg_seq;
r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
if (r_skb == NULL)
return -ENOMEM;
if (build_sadinfo(r_skb, net, spid, seq, *flags) < 0)
BUG();
return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);
}
static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct xfrm_usersa_id *p = nlmsg_data(nlh);
struct xfrm_state *x;
struct sk_buff *resp_skb;
int err = -ESRCH;
x = xfrm_user_state_lookup(net, p, attrs, &err);
if (x == NULL)
goto out_noput;
resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
if (IS_ERR(resp_skb)) {
err = PTR_ERR(resp_skb);
} else {
err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).pid);
}
xfrm_state_put(x);
out_noput:
return err;
}
static int verify_userspi_info(struct xfrm_userspi_info *p)
{
switch (p->info.id.proto) {
case IPPROTO_AH:
case IPPROTO_ESP:
break;
case IPPROTO_COMP:
/* IPCOMP SPI is 16 bits. */
if (p->max >= 0x10000)
return -EINVAL;
break;
default:
return -EINVAL;
}
if (p->min > p->max)
return -EINVAL;
return 0;
}
static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct xfrm_state *x;
struct xfrm_userspi_info *p;
struct sk_buff *resp_skb;
xfrm_address_t *daddr;
int family;
int err;
u32 mark;
struct xfrm_mark m;
p = nlmsg_data(nlh);
err = verify_userspi_info(p);
if (err)
goto out_noput;
family = p->info.family;
daddr = &p->info.id.daddr;
x = NULL;
mark = xfrm_mark_get(attrs, &m);
if (p->info.seq) {
x = xfrm_find_acq_byseq(net, mark, p->info.seq);
if (x && xfrm_addr_cmp(&x->id.daddr, daddr, family)) {
xfrm_state_put(x);
x = NULL;
}
}
if (!x)
x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid,
p->info.id.proto, daddr,
&p->info.saddr, 1,
family);
err = -ENOENT;
if (x == NULL)
goto out_noput;
err = xfrm_alloc_spi(x, p->min, p->max);
if (err)
goto out;
resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
if (IS_ERR(resp_skb)) {
err = PTR_ERR(resp_skb);
goto out;
}
err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).pid);
out:
xfrm_state_put(x);
out_noput:
return err;
}
static int verify_policy_dir(u8 dir)
{
switch (dir) {
case XFRM_POLICY_IN:
case XFRM_POLICY_OUT:
case XFRM_POLICY_FWD:
break;
default:
return -EINVAL;
}
return 0;
}
static int verify_policy_type(u8 type)
{
switch (type) {
case XFRM_POLICY_TYPE_MAIN:
#ifdef CONFIG_XFRM_SUB_POLICY
case XFRM_POLICY_TYPE_SUB:
#endif
break;
default:
return -EINVAL;
}
return 0;
}
static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
{
switch (p->share) {
case XFRM_SHARE_ANY:
case XFRM_SHARE_SESSION:
case XFRM_SHARE_USER:
case XFRM_SHARE_UNIQUE:
break;
default:
return -EINVAL;
}
switch (p->action) {
case XFRM_POLICY_ALLOW:
case XFRM_POLICY_BLOCK:
break;
default:
return -EINVAL;
}
switch (p->sel.family) {
case AF_INET:
break;
case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
break;
#else
return -EAFNOSUPPORT;
#endif
default:
return -EINVAL;
}
return verify_policy_dir(p->dir);
}
static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs)
{
struct nlattr *rt = attrs[XFRMA_SEC_CTX];
struct xfrm_user_sec_ctx *uctx;
if (!rt)
return 0;
uctx = nla_data(rt);
return security_xfrm_policy_alloc(&pol->security, uctx);
}
static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
int nr)
{
int i;
xp->xfrm_nr = nr;
for (i = 0; i < nr; i++, ut++) {
struct xfrm_tmpl *t = &xp->xfrm_vec[i];
memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
memcpy(&t->saddr, &ut->saddr,
sizeof(xfrm_address_t));
t->reqid = ut->reqid;
t->mode = ut->mode;
t->share = ut->share;
t->optional = ut->optional;
t->aalgos = ut->aalgos;
t->ealgos = ut->ealgos;
t->calgos = ut->calgos;
/* If all masks are ~0, then we allow all algorithms. */
t->allalgs = !~(t->aalgos & t->ealgos & t->calgos);
t->encap_family = ut->family;
}
}
static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
{
int i;
if (nr > XFRM_MAX_DEPTH)
return -EINVAL;
for (i = 0; i < nr; i++) {
/* We never validated the ut->family value, so many
* applications simply leave it at zero. The check was
* never made and ut->family was ignored because all
* templates could be assumed to have the same family as
* the policy itself. Now that we will have ipv4-in-ipv6
* and ipv6-in-ipv4 tunnels, this is no longer true.
*/
if (!ut[i].family)
ut[i].family = family;
switch (ut[i].family) {
case AF_INET:
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
break;
#endif
default:
return -EINVAL;
}
}
return 0;
}
static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs)
{
struct nlattr *rt = attrs[XFRMA_TMPL];
if (!rt) {
pol->xfrm_nr = 0;
} else {
struct xfrm_user_tmpl *utmpl = nla_data(rt);
int nr = nla_len(rt) / sizeof(*utmpl);
int err;
err = validate_tmpl(nr, utmpl, pol->family);
if (err)
return err;
copy_templates(pol, utmpl, nr);
}
return 0;
}
static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs)
{
struct nlattr *rt = attrs[XFRMA_POLICY_TYPE];
struct xfrm_userpolicy_type *upt;
u8 type = XFRM_POLICY_TYPE_MAIN;
int err;
if (rt) {
upt = nla_data(rt);
type = upt->type;
}
err = verify_policy_type(type);
if (err)
return err;
*tp = type;
return 0;
}
static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
{
xp->priority = p->priority;
xp->index = p->index;
memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
xp->action = p->action;
xp->flags = p->flags;
xp->family = p->sel.family;
/* XXX xp->share = p->share; */
}
static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
{
memset(p, 0, sizeof(*p));
memcpy(&p->sel, &xp->selector, sizeof(p->sel));
memcpy(&p->lft, &xp->lft, sizeof(p->lft));
memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
p->priority = xp->priority;
p->index = xp->index;
p->sel.family = xp->family;
p->dir = dir;
p->action = xp->action;
p->flags = xp->flags;
p->share = XFRM_SHARE_ANY; /* XXX xp->share */
}
static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_userpolicy_info *p, struct nlattr **attrs, int *errp)
{
struct xfrm_policy *xp = xfrm_policy_alloc(net, GFP_KERNEL);
int err;
if (!xp) {
*errp = -ENOMEM;
return NULL;
}
copy_from_user_policy(xp, p);
err = copy_from_user_policy_type(&xp->type, attrs);
if (err)
goto error;
if (!(err = copy_from_user_tmpl(xp, attrs)))
err = copy_from_user_sec_ctx(xp, attrs);
if (err)
goto error;
xfrm_mark_get(attrs, &xp->mark);
return xp;
error:
*errp = err;
xp->walk.dead = 1;
xfrm_policy_destroy(xp);
return NULL;
}
static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct xfrm_userpolicy_info *p = nlmsg_data(nlh);
struct xfrm_policy *xp;
struct km_event c;
int err;
int excl;
uid_t loginuid = audit_get_loginuid(current);
u32 sessionid = audit_get_sessionid(current);
u32 sid;
err = verify_newpolicy_info(p);
if (err)
return err;
err = verify_sec_ctx_len(attrs);
if (err)
return err;
xp = xfrm_policy_construct(net, p, attrs, &err);
if (!xp)
return err;
/* Shouldn't excl be based on nlh flags? This is really anti-netlink,
 * i.e. more pfkey-derived; in netlink excl is a flag and you wouldn't
 * need a separate XFRM_MSG_UPDPOLICY type. - JHS */
excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
err = xfrm_policy_insert(p->dir, xp, excl);
security_task_getsecid(current, &sid);
xfrm_audit_policy_add(xp, err ? 0 : 1, loginuid, sessionid, sid);
if (err) {
security_xfrm_policy_free(xp->security);
kfree(xp);
return err;
}
c.event = nlh->nlmsg_type;
c.seq = nlh->nlmsg_seq;
c.pid = nlh->nlmsg_pid;
km_policy_notify(xp, p->dir, &c);
xfrm_pol_put(xp);
return 0;
}
static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
{
struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
int i;
if (xp->xfrm_nr == 0)
return 0;
for (i = 0; i < xp->xfrm_nr; i++) {
struct xfrm_user_tmpl *up = &vec[i];
struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
memset(up, 0, sizeof(*up));
memcpy(&up->id, &kp->id, sizeof(up->id));
up->family = kp->encap_family;
memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
up->reqid = kp->reqid;
up->mode = kp->mode;
up->share = kp->share;
up->optional = kp->optional;
up->aalgos = kp->aalgos;
up->ealgos = kp->ealgos;
up->calgos = kp->calgos;
}
return nla_put(skb, XFRMA_TMPL,
sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec);
}
static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
{
if (x->security) {
return copy_sec_ctx(x->security, skb);
}
return 0;
}
static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
{
if (xp->security)
return copy_sec_ctx(xp->security, skb);
return 0;
}
static inline size_t userpolicy_type_attrsize(void)
{
#ifdef CONFIG_XFRM_SUB_POLICY
return nla_total_size(sizeof(struct xfrm_userpolicy_type));
#else
return 0;
#endif
}
#ifdef CONFIG_XFRM_SUB_POLICY
static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
{
struct xfrm_userpolicy_type upt = {
.type = type,
};
return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
}
#else
static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
{
return 0;
}
#endif
static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
{
struct xfrm_dump_info *sp = ptr;
struct xfrm_userpolicy_info *p;
struct sk_buff *in_skb = sp->in_skb;
struct sk_buff *skb = sp->out_skb;
struct nlmsghdr *nlh;
int err;
nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
if (nlh == NULL)
return -EMSGSIZE;
p = nlmsg_data(nlh);
copy_to_user_policy(xp, p, dir);
err = copy_to_user_tmpl(xp, skb);
if (!err)
err = copy_to_user_sec_ctx(xp, skb);
if (!err)
err = copy_to_user_policy_type(xp->type, skb);
if (!err)
err = xfrm_mark_put(skb, &xp->mark);
if (err) {
nlmsg_cancel(skb, nlh);
return err;
}
nlmsg_end(skb, nlh);
return 0;
}
static int xfrm_dump_policy_done(struct netlink_callback *cb)
{
struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
xfrm_policy_walk_done(walk);
return 0;
}
static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
struct xfrm_dump_info info;
BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
sizeof(cb->args) - sizeof(cb->args[0]));
info.in_skb = cb->skb;
info.out_skb = skb;
info.nlmsg_seq = cb->nlh->nlmsg_seq;
info.nlmsg_flags = NLM_F_MULTI;
if (!cb->args[0]) {
cb->args[0] = 1;
xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
}
(void) xfrm_policy_walk(net, walk, dump_one_policy, &info);
return skb->len;
}
static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
struct xfrm_policy *xp,
int dir, u32 seq)
{
struct xfrm_dump_info info;
struct sk_buff *skb;
int err;
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb)
return ERR_PTR(-ENOMEM);
info.in_skb = in_skb;
info.out_skb = skb;
info.nlmsg_seq = seq;
info.nlmsg_flags = 0;
err = dump_one_policy(xp, dir, 0, &info);
if (err) {
kfree_skb(skb);
return ERR_PTR(err);
}
return skb;
}
static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct xfrm_policy *xp;
struct xfrm_userpolicy_id *p;
u8 type = XFRM_POLICY_TYPE_MAIN;
int err;
struct km_event c;
int delete;
struct xfrm_mark m;
u32 mark = xfrm_mark_get(attrs, &m);
p = nlmsg_data(nlh);
delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;
err = copy_from_user_policy_type(&type, attrs);
if (err)
return err;
err = verify_policy_dir(p->dir);
if (err)
return err;
if (p->index)
xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, delete, &err);
else {
struct nlattr *rt = attrs[XFRMA_SEC_CTX];
struct xfrm_sec_ctx *ctx;
err = verify_sec_ctx_len(attrs);
if (err)
return err;
ctx = NULL;
if (rt) {
struct xfrm_user_sec_ctx *uctx = nla_data(rt);
err = security_xfrm_policy_alloc(&ctx, uctx);
if (err)
return err;
}
xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir, &p->sel,
ctx, delete, &err);
security_xfrm_policy_free(ctx);
}
if (xp == NULL)
return -ENOENT;
if (!delete) {
struct sk_buff *resp_skb;
resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
if (IS_ERR(resp_skb)) {
err = PTR_ERR(resp_skb);
} else {
err = nlmsg_unicast(net->xfrm.nlsk, resp_skb,
NETLINK_CB(skb).pid);
}
} else {
uid_t loginuid = audit_get_loginuid(current);
u32 sessionid = audit_get_sessionid(current);
u32 sid;
security_task_getsecid(current, &sid);
xfrm_audit_policy_delete(xp, err ? 0 : 1, loginuid, sessionid,
sid);
if (err != 0)
goto out;
c.data.byid = p->index;
c.event = nlh->nlmsg_type;
c.seq = nlh->nlmsg_seq;
c.pid = nlh->nlmsg_pid;
km_policy_notify(xp, p->dir, &c);
}
out:
xfrm_pol_put(xp);
return err;
}
static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct km_event c;
struct xfrm_usersa_flush *p = nlmsg_data(nlh);
struct xfrm_audit audit_info;
int err;
audit_info.loginuid = audit_get_loginuid(current);
audit_info.sessionid = audit_get_sessionid(current);
security_task_getsecid(current, &audit_info.secid);
err = xfrm_state_flush(net, p->proto, &audit_info);
if (err) {
if (err == -ESRCH) /* empty table */
return 0;
return err;
}
c.data.proto = p->proto;
c.event = nlh->nlmsg_type;
c.seq = nlh->nlmsg_seq;
c.pid = nlh->nlmsg_pid;
c.net = net;
km_state_notify(NULL, &c);
return 0;
}
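/*
 * Size of an async-event message for this state: the replay attribute
 * is the full ESN window when one is configured, otherwise the legacy
 * struct xfrm_replay_state.
 */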
static inline size_t xfrm_aevent_msgsize(struct xfrm_state *x)
{
size_t replay_size = x->replay_esn ?
xfrm_replay_state_esn_len(x->replay_esn) :
sizeof(struct xfrm_replay_state);
return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
+ nla_total_size(replay_size)
+ nla_total_size(sizeof(struct xfrm_lifetime_cur))
+ nla_total_size(sizeof(struct xfrm_mark))
+ nla_total_size(4) /* XFRM_AE_RTHR */
+ nla_total_size(4); /* XFRM_AE_ETHR */
}
static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
{
struct xfrm_aevent_id *id;
struct nlmsghdr *nlh;
int err;
nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
if (nlh == NULL)
return -EMSGSIZE;
id = nlmsg_data(nlh);
memcpy(&id->sa_id.daddr, &x->id.daddr, sizeof(x->id.daddr));
id->sa_id.spi = x->id.spi;
id->sa_id.family = x->props.family;
id->sa_id.proto = x->id.proto;
memcpy(&id->saddr, &x->props.saddr, sizeof(x->props.saddr));
id->reqid = x->props.reqid;
id->flags = c->data.aevent;
if (x->replay_esn) {
err = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
xfrm_replay_state_esn_len(x->replay_esn),
x->replay_esn);
} else {
err = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
&x->replay);
}
if (err)
goto out_cancel;
err = nla_put(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft);
if (err)
goto out_cancel;
if (id->flags & XFRM_AE_RTHR) {
err = nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
if (err)
goto out_cancel;
}
if (id->flags & XFRM_AE_ETHR) {
err = nla_put_u32(skb, XFRMA_ETIMER_THRESH,
x->replay_maxage * 10 / HZ);
if (err)
goto out_cancel;
}
err = xfrm_mark_put(skb, &x->mark);
if (err)
goto out_cancel;
return nlmsg_end(skb, nlh);
out_cancel:
nlmsg_cancel(skb, nlh);
return err;
}
static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct xfrm_state *x;
struct sk_buff *r_skb;
int err;
struct km_event c;
u32 mark;
struct xfrm_mark m;
struct xfrm_aevent_id *p = nlmsg_data(nlh);
struct xfrm_usersa_id *id = &p->sa_id;
mark = xfrm_mark_get(attrs, &m);
x = xfrm_state_lookup(net, mark, &id->daddr, id->spi, id->proto, id->family);
if (x == NULL)
return -ESRCH;
r_skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
if (r_skb == NULL) {
xfrm_state_put(x);
return -ENOMEM;
}
/*
 * XXX: is this lock really needed? None of the other handlers
 * takes it (the concern is the state being updated while we are
 * still reading it). - jhs
 */
spin_lock_bh(&x->lock);
c.data.aevent = p->flags;
c.seq = nlh->nlmsg_seq;
c.pid = nlh->nlmsg_pid;
if (build_aevent(r_skb, x, &c) < 0)
BUG();
err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).pid);
spin_unlock_bh(&x->lock);
xfrm_state_put(x);
return err;
}
static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct xfrm_state *x;
struct km_event c;
int err = -EINVAL;
u32 mark = 0;
struct xfrm_mark m;
struct xfrm_aevent_id *p = nlmsg_data(nlh);
struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
if (!lt && !rp && !re)
return err;
/* pedantic mode - thou shalt sayeth replaceth */
if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
return err;
mark = xfrm_mark_get(attrs, &m);
x = xfrm_state_lookup(net, mark, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
if (x == NULL)
return -ESRCH;
if (x->km.state != XFRM_STATE_VALID)
goto out;
err = xfrm_replay_verify_len(x->replay_esn, rp);
if (err)
goto out;
spin_lock_bh(&x->lock);
xfrm_update_ae_params(x, attrs);
spin_unlock_bh(&x->lock);
c.event = nlh->nlmsg_type;
c.seq = nlh->nlmsg_seq;
c.pid = nlh->nlmsg_pid;
c.data.aevent = XFRM_AE_CU;
km_state_notify(x, &c);
err = 0;
out:
xfrm_state_put(x);
return err;
}
static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct km_event c;
u8 type = XFRM_POLICY_TYPE_MAIN;
int err;
struct xfrm_audit audit_info;
err = copy_from_user_policy_type(&type, attrs);
if (err)
return err;
audit_info.loginuid = audit_get_loginuid(current);
audit_info.sessionid = audit_get_sessionid(current);
security_task_getsecid(current, &audit_info.secid);
err = xfrm_policy_flush(net, type, &audit_info);
if (err) {
if (err == -ESRCH) /* empty table */
return 0;
return err;
}
c.data.type = type;
c.event = nlh->nlmsg_type;
c.seq = nlh->nlmsg_seq;
c.pid = nlh->nlmsg_pid;
c.net = net;
km_policy_notify(NULL, 0, &c);
return 0;
}
static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct xfrm_policy *xp;
struct xfrm_user_polexpire *up = nlmsg_data(nlh);
struct xfrm_userpolicy_info *p = &up->pol;
u8 type = XFRM_POLICY_TYPE_MAIN;
int err = -ENOENT;
struct xfrm_mark m;
u32 mark = xfrm_mark_get(attrs, &m);
err = copy_from_user_policy_type(&type, attrs);
if (err)
return err;
err = verify_policy_dir(p->dir);
if (err)
return err;
if (p->index)
xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err);
else {
struct nlattr *rt = attrs[XFRMA_SEC_CTX];
struct xfrm_sec_ctx *ctx;
err = verify_sec_ctx_len(attrs);
if (err)
return err;
ctx = NULL;
if (rt) {
struct xfrm_user_sec_ctx *uctx = nla_data(rt);
err = security_xfrm_policy_alloc(&ctx, uctx);
if (err)
return err;
}
xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir,
&p->sel, ctx, 0, &err);
security_xfrm_policy_free(ctx);
}
if (xp == NULL)
return -ENOENT;
if (unlikely(xp->walk.dead))
goto out;
err = 0;
if (up->hard) {
uid_t loginuid = audit_get_loginuid(current);
u32 sessionid = audit_get_sessionid(current);
u32 sid;
security_task_getsecid(current, &sid);
xfrm_policy_delete(xp, p->dir);
xfrm_audit_policy_delete(xp, 1, loginuid, sessionid, sid);
} else {
/* reset the timers here? */
WARN(1, "Don't know what to do with soft policy expire\n");
}
km_policy_expired(xp, p->dir, up->hard, current->pid);
out:
xfrm_pol_put(xp);
return err;
}
static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct xfrm_state *x;
int err;
struct xfrm_user_expire *ue = nlmsg_data(nlh);
struct xfrm_usersa_info *p = &ue->state;
struct xfrm_mark m;
u32 mark = xfrm_mark_get(attrs, &m);
x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family);
err = -ENOENT;
if (x == NULL)
return err;
spin_lock_bh(&x->lock);
err = -EINVAL;
if (x->km.state != XFRM_STATE_VALID)
goto out;
km_state_expired(x, ue->hard, current->pid);
if (ue->hard) {
uid_t loginuid = audit_get_loginuid(current);
u32 sessionid = audit_get_sessionid(current);
u32 sid;
security_task_getsecid(current, &sid);
__xfrm_state_delete(x);
xfrm_audit_state_delete(x, 1, loginuid, sessionid, sid);
}
err = 0;
out:
spin_unlock_bh(&x->lock);
xfrm_state_put(x);
return err;
}
static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct xfrm_policy *xp;
struct xfrm_user_tmpl *ut;
int i;
struct nlattr *rt = attrs[XFRMA_TMPL];
struct xfrm_mark mark;
struct xfrm_user_acquire *ua = nlmsg_data(nlh);
struct xfrm_state *x = xfrm_state_alloc(net);
int err = -ENOMEM;
if (!x)
goto nomem;
xfrm_mark_get(attrs, &mark);
err = verify_newpolicy_info(&ua->policy);
if (err)
goto bad_policy;
/* build an XP */
xp = xfrm_policy_construct(net, &ua->policy, attrs, &err);
if (!xp)
goto free_state;
memcpy(&x->id, &ua->id, sizeof(ua->id));
memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
xp->mark.m = x->mark.m = mark.m;
xp->mark.v = x->mark.v = mark.v;
ut = nla_data(rt);
/* extract the templates and for each call km_key */
for (i = 0; i < xp->xfrm_nr; i++, ut++) {
struct xfrm_tmpl *t = &xp->xfrm_vec[i];
memcpy(&x->id, &t->id, sizeof(x->id));
x->props.mode = t->mode;
x->props.reqid = t->reqid;
x->props.family = ut->family;
t->aalgos = ua->aalgos;
t->ealgos = ua->ealgos;
t->calgos = ua->calgos;
err = km_query(x, t, xp);
}
kfree(x);
kfree(xp);
return 0;
bad_policy:
WARN(1, "BAD policy passed\n");
free_state:
kfree(x);
nomem:
return err;
}
#ifdef CONFIG_XFRM_MIGRATE
static int copy_from_user_migrate(struct xfrm_migrate *ma,
struct xfrm_kmaddress *k,
struct nlattr **attrs, int *num)
{
struct nlattr *rt = attrs[XFRMA_MIGRATE];
struct xfrm_user_migrate *um;
int i, num_migrate;
if (k != NULL) {
struct xfrm_user_kmaddress *uk;
uk = nla_data(attrs[XFRMA_KMADDRESS]);
memcpy(&k->local, &uk->local, sizeof(k->local));
memcpy(&k->remote, &uk->remote, sizeof(k->remote));
k->family = uk->family;
k->reserved = uk->reserved;
}
um = nla_data(rt);
num_migrate = nla_len(rt) / sizeof(*um);
if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH)
return -EINVAL;
for (i = 0; i < num_migrate; i++, um++, ma++) {
memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));
ma->proto = um->proto;
ma->mode = um->mode;
ma->reqid = um->reqid;
ma->old_family = um->old_family;
ma->new_family = um->new_family;
}
*num = i;
return 0;
}
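/*
 * Note that, unlike the policy add/get/expire paths above, pi->dir is
 * handed to xfrm_migrate() without a verify_policy_dir() check.
 */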
static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct xfrm_userpolicy_id *pi = nlmsg_data(nlh);
struct xfrm_migrate m[XFRM_MAX_DEPTH];
struct xfrm_kmaddress km, *kmp;
u8 type;
int err;
int n = 0;
if (attrs[XFRMA_MIGRATE] == NULL)
return -EINVAL;
kmp = attrs[XFRMA_KMADDRESS] ? &km : NULL;
err = copy_from_user_policy_type(&type, attrs);
if (err)
return err;
err = copy_from_user_migrate((struct xfrm_migrate *)m, kmp, attrs, &n);
if (err)
return err;
if (!n)
return 0;
xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp);
return 0;
}
#else
static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
return -ENOPROTOOPT;
}
#endif
#ifdef CONFIG_XFRM_MIGRATE
static int copy_to_user_migrate(const struct xfrm_migrate *m, struct sk_buff *skb)
{
struct xfrm_user_migrate um;
memset(&um, 0, sizeof(um));
um.proto = m->proto;
um.mode = m->mode;
um.reqid = m->reqid;
um.old_family = m->old_family;
memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
um.new_family = m->new_family;
memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));
return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um);
}
static int copy_to_user_kmaddress(const struct xfrm_kmaddress *k, struct sk_buff *skb)
{
struct xfrm_user_kmaddress uk;
memset(&uk, 0, sizeof(uk));
uk.family = k->family;
uk.reserved = k->reserved;
memcpy(&uk.local, &k->local, sizeof(uk.local));
memcpy(&uk.remote, &k->remote, sizeof(uk.remote));
return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk);
}
static inline size_t xfrm_migrate_msgsize(int num_migrate, int with_kma)
{
return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id))
+ (with_kma ? nla_total_size(sizeof(struct xfrm_kmaddress)) : 0)
+ nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate)
+ userpolicy_type_attrsize();
}
static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
int num_migrate, const struct xfrm_kmaddress *k,
const struct xfrm_selector *sel, u8 dir, u8 type)
{
const struct xfrm_migrate *mp;
struct xfrm_userpolicy_id *pol_id;
struct nlmsghdr *nlh;
int i, err;
nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
if (nlh == NULL)
return -EMSGSIZE;
pol_id = nlmsg_data(nlh);
/* copy data from selector, dir, and type to the pol_id */
memset(pol_id, 0, sizeof(*pol_id));
memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
pol_id->dir = dir;
if (k != NULL) {
err = copy_to_user_kmaddress(k, skb);
if (err)
goto out_cancel;
}
err = copy_to_user_policy_type(type, skb);
if (err)
goto out_cancel;
for (i = 0, mp = m; i < num_migrate; i++, mp++) {
err = copy_to_user_migrate(mp, skb);
if (err)
goto out_cancel;
}
return nlmsg_end(skb, nlh);
out_cancel:
nlmsg_cancel(skb, nlh);
return err;
}
static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
const struct xfrm_migrate *m, int num_migrate,
const struct xfrm_kmaddress *k)
{
struct net *net = &init_net;
struct sk_buff *skb;
skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k), GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
/* build migrate */
if (build_migrate(skb, m, num_migrate, k, sel, dir, type) < 0)
BUG();
return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MIGRATE, GFP_ATOMIC);
}
#else
static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
const struct xfrm_migrate *m, int num_migrate,
const struct xfrm_kmaddress *k)
{
return -ENOPROTOOPT;
}
#endif
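/*
 * Minimum fixed-header length for each message type, indexed by
 * type - XFRM_MSG_BASE; xfrm_user_rcv_msg() hands this to nlmsg_parse()
 * so undersized messages are rejected before dispatch.
 */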
#define XMSGSIZE(type) sizeof(struct type)
static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
[XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
[XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
[XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
[XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
[XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
[XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
[XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
[XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
[XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
[XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
[XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
[XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
[XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
[XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
[XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
[XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
[XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
[XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = sizeof(u32),
[XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
};
#undef XMSGSIZE
static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
[XFRMA_SA] = { .len = sizeof(struct xfrm_usersa_info)},
[XFRMA_POLICY] = { .len = sizeof(struct xfrm_userpolicy_info)},
[XFRMA_LASTUSED] = { .type = NLA_U64},
[XFRMA_ALG_AUTH_TRUNC] = { .len = sizeof(struct xfrm_algo_auth)},
[XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) },
[XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) },
[XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) },
[XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) },
[XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) },
[XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) },
[XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) },
[XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) },
[XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) },
[XFRMA_REPLAY_THRESH] = { .type = NLA_U32 },
[XFRMA_ETIMER_THRESH] = { .type = NLA_U32 },
[XFRMA_SRCADDR] = { .len = sizeof(xfrm_address_t) },
[XFRMA_COADDR] = { .len = sizeof(xfrm_address_t) },
[XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)},
[XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) },
[XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) },
[XFRMA_MARK] = { .len = sizeof(struct xfrm_mark) },
[XFRMA_TFCPAD] = { .type = NLA_U32 },
[XFRMA_REPLAY_ESN_VAL] = { .len = sizeof(struct xfrm_replay_state_esn) },
};
static struct xfrm_link {
int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
int (*dump)(struct sk_buff *, struct netlink_callback *);
int (*done)(struct netlink_callback *);
} xfrm_dispatch[XFRM_NR_MSGTYPES] = {
[XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
[XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa },
[XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
.dump = xfrm_dump_sa,
.done = xfrm_dump_sa_done },
[XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
[XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
[XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
.dump = xfrm_dump_policy,
.done = xfrm_dump_policy_done },
[XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
[XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire },
[XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
[XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
[XFRM_MSG_UPDSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
[XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire},
[XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa },
[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy },
[XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae },
[XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae },
[XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate },
[XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo },
[XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo },
};
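/*
 * Top-level netlink handler: validate the message type, require
 * CAP_NET_ADMIN for every operation (GET included), start a dump for
 * NLM_F_DUMP GETSA/GETPOLICY requests, and otherwise parse the
 * attributes and invoke the per-type doit() handler.
 */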
static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
struct nlattr *attrs[XFRMA_MAX+1];
struct xfrm_link *link;
int type, err;
type = nlh->nlmsg_type;
if (type > XFRM_MSG_MAX)
return -EINVAL;
type -= XFRM_MSG_BASE;
link = &xfrm_dispatch[type];
/* All operations require privileges, even GET */
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
(nlh->nlmsg_flags & NLM_F_DUMP)) {
if (link->dump == NULL)
return -EINVAL;
{
struct netlink_dump_control c = {
.dump = link->dump,
.done = link->done,
};
return netlink_dump_start(net->xfrm.nlsk, skb, nlh, &c);
}
}
err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX,
xfrma_policy);
if (err < 0)
return err;
if (link->doit == NULL)
return -EINVAL;
return link->doit(skb, nlh, attrs);
}
static void xfrm_netlink_rcv(struct sk_buff *skb)
{
mutex_lock(&xfrm_cfg_mutex);
netlink_rcv_skb(skb, &xfrm_user_rcv_msg);
mutex_unlock(&xfrm_cfg_mutex);
}
static inline size_t xfrm_expire_msgsize(void)
{
return NLMSG_ALIGN(sizeof(struct xfrm_user_expire))
+ nla_total_size(sizeof(struct xfrm_mark));
}
static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
{
struct xfrm_user_expire *ue;
struct nlmsghdr *nlh;
int err;
nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
if (nlh == NULL)
return -EMSGSIZE;
ue = nlmsg_data(nlh);
copy_to_user_state(x, &ue->state);
ue->hard = (c->data.hard != 0) ? 1 : 0;
err = xfrm_mark_put(skb, &x->mark);
if (err)
return err;
return nlmsg_end(skb, nlh);
}
static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
{
struct net *net = xs_net(x);
struct sk_buff *skb;
skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
if (build_expire(skb, x, c) < 0) {
kfree_skb(skb);
return -EMSGSIZE;
}
return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
}
static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c)
{
struct net *net = xs_net(x);
struct sk_buff *skb;
skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
if (build_aevent(skb, x, c) < 0)
BUG();
return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC);
}
static int xfrm_notify_sa_flush(const struct km_event *c)
{
struct net *net = c->net;
struct xfrm_usersa_flush *p;
struct nlmsghdr *nlh;
struct sk_buff *skb;
int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush));
skb = nlmsg_new(len, GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
if (nlh == NULL) {
kfree_skb(skb);
return -EMSGSIZE;
}
p = nlmsg_data(nlh);
p->proto = c->data.proto;
nlmsg_end(skb, nlh);
return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
}
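/*
 * Worst-case attribute payload for one SA. As the comment above
 * copy_to_user_state_extra() warns, the two must be kept in sync
 * attribute for attribute.
 */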
static inline size_t xfrm_sa_len(struct xfrm_state *x)
{
size_t l = 0;
if (x->aead)
l += nla_total_size(aead_len(x->aead));
if (x->aalg) {
l += nla_total_size(sizeof(struct xfrm_algo) +
(x->aalg->alg_key_len + 7) / 8);
l += nla_total_size(xfrm_alg_auth_len(x->aalg));
}
if (x->ealg)
l += nla_total_size(xfrm_alg_len(x->ealg));
if (x->calg)
l += nla_total_size(sizeof(*x->calg));
if (x->encap)
l += nla_total_size(sizeof(*x->encap));
if (x->tfcpad)
l += nla_total_size(sizeof(x->tfcpad));
if (x->replay_esn)
l += nla_total_size(xfrm_replay_state_esn_len(x->replay_esn));
if (x->security)
l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
x->security->ctx_len);
if (x->coaddr)
l += nla_total_size(sizeof(*x->coaddr));
/* Must count x->lastused as it may become non-zero behind our back. */
l += nla_total_size(sizeof(u64));
return l;
}
static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
{
struct net *net = xs_net(x);
struct xfrm_usersa_info *p;
struct xfrm_usersa_id *id;
struct nlmsghdr *nlh;
struct sk_buff *skb;
int len = xfrm_sa_len(x);
int headlen, err;
headlen = sizeof(*p);
if (c->event == XFRM_MSG_DELSA) {
len += nla_total_size(headlen);
headlen = sizeof(*id);
len += nla_total_size(sizeof(struct xfrm_mark));
}
len += NLMSG_ALIGN(headlen);
skb = nlmsg_new(len, GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
err = -EMSGSIZE;
if (nlh == NULL)
goto out_free_skb;
p = nlmsg_data(nlh);
if (c->event == XFRM_MSG_DELSA) {
struct nlattr *attr;
id = nlmsg_data(nlh);
memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
id->spi = x->id.spi;
id->family = x->props.family;
id->proto = x->id.proto;
attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
err = -EMSGSIZE;
if (attr == NULL)
goto out_free_skb;
p = nla_data(attr);
}
err = copy_to_user_state_extra(x, p, skb);
if (err)
goto out_free_skb;
nlmsg_end(skb, nlh);
return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
out_free_skb:
kfree_skb(skb);
return err;
}
static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c)
{
switch (c->event) {
case XFRM_MSG_EXPIRE:
return xfrm_exp_state_notify(x, c);
case XFRM_MSG_NEWAE:
return xfrm_aevent_state_notify(x, c);
case XFRM_MSG_DELSA:
case XFRM_MSG_UPDSA:
case XFRM_MSG_NEWSA:
return xfrm_notify_sa(x, c);
case XFRM_MSG_FLUSHSA:
return xfrm_notify_sa_flush(c);
default:
printk(KERN_NOTICE "xfrm_user: Unknown SA event %d\n",
c->event);
break;
}
return 0;
}
static inline size_t xfrm_acquire_msgsize(struct xfrm_state *x,
struct xfrm_policy *xp)
{
return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire))
+ nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
+ nla_total_size(sizeof(struct xfrm_mark))
+ nla_total_size(xfrm_user_sec_ctx_size(x->security))
+ userpolicy_type_attrsize();
}
static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_tmpl *xt, struct xfrm_policy *xp,
int dir)
{
__u32 seq = xfrm_get_acqseq();
struct xfrm_user_acquire *ua;
struct nlmsghdr *nlh;
int err;
nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0);
if (nlh == NULL)
return -EMSGSIZE;
ua = nlmsg_data(nlh);
memcpy(&ua->id, &x->id, sizeof(ua->id));
memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
copy_to_user_policy(xp, &ua->policy, dir);
ua->aalgos = xt->aalgos;
ua->ealgos = xt->ealgos;
ua->calgos = xt->calgos;
ua->seq = x->km.seq = seq;
err = copy_to_user_tmpl(xp, skb);
if (!err)
err = copy_to_user_state_sec_ctx(x, skb);
if (!err)
err = copy_to_user_policy_type(xp->type, skb);
if (!err)
err = xfrm_mark_put(skb, &xp->mark);
if (err) {
nlmsg_cancel(skb, nlh);
return err;
}
return nlmsg_end(skb, nlh);
}
static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
struct xfrm_policy *xp, int dir)
{
struct net *net = xs_net(x);
struct sk_buff *skb;
skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
if (build_acquire(skb, x, xt, xp, dir) < 0)
BUG();
return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC);
}
/* User gives us xfrm_user_policy_info followed by an array of 0
* or more templates.
*/
static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
u8 *data, int len, int *dir)
{
struct net *net = sock_net(sk);
struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
struct xfrm_policy *xp;
int nr;
switch (sk->sk_family) {
case AF_INET:
if (opt != IP_XFRM_POLICY) {
*dir = -EOPNOTSUPP;
return NULL;
}
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
if (opt != IPV6_XFRM_POLICY) {
*dir = -EOPNOTSUPP;
return NULL;
}
break;
#endif
default:
*dir = -EINVAL;
return NULL;
}
*dir = -EINVAL;
if (len < sizeof(*p) ||
verify_newpolicy_info(p))
return NULL;
nr = ((len - sizeof(*p)) / sizeof(*ut));
if (validate_tmpl(nr, ut, p->sel.family))
return NULL;
if (p->dir > XFRM_POLICY_OUT)
return NULL;
xp = xfrm_policy_alloc(net, GFP_ATOMIC);
if (xp == NULL) {
*dir = -ENOBUFS;
return NULL;
}
copy_from_user_policy(xp, p);
xp->type = XFRM_POLICY_TYPE_MAIN;
copy_templates(xp, ut, nr);
*dir = p->dir;
return xp;
}
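/*
 * Illustrative sketch (not part of the original file): roughly how a
 * userspace caller lays out the buffer consumed above -- an
 * xfrm_userpolicy_info header followed by zero or more xfrm_user_tmpl
 * entries, passed in via the IP_XFRM_POLICY socket option.  Header and
 * constant choices are assumptions from linux/in.h and linux/xfrm.h;
 * guarded out so it never affects the kernel build.
 */
#if 0 /* example only */
#include <string.h>
#include <sys/socket.h>
#include <linux/in.h>
#include <linux/xfrm.h>

static int set_bypass_policy(int fd)
{
	struct xfrm_userpolicy_info pol;	/* no trailing templates */

	memset(&pol, 0, sizeof(pol));
	pol.sel.family = AF_INET;		/* match all IPv4 traffic */
	pol.action = XFRM_POLICY_ALLOW;
	pol.dir = XFRM_POLICY_OUT;

	/* len == sizeof(pol) makes nr == 0 in the parser above */
	return setsockopt(fd, IPPROTO_IP, IP_XFRM_POLICY, &pol, sizeof(pol));
}
#endif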
static inline size_t xfrm_polexpire_msgsize(struct xfrm_policy *xp)
{
return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire))
+ nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
+ nla_total_size(xfrm_user_sec_ctx_size(xp->security))
+ nla_total_size(sizeof(struct xfrm_mark))
+ userpolicy_type_attrsize();
}
static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
int dir, const struct km_event *c)
{
struct xfrm_user_polexpire *upe;
int hard = c->data.hard;
struct nlmsghdr *nlh;
int err;
nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
if (nlh == NULL)
return -EMSGSIZE;
upe = nlmsg_data(nlh);
copy_to_user_policy(xp, &upe->pol, dir);
err = copy_to_user_tmpl(xp, skb);
if (!err)
err = copy_to_user_sec_ctx(xp, skb);
if (!err)
err = copy_to_user_policy_type(xp->type, skb);
if (!err)
err = xfrm_mark_put(skb, &xp->mark);
if (err) {
nlmsg_cancel(skb, nlh);
return err;
}
upe->hard = !!hard;
return nlmsg_end(skb, nlh);
}
static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
struct net *net = xp_net(xp);
struct sk_buff *skb;
skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
if (build_polexpire(skb, xp, dir, c) < 0)
BUG();
return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
}
static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
struct net *net = xp_net(xp);
struct xfrm_userpolicy_info *p;
struct xfrm_userpolicy_id *id;
struct nlmsghdr *nlh;
struct sk_buff *skb;
int headlen, err;
headlen = sizeof(*p);
if (c->event == XFRM_MSG_DELPOLICY) {
len += nla_total_size(headlen);
headlen = sizeof(*id);
}
len += userpolicy_type_attrsize();
len += nla_total_size(sizeof(struct xfrm_mark));
len += NLMSG_ALIGN(headlen);
skb = nlmsg_new(len, GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
err = -EMSGSIZE;
if (nlh == NULL)
goto out_free_skb;
p = nlmsg_data(nlh);
if (c->event == XFRM_MSG_DELPOLICY) {
struct nlattr *attr;
id = nlmsg_data(nlh);
memset(id, 0, sizeof(*id));
id->dir = dir;
if (c->data.byid)
id->index = xp->index;
else
memcpy(&id->sel, &xp->selector, sizeof(id->sel));
attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
err = -EMSGSIZE;
if (attr == NULL)
goto out_free_skb;
p = nla_data(attr);
}
copy_to_user_policy(xp, p, dir);
err = copy_to_user_tmpl(xp, skb);
if (!err)
err = copy_to_user_policy_type(xp->type, skb);
if (!err)
err = xfrm_mark_put(skb, &xp->mark);
if (err)
goto out_free_skb;
nlmsg_end(skb, nlh);
return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
out_free_skb:
kfree_skb(skb);
return err;
}
static int xfrm_notify_policy_flush(const struct km_event *c)
{
struct net *net = c->net;
struct nlmsghdr *nlh;
struct sk_buff *skb;
int err;
skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
err = -EMSGSIZE;
if (nlh == NULL)
goto out_free_skb;
err = copy_to_user_policy_type(c->data.type, skb);
if (err)
goto out_free_skb;
nlmsg_end(skb, nlh);
return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
out_free_skb:
kfree_skb(skb);
return err;
}
static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
switch (c->event) {
case XFRM_MSG_NEWPOLICY:
case XFRM_MSG_UPDPOLICY:
case XFRM_MSG_DELPOLICY:
return xfrm_notify_policy(xp, dir, c);
case XFRM_MSG_FLUSHPOLICY:
return xfrm_notify_policy_flush(c);
case XFRM_MSG_POLEXPIRE:
return xfrm_exp_policy_notify(xp, dir, c);
default:
printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n",
c->event);
}
return 0;
}
static inline size_t xfrm_report_msgsize(void)
{
return NLMSG_ALIGN(sizeof(struct xfrm_user_report));
}
static int build_report(struct sk_buff *skb, u8 proto,
struct xfrm_selector *sel, xfrm_address_t *addr)
{
struct xfrm_user_report *ur;
struct nlmsghdr *nlh;
nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0);
if (nlh == NULL)
return -EMSGSIZE;
ur = nlmsg_data(nlh);
ur->proto = proto;
memcpy(&ur->sel, sel, sizeof(ur->sel));
if (addr) {
int err = nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr);
if (err) {
nlmsg_cancel(skb, nlh);
return err;
}
}
return nlmsg_end(skb, nlh);
}
static int xfrm_send_report(struct net *net, u8 proto,
struct xfrm_selector *sel, xfrm_address_t *addr)
{
struct sk_buff *skb;
skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
if (build_report(skb, proto, sel, addr) < 0)
BUG();
return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_REPORT, GFP_ATOMIC);
}
static inline size_t xfrm_mapping_msgsize(void)
{
return NLMSG_ALIGN(sizeof(struct xfrm_user_mapping));
}
static int build_mapping(struct sk_buff *skb, struct xfrm_state *x,
xfrm_address_t *new_saddr, __be16 new_sport)
{
struct xfrm_user_mapping *um;
struct nlmsghdr *nlh;
nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MAPPING, sizeof(*um), 0);
if (nlh == NULL)
return -EMSGSIZE;
um = nlmsg_data(nlh);
memcpy(&um->id.daddr, &x->id.daddr, sizeof(um->id.daddr));
um->id.spi = x->id.spi;
um->id.family = x->props.family;
um->id.proto = x->id.proto;
memcpy(&um->new_saddr, new_saddr, sizeof(um->new_saddr));
memcpy(&um->old_saddr, &x->props.saddr, sizeof(um->old_saddr));
um->new_sport = new_sport;
um->old_sport = x->encap->encap_sport;
um->reqid = x->props.reqid;
return nlmsg_end(skb, nlh);
}
static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
__be16 sport)
{
struct net *net = xs_net(x);
struct sk_buff *skb;
if (x->id.proto != IPPROTO_ESP)
return -EINVAL;
if (!x->encap)
return -EINVAL;
skb = nlmsg_new(xfrm_mapping_msgsize(), GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
if (build_mapping(skb, x, ipaddr, sport) < 0)
BUG();
return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MAPPING, GFP_ATOMIC);
}
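/*
 * Key manager registered with the xfrm core: each km_event raised by the
 * state and policy engines is translated into one of the netlink
 * broadcasts implemented above.
 */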
static struct xfrm_mgr netlink_mgr = {
.id = "netlink",
.notify = xfrm_send_state_notify,
.acquire = xfrm_send_acquire,
.compile_policy = xfrm_compile_policy,
.notify_policy = xfrm_send_policy_notify,
.report = xfrm_send_report,
.migrate = xfrm_send_migrate,
.new_mapping = xfrm_send_mapping,
};
static int __net_init xfrm_user_net_init(struct net *net)
{
struct sock *nlsk;
struct netlink_kernel_cfg cfg = {
.groups = XFRMNLGRP_MAX,
.input = xfrm_netlink_rcv,
};
nlsk = netlink_kernel_create(net, NETLINK_XFRM, THIS_MODULE, &cfg);
if (nlsk == NULL)
return -ENOMEM;
net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
rcu_assign_pointer(net->xfrm.nlsk, nlsk);
return 0;
}
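/*
 * Namespace teardown is two phase: first clear the RCU-protected socket
 * pointer for every exiting namespace, wait out readers with
 * synchronize_net(), and only then release the stashed sockets.
 */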
static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list)
{
struct net *net;
list_for_each_entry(net, net_exit_list, exit_list)
RCU_INIT_POINTER(net->xfrm.nlsk, NULL);
synchronize_net();
list_for_each_entry(net, net_exit_list, exit_list)
netlink_kernel_release(net->xfrm.nlsk_stash);
}
static struct pernet_operations xfrm_user_net_ops = {
.init = xfrm_user_net_init,
.exit_batch = xfrm_user_net_exit,
};
static int __init xfrm_user_init(void)
{
int rv;
printk(KERN_INFO "Initializing XFRM netlink socket\n");
rv = register_pernet_subsys(&xfrm_user_net_ops);
if (rv < 0)
return rv;
rv = xfrm_register_km(&netlink_mgr);
if (rv < 0)
unregister_pernet_subsys(&xfrm_user_net_ops);
return rv;
}
static void __exit xfrm_user_exit(void)
{
xfrm_unregister_km(&netlink_mgr);
unregister_pernet_subsys(&xfrm_user_net_ops);
}
module_init(xfrm_user_init);
module_exit(xfrm_user_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_3822_1 |
crossvul-cpp_data_bad_5682_0 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
* Copyright (C) Darryl Miles G7LED (dlm@g7led.demon.co.uk)
* Copyright (C) Steven Whitehouse GW7RRM (stevew@acm.org)
* Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
* Copyright (C) Hans-Joachim Hetscher DD8NE (dd8ne@bnv-bamberg.de)
* Copyright (C) Hans Alblas PE1AYX (hans@esrac.ele.tue.nl)
* Copyright (C) Frederic Rible F1OAT (frible@teaser.fr)
*/
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h> /* For TIOCINQ/OUTQ */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/netfilter.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <net/net_namespace.h>
#include <net/tcp_states.h>
#include <net/ip.h>
#include <net/arp.h>
HLIST_HEAD(ax25_list);
DEFINE_SPINLOCK(ax25_list_lock);
static const struct proto_ops ax25_proto_ops;
static void ax25_free_sock(struct sock *sk)
{
ax25_cb_put(ax25_sk(sk));
}
/*
* Socket removal during an interrupt is now safe.
*/
static void ax25_cb_del(ax25_cb *ax25)
{
if (!hlist_unhashed(&ax25->ax25_node)) {
spin_lock_bh(&ax25_list_lock);
hlist_del_init(&ax25->ax25_node);
spin_unlock_bh(&ax25_list_lock);
ax25_cb_put(ax25);
}
}
/*
* Kill all bound sockets on a dropped device.
*/
static void ax25_kill_by_device(struct net_device *dev)
{
ax25_dev *ax25_dev;
ax25_cb *s;
if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
return;
spin_lock_bh(&ax25_list_lock);
again:
ax25_for_each(s, &ax25_list) {
if (s->ax25_dev == ax25_dev) {
s->ax25_dev = NULL;
spin_unlock_bh(&ax25_list_lock);
ax25_disconnect(s, ENETUNREACH);
spin_lock_bh(&ax25_list_lock);
/* The entry could have been deleted from the
* list meanwhile and thus the next pointer is
* no longer valid. Play it safe and restart
* the scan. Forward progress is ensured
* because we set s->ax25_dev to NULL and we
* are never passed a NULL 'dev' argument.
*/
goto again;
}
}
spin_unlock_bh(&ax25_list_lock);
}
/*
* Handle device status changes.
*/
static int ax25_device_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *dev = (struct net_device *)ptr;
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
/* Reject non AX.25 devices */
if (dev->type != ARPHRD_AX25)
return NOTIFY_DONE;
switch (event) {
case NETDEV_UP:
ax25_dev_device_up(dev);
break;
case NETDEV_DOWN:
ax25_kill_by_device(dev);
ax25_rt_device_down(dev);
ax25_dev_device_down(dev);
break;
default:
break;
}
return NOTIFY_DONE;
}
/*
* Add a socket to the bound sockets list.
*/
void ax25_cb_add(ax25_cb *ax25)
{
spin_lock_bh(&ax25_list_lock);
ax25_cb_hold(ax25);
hlist_add_head(&ax25->ax25_node, &ax25_list);
spin_unlock_bh(&ax25_list_lock);
}
/*
* Find a socket that wants to accept the SABM we have just
* received.
*/
struct sock *ax25_find_listener(ax25_address *addr, int digi,
struct net_device *dev, int type)
{
ax25_cb *s;
spin_lock(&ax25_list_lock);
ax25_for_each(s, &ax25_list) {
if ((s->iamdigi && !digi) || (!s->iamdigi && digi))
continue;
if (s->sk && !ax25cmp(&s->source_addr, addr) &&
s->sk->sk_type == type && s->sk->sk_state == TCP_LISTEN) {
/* If device is null we match any device */
if (s->ax25_dev == NULL || s->ax25_dev->dev == dev) {
sock_hold(s->sk);
spin_unlock(&ax25_list_lock);
return s->sk;
}
}
}
spin_unlock(&ax25_list_lock);
return NULL;
}
/*
* Find an AX.25 socket given both ends.
*/
struct sock *ax25_get_socket(ax25_address *my_addr, ax25_address *dest_addr,
int type)
{
struct sock *sk = NULL;
ax25_cb *s;
spin_lock(&ax25_list_lock);
ax25_for_each(s, &ax25_list) {
if (s->sk && !ax25cmp(&s->source_addr, my_addr) &&
!ax25cmp(&s->dest_addr, dest_addr) &&
s->sk->sk_type == type) {
sk = s->sk;
sock_hold(sk);
break;
}
}
spin_unlock(&ax25_list_lock);
return sk;
}
/*
* Find an AX.25 control block given both ends. It will only pick up
* floating AX.25 control blocks or non Raw socket bound control blocks.
*/
ax25_cb *ax25_find_cb(ax25_address *src_addr, ax25_address *dest_addr,
ax25_digi *digi, struct net_device *dev)
{
ax25_cb *s;
spin_lock_bh(&ax25_list_lock);
ax25_for_each(s, &ax25_list) {
if (s->sk && s->sk->sk_type != SOCK_SEQPACKET)
continue;
if (s->ax25_dev == NULL)
continue;
if (ax25cmp(&s->source_addr, src_addr) == 0 && ax25cmp(&s->dest_addr, dest_addr) == 0 && s->ax25_dev->dev == dev) {
if (digi != NULL && digi->ndigi != 0) {
if (s->digipeat == NULL)
continue;
if (ax25digicmp(s->digipeat, digi) != 0)
continue;
} else {
if (s->digipeat != NULL && s->digipeat->ndigi != 0)
continue;
}
ax25_cb_hold(s);
spin_unlock_bh(&ax25_list_lock);
return s;
}
}
spin_unlock_bh(&ax25_list_lock);
return NULL;
}
EXPORT_SYMBOL(ax25_find_cb);
void ax25_send_to_raw(ax25_address *addr, struct sk_buff *skb, int proto)
{
ax25_cb *s;
struct sk_buff *copy;
spin_lock(&ax25_list_lock);
ax25_for_each(s, &ax25_list) {
if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 &&
s->sk->sk_type == SOCK_RAW &&
s->sk->sk_protocol == proto &&
s->ax25_dev->dev == skb->dev &&
atomic_read(&s->sk->sk_rmem_alloc) <= s->sk->sk_rcvbuf) {
if ((copy = skb_clone(skb, GFP_ATOMIC)) == NULL)
continue;
if (sock_queue_rcv_skb(s->sk, copy) != 0)
kfree_skb(copy);
}
}
spin_unlock(&ax25_list_lock);
}
/*
* Deferred destroy.
*/
void ax25_destroy_socket(ax25_cb *);
/*
* Handler for deferred kills.
*/
static void ax25_destroy_timer(unsigned long data)
{
ax25_cb *ax25=(ax25_cb *)data;
struct sock *sk;
sk=ax25->sk;
bh_lock_sock(sk);
sock_hold(sk);
ax25_destroy_socket(ax25);
bh_unlock_sock(sk);
sock_put(sk);
}
/*
* This is called from user mode and the timers. Thus it protects itself
* against interrupt users but doesn't worry about being called during
* work. Once it is removed from the queue no interrupt or bottom half
* will touch it and we are (fairly 8-) ) safe.
*/
void ax25_destroy_socket(ax25_cb *ax25)
{
struct sk_buff *skb;
ax25_cb_del(ax25);
ax25_stop_heartbeat(ax25);
ax25_stop_t1timer(ax25);
ax25_stop_t2timer(ax25);
ax25_stop_t3timer(ax25);
ax25_stop_idletimer(ax25);
ax25_clear_queues(ax25); /* Flush the queues */
if (ax25->sk != NULL) {
while ((skb = skb_dequeue(&ax25->sk->sk_receive_queue)) != NULL) {
if (skb->sk != ax25->sk) {
/* A pending connection */
ax25_cb *sax25 = ax25_sk(skb->sk);
/* Queue the unaccepted socket for death */
sock_orphan(skb->sk);
/* 9A4GL: hack to release unaccepted sockets */
skb->sk->sk_state = TCP_LISTEN;
ax25_start_heartbeat(sax25);
sax25->state = AX25_STATE_0;
}
kfree_skb(skb);
}
skb_queue_purge(&ax25->sk->sk_write_queue);
}
if (ax25->sk != NULL) {
if (sk_has_allocations(ax25->sk)) {
/* Defer: outstanding buffers */
setup_timer(&ax25->dtimer, ax25_destroy_timer,
(unsigned long)ax25);
ax25->dtimer.expires = jiffies + 2 * HZ;
add_timer(&ax25->dtimer);
} else {
struct sock *sk=ax25->sk;
ax25->sk=NULL;
sock_put(sk);
}
} else {
ax25_cb_put(ax25);
}
}
/*
* dl1bke 960311: set parameters for existing AX.25 connections,
* includes a KILL command to abort any connection.
* VERY useful for debugging ;-)
*/
static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg)
{
struct ax25_ctl_struct ax25_ctl;
ax25_digi digi;
ax25_dev *ax25_dev;
ax25_cb *ax25;
unsigned int k;
int ret = 0;
if (copy_from_user(&ax25_ctl, arg, sizeof(ax25_ctl)))
return -EFAULT;
if ((ax25_dev = ax25_addr_ax25dev(&ax25_ctl.port_addr)) == NULL)
return -ENODEV;
if (ax25_ctl.digi_count > AX25_MAX_DIGIS)
return -EINVAL;
if (ax25_ctl.arg > ULONG_MAX / HZ && ax25_ctl.cmd != AX25_KILL)
return -EINVAL;
digi.ndigi = ax25_ctl.digi_count;
for (k = 0; k < digi.ndigi; k++)
digi.calls[k] = ax25_ctl.digi_addr[k];
if ((ax25 = ax25_find_cb(&ax25_ctl.source_addr, &ax25_ctl.dest_addr, &digi, ax25_dev->dev)) == NULL)
return -ENOTCONN;
switch (ax25_ctl.cmd) {
case AX25_KILL:
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
#ifdef CONFIG_AX25_DAMA_SLAVE
if (ax25_dev->dama.slave && ax25->ax25_dev->values[AX25_VALUES_PROTOCOL] == AX25_PROTO_DAMA_SLAVE)
ax25_dama_off(ax25);
#endif
ax25_disconnect(ax25, ENETRESET);
break;
case AX25_WINDOW:
if (ax25->modulus == AX25_MODULUS) {
if (ax25_ctl.arg < 1 || ax25_ctl.arg > 7)
goto einval_put;
} else {
if (ax25_ctl.arg < 1 || ax25_ctl.arg > 63)
goto einval_put;
}
ax25->window = ax25_ctl.arg;
break;
case AX25_T1:
if (ax25_ctl.arg < 1 || ax25_ctl.arg > ULONG_MAX / HZ)
goto einval_put;
ax25->rtt = (ax25_ctl.arg * HZ) / 2;
ax25->t1 = ax25_ctl.arg * HZ;
break;
case AX25_T2:
if (ax25_ctl.arg < 1 || ax25_ctl.arg > ULONG_MAX / HZ)
goto einval_put;
ax25->t2 = ax25_ctl.arg * HZ;
break;
case AX25_N2:
if (ax25_ctl.arg < 1 || ax25_ctl.arg > 31)
goto einval_put;
ax25->n2count = 0;
ax25->n2 = ax25_ctl.arg;
break;
case AX25_T3:
if (ax25_ctl.arg > ULONG_MAX / HZ)
goto einval_put;
ax25->t3 = ax25_ctl.arg * HZ;
break;
case AX25_IDLE:
if (ax25_ctl.arg > ULONG_MAX / (60 * HZ))
goto einval_put;
ax25->idle = ax25_ctl.arg * 60 * HZ;
break;
case AX25_PACLEN:
if (ax25_ctl.arg < 16 || ax25_ctl.arg > 65535)
goto einval_put;
ax25->paclen = ax25_ctl.arg;
break;
default:
goto einval_put;
}
out_put:
ax25_cb_put(ax25);
return ret;
einval_put:
ret = -EINVAL;
goto out_put;
}
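/*
 * Illustrative sketch (not part of the original file): how the ioctl
 * above is reached from userspace -- identify an existing connection by
 * port, source and destination callsign, then set one parameter.  Field
 * and constant names are assumptions from linux/ax25.h; guarded out of
 * the build.
 */
#if 0 /* example only */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/ax25.h>

static int set_t1(int fd, const ax25_address *port,
		  const ax25_address *src, const ax25_address *dst)
{
	struct ax25_ctl_struct ctl;

	memset(&ctl, 0, sizeof(ctl));
	ctl.port_addr   = *port;
	ctl.source_addr = *src;
	ctl.dest_addr   = *dst;
	ctl.cmd = AX25_T1;	/* handled by the AX25_T1 case above */
	ctl.arg = 10;		/* seconds; the kernel scales by HZ */

	return ioctl(fd, SIOCAX25CTLCON, &ctl);
}
#endif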
static void ax25_fillin_cb_from_dev(ax25_cb *ax25, ax25_dev *ax25_dev)
{
ax25->rtt = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T1]) / 2;
ax25->t1 = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T1]);
ax25->t2 = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T2]);
ax25->t3 = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T3]);
ax25->n2 = ax25_dev->values[AX25_VALUES_N2];
ax25->paclen = ax25_dev->values[AX25_VALUES_PACLEN];
ax25->idle = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_IDLE]);
ax25->backoff = ax25_dev->values[AX25_VALUES_BACKOFF];
if (ax25_dev->values[AX25_VALUES_AXDEFMODE]) {
ax25->modulus = AX25_EMODULUS;
ax25->window = ax25_dev->values[AX25_VALUES_EWINDOW];
} else {
ax25->modulus = AX25_MODULUS;
ax25->window = ax25_dev->values[AX25_VALUES_WINDOW];
}
}
/*
 * Fill in a created AX.25 control block with the default
 * values for a particular device.
*/
void ax25_fillin_cb(ax25_cb *ax25, ax25_dev *ax25_dev)
{
ax25->ax25_dev = ax25_dev;
if (ax25->ax25_dev != NULL) {
ax25_fillin_cb_from_dev(ax25, ax25_dev);
return;
}
/*
* No device, use kernel / AX.25 spec default values
*/
ax25->rtt = msecs_to_jiffies(AX25_DEF_T1) / 2;
ax25->t1 = msecs_to_jiffies(AX25_DEF_T1);
ax25->t2 = msecs_to_jiffies(AX25_DEF_T2);
ax25->t3 = msecs_to_jiffies(AX25_DEF_T3);
ax25->n2 = AX25_DEF_N2;
ax25->paclen = AX25_DEF_PACLEN;
ax25->idle = msecs_to_jiffies(AX25_DEF_IDLE);
ax25->backoff = AX25_DEF_BACKOFF;
if (AX25_DEF_AXDEFMODE) {
ax25->modulus = AX25_EMODULUS;
ax25->window = AX25_DEF_EWINDOW;
} else {
ax25->modulus = AX25_MODULUS;
ax25->window = AX25_DEF_WINDOW;
}
}
/*
* Create an empty AX.25 control block.
*/
ax25_cb *ax25_create_cb(void)
{
ax25_cb *ax25;
if ((ax25 = kzalloc(sizeof(*ax25), GFP_ATOMIC)) == NULL)
return NULL;
atomic_set(&ax25->refcount, 1);
skb_queue_head_init(&ax25->write_queue);
skb_queue_head_init(&ax25->frag_queue);
skb_queue_head_init(&ax25->ack_queue);
skb_queue_head_init(&ax25->reseq_queue);
ax25_setup_timers(ax25);
ax25_fillin_cb(ax25, NULL);
ax25->state = AX25_STATE_0;
return ax25;
}
/*
* Handling for system calls applied via the various interfaces to an
* AX25 socket object
*/
static int ax25_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
ax25_cb *ax25;
struct net_device *dev;
char devname[IFNAMSIZ];
unsigned long opt;
int res = 0;
if (level != SOL_AX25)
return -ENOPROTOOPT;
if (optlen < sizeof(unsigned int))
return -EINVAL;
if (get_user(opt, (unsigned int __user *)optval))
return -EFAULT;
lock_sock(sk);
ax25 = ax25_sk(sk);
switch (optname) {
case AX25_WINDOW:
if (ax25->modulus == AX25_MODULUS) {
if (opt < 1 || opt > 7) {
res = -EINVAL;
break;
}
} else {
if (opt < 1 || opt > 63) {
res = -EINVAL;
break;
}
}
ax25->window = opt;
break;
case AX25_T1:
if (opt < 1 || opt > ULONG_MAX / HZ) {
res = -EINVAL;
break;
}
ax25->rtt = (opt * HZ) >> 1;
ax25->t1 = opt * HZ;
break;
case AX25_T2:
if (opt < 1 || opt > ULONG_MAX / HZ) {
res = -EINVAL;
break;
}
ax25->t2 = opt * HZ;
break;
case AX25_N2:
if (opt < 1 || opt > 31) {
res = -EINVAL;
break;
}
ax25->n2 = opt;
break;
case AX25_T3:
if (opt < 1 || opt > ULONG_MAX / HZ) {
res = -EINVAL;
break;
}
ax25->t3 = opt * HZ;
break;
case AX25_IDLE:
if (opt > ULONG_MAX / (60 * HZ)) {
res = -EINVAL;
break;
}
ax25->idle = opt * 60 * HZ;
break;
case AX25_BACKOFF:
if (opt > 2) {
res = -EINVAL;
break;
}
ax25->backoff = opt;
break;
case AX25_EXTSEQ:
ax25->modulus = opt ? AX25_EMODULUS : AX25_MODULUS;
break;
case AX25_PIDINCL:
ax25->pidincl = opt ? 1 : 0;
break;
case AX25_IAMDIGI:
ax25->iamdigi = opt ? 1 : 0;
break;
case AX25_PACLEN:
if (opt < 16 || opt > 65535) {
res = -EINVAL;
break;
}
ax25->paclen = opt;
break;
case SO_BINDTODEVICE:
if (optlen > IFNAMSIZ)
optlen = IFNAMSIZ;
if (copy_from_user(devname, optval, optlen)) {
res = -EFAULT;
break;
}
if (sk->sk_type == SOCK_SEQPACKET &&
(sock->state != SS_UNCONNECTED ||
sk->sk_state == TCP_LISTEN)) {
res = -EADDRNOTAVAIL;
break;
}
dev = dev_get_by_name(&init_net, devname);
if (!dev) {
res = -ENODEV;
break;
}
ax25->ax25_dev = ax25_dev_ax25dev(dev);
ax25_fillin_cb(ax25, ax25->ax25_dev);
dev_put(dev);
break;
default:
res = -ENOPROTOOPT;
}
release_sock(sk);
return res;
}
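/*
 * Illustrative sketch (not part of the original file): the parser above
 * reads an unsigned int payload at SOL_AX25, e.g. to widen the
 * outstanding-frame window.  A minimal call, guarded out of the build.
 */
#if 0 /* example only */
#include <sys/socket.h>
#include <linux/ax25.h>

static int widen_window(int fd)
{
	unsigned int window = 7;	/* maximum in standard modulus-8 mode */

	return setsockopt(fd, SOL_AX25, AX25_WINDOW, &window, sizeof(window));
}
#endif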
static int ax25_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
ax25_cb *ax25;
struct ax25_dev *ax25_dev;
char devname[IFNAMSIZ];
void *valptr;
int val = 0;
int maxlen, length;
if (level != SOL_AX25)
return -ENOPROTOOPT;
if (get_user(maxlen, optlen))
return -EFAULT;
if (maxlen < 1)
return -EFAULT;
valptr = (void *) &val;
length = min_t(unsigned int, maxlen, sizeof(int));
lock_sock(sk);
ax25 = ax25_sk(sk);
switch (optname) {
case AX25_WINDOW:
val = ax25->window;
break;
case AX25_T1:
val = ax25->t1 / HZ;
break;
case AX25_T2:
val = ax25->t2 / HZ;
break;
case AX25_N2:
val = ax25->n2;
break;
case AX25_T3:
val = ax25->t3 / HZ;
break;
case AX25_IDLE:
val = ax25->idle / (60 * HZ);
break;
case AX25_BACKOFF:
val = ax25->backoff;
break;
case AX25_EXTSEQ:
val = (ax25->modulus == AX25_EMODULUS);
break;
case AX25_PIDINCL:
val = ax25->pidincl;
break;
case AX25_IAMDIGI:
val = ax25->iamdigi;
break;
case AX25_PACLEN:
val = ax25->paclen;
break;
case SO_BINDTODEVICE:
ax25_dev = ax25->ax25_dev;
if (ax25_dev != NULL && ax25_dev->dev != NULL) {
strlcpy(devname, ax25_dev->dev->name, sizeof(devname));
length = strlen(devname) + 1;
} else {
*devname = '\0';
length = 1;
}
valptr = (void *) devname;
break;
default:
release_sock(sk);
return -ENOPROTOOPT;
}
release_sock(sk);
if (put_user(length, optlen))
return -EFAULT;
return copy_to_user(optval, valptr, length) ? -EFAULT : 0;
}
static int ax25_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
int res = 0;
lock_sock(sk);
if (sk->sk_type == SOCK_SEQPACKET && sk->sk_state != TCP_LISTEN) {
sk->sk_max_ack_backlog = backlog;
sk->sk_state = TCP_LISTEN;
goto out;
}
res = -EOPNOTSUPP;
out:
release_sock(sk);
return res;
}
/*
* XXX: when creating ax25_sock we should update the .obj_size setting
* below.
*/
static struct proto ax25_proto = {
.name = "AX25",
.owner = THIS_MODULE,
.obj_size = sizeof(struct sock),
};
static int ax25_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
ax25_cb *ax25;
if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
switch (sock->type) {
case SOCK_DGRAM:
if (protocol == 0 || protocol == PF_AX25)
protocol = AX25_P_TEXT;
break;
case SOCK_SEQPACKET:
switch (protocol) {
case 0:
case PF_AX25: /* For CLX */
protocol = AX25_P_TEXT;
break;
case AX25_P_SEGMENT:
#ifdef CONFIG_INET
case AX25_P_ARP:
case AX25_P_IP:
#endif
#ifdef CONFIG_NETROM
case AX25_P_NETROM:
#endif
#ifdef CONFIG_ROSE
case AX25_P_ROSE:
#endif
return -ESOCKTNOSUPPORT;
#ifdef CONFIG_NETROM_MODULE
case AX25_P_NETROM:
if (ax25_protocol_is_registered(AX25_P_NETROM))
return -ESOCKTNOSUPPORT;
break;
#endif
#ifdef CONFIG_ROSE_MODULE
case AX25_P_ROSE:
if (ax25_protocol_is_registered(AX25_P_ROSE))
return -ESOCKTNOSUPPORT;
#endif
default:
break;
}
break;
case SOCK_RAW:
break;
default:
return -ESOCKTNOSUPPORT;
}
sk = sk_alloc(net, PF_AX25, GFP_ATOMIC, &ax25_proto);
if (sk == NULL)
return -ENOMEM;
ax25 = sk->sk_protinfo = ax25_create_cb();
if (!ax25) {
sk_free(sk);
return -ENOMEM;
}
sock_init_data(sock, sk);
sk->sk_destruct = ax25_free_sock;
sock->ops = &ax25_proto_ops;
sk->sk_protocol = protocol;
ax25->sk = sk;
return 0;
}
struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev)
{
struct sock *sk;
ax25_cb *ax25, *oax25;
sk = sk_alloc(sock_net(osk), PF_AX25, GFP_ATOMIC, osk->sk_prot);
if (sk == NULL)
return NULL;
if ((ax25 = ax25_create_cb()) == NULL) {
sk_free(sk);
return NULL;
}
switch (osk->sk_type) {
case SOCK_DGRAM:
break;
case SOCK_SEQPACKET:
break;
default:
sk_free(sk);
ax25_cb_put(ax25);
return NULL;
}
sock_init_data(NULL, sk);
sk->sk_type = osk->sk_type;
sk->sk_priority = osk->sk_priority;
sk->sk_protocol = osk->sk_protocol;
sk->sk_rcvbuf = osk->sk_rcvbuf;
sk->sk_sndbuf = osk->sk_sndbuf;
sk->sk_state = TCP_ESTABLISHED;
sock_copy_flags(sk, osk);
oax25 = ax25_sk(osk);
ax25->modulus = oax25->modulus;
ax25->backoff = oax25->backoff;
ax25->pidincl = oax25->pidincl;
ax25->iamdigi = oax25->iamdigi;
ax25->rtt = oax25->rtt;
ax25->t1 = oax25->t1;
ax25->t2 = oax25->t2;
ax25->t3 = oax25->t3;
ax25->n2 = oax25->n2;
ax25->idle = oax25->idle;
ax25->paclen = oax25->paclen;
ax25->window = oax25->window;
ax25->ax25_dev = ax25_dev;
ax25->source_addr = oax25->source_addr;
if (oax25->digipeat != NULL) {
ax25->digipeat = kmemdup(oax25->digipeat, sizeof(ax25_digi),
GFP_ATOMIC);
if (ax25->digipeat == NULL) {
sk_free(sk);
ax25_cb_put(ax25);
return NULL;
}
}
sk->sk_protinfo = ax25;
sk->sk_destruct = ax25_free_sock;
ax25->sk = sk;
return sk;
}
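/*
 * Close down a socket.  A sequenced-packet socket in a live state sends
 * DISC and lingers in AX25_STATE_2 until the disconnect completes; all
 * other sockets are torn down immediately.
 */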
static int ax25_release(struct socket *sock)
{
struct sock *sk = sock->sk;
ax25_cb *ax25;
if (sk == NULL)
return 0;
sock_hold(sk);
sock_orphan(sk);
lock_sock(sk);
ax25 = ax25_sk(sk);
if (sk->sk_type == SOCK_SEQPACKET) {
switch (ax25->state) {
case AX25_STATE_0:
release_sock(sk);
ax25_disconnect(ax25, 0);
lock_sock(sk);
ax25_destroy_socket(ax25);
break;
case AX25_STATE_1:
case AX25_STATE_2:
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
release_sock(sk);
ax25_disconnect(ax25, 0);
lock_sock(sk);
ax25_destroy_socket(ax25);
break;
case AX25_STATE_3:
case AX25_STATE_4:
ax25_clear_queues(ax25);
ax25->n2count = 0;
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
ax25_send_control(ax25,
AX25_DISC,
AX25_POLLON,
AX25_COMMAND);
ax25_stop_t2timer(ax25);
ax25_stop_t3timer(ax25);
ax25_stop_idletimer(ax25);
break;
#ifdef CONFIG_AX25_DAMA_SLAVE
case AX25_PROTO_DAMA_SLAVE:
ax25_stop_t3timer(ax25);
ax25_stop_idletimer(ax25);
break;
#endif
}
ax25_calculate_t1(ax25);
ax25_start_t1timer(ax25);
ax25->state = AX25_STATE_2;
sk->sk_state = TCP_CLOSE;
sk->sk_shutdown |= SEND_SHUTDOWN;
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DESTROY);
break;
default:
break;
}
} else {
sk->sk_state = TCP_CLOSE;
sk->sk_shutdown |= SEND_SHUTDOWN;
sk->sk_state_change(sk);
ax25_destroy_socket(ax25);
}
sock->sk = NULL;
release_sock(sk);
sock_put(sk);
return 0;
}
/*
* We support a funny extension here so you can (as root) give any callsign
* digipeated via a local address as source. This hack is obsolete now
* that we've implemented support for SO_BINDTODEVICE. It is however small
* and trivially backward compatible.
*/
static int ax25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sock *sk = sock->sk;
struct full_sockaddr_ax25 *addr = (struct full_sockaddr_ax25 *)uaddr;
ax25_dev *ax25_dev = NULL;
ax25_uid_assoc *user;
ax25_address call;
ax25_cb *ax25;
int err = 0;
if (addr_len != sizeof(struct sockaddr_ax25) &&
addr_len != sizeof(struct full_sockaddr_ax25))
/* support for old structure may go away some time
* ax25_bind(): uses old (6 digipeater) socket structure.
*/
if ((addr_len < sizeof(struct sockaddr_ax25) + sizeof(ax25_address) * 6) ||
(addr_len > sizeof(struct full_sockaddr_ax25)))
return -EINVAL;
if (addr->fsa_ax25.sax25_family != AF_AX25)
return -EINVAL;
user = ax25_findbyuid(current_euid());
if (user) {
call = user->call;
ax25_uid_put(user);
} else {
if (ax25_uid_policy && !capable(CAP_NET_ADMIN))
return -EACCES;
call = addr->fsa_ax25.sax25_call;
}
lock_sock(sk);
ax25 = ax25_sk(sk);
if (!sock_flag(sk, SOCK_ZAPPED)) {
err = -EINVAL;
goto out;
}
ax25->source_addr = call;
/*
* User already set interface with SO_BINDTODEVICE
*/
if (ax25->ax25_dev != NULL)
goto done;
if (addr_len > sizeof(struct sockaddr_ax25) && addr->fsa_ax25.sax25_ndigis == 1) {
if (ax25cmp(&addr->fsa_digipeater[0], &null_ax25_address) != 0 &&
(ax25_dev = ax25_addr_ax25dev(&addr->fsa_digipeater[0])) == NULL) {
err = -EADDRNOTAVAIL;
goto out;
}
} else {
if ((ax25_dev = ax25_addr_ax25dev(&addr->fsa_ax25.sax25_call)) == NULL) {
err = -EADDRNOTAVAIL;
goto out;
}
}
if (ax25_dev != NULL)
ax25_fillin_cb(ax25, ax25_dev);
done:
ax25_cb_add(ax25);
sock_reset_flag(sk, SOCK_ZAPPED);
out:
release_sock(sk);
return err;
}
/*
* FIXME: nonblock behaviour looks like it may have a bug.
*/
static int __must_check ax25_connect(struct socket *sock,
struct sockaddr *uaddr, int addr_len, int flags)
{
struct sock *sk = sock->sk;
ax25_cb *ax25 = ax25_sk(sk), *ax25t;
struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)uaddr;
ax25_digi *digi = NULL;
int ct = 0, err = 0;
/*
* some sanity checks. code further down depends on this
*/
if (addr_len == sizeof(struct sockaddr_ax25))
/* support for this will go away in early 2.5.x
* ax25_connect(): uses obsolete socket structure
*/
;
else if (addr_len != sizeof(struct full_sockaddr_ax25))
/* support for old structure may go away some time
* ax25_connect(): uses old (6 digipeater) socket structure.
*/
if ((addr_len < sizeof(struct sockaddr_ax25) + sizeof(ax25_address) * 6) ||
(addr_len > sizeof(struct full_sockaddr_ax25)))
return -EINVAL;
if (fsa->fsa_ax25.sax25_family != AF_AX25)
return -EINVAL;
lock_sock(sk);
/* deal with restarts */
if (sock->state == SS_CONNECTING) {
switch (sk->sk_state) {
case TCP_SYN_SENT: /* still trying */
err = -EINPROGRESS;
goto out_release;
case TCP_ESTABLISHED: /* connection established */
sock->state = SS_CONNECTED;
goto out_release;
case TCP_CLOSE: /* connection refused */
sock->state = SS_UNCONNECTED;
err = -ECONNREFUSED;
goto out_release;
}
}
if (sk->sk_state == TCP_ESTABLISHED && sk->sk_type == SOCK_SEQPACKET) {
err = -EISCONN; /* No reconnect on a seqpacket socket */
goto out_release;
}
sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
kfree(ax25->digipeat);
ax25->digipeat = NULL;
/*
* Handle digi-peaters to be used.
*/
if (addr_len > sizeof(struct sockaddr_ax25) &&
fsa->fsa_ax25.sax25_ndigis != 0) {
/* Valid number of digipeaters ? */
if (fsa->fsa_ax25.sax25_ndigis < 1 || fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS) {
err = -EINVAL;
goto out_release;
}
if ((digi = kmalloc(sizeof(ax25_digi), GFP_KERNEL)) == NULL) {
err = -ENOBUFS;
goto out_release;
}
digi->ndigi = fsa->fsa_ax25.sax25_ndigis;
digi->lastrepeat = -1;
while (ct < fsa->fsa_ax25.sax25_ndigis) {
if ((fsa->fsa_digipeater[ct].ax25_call[6] &
AX25_HBIT) && ax25->iamdigi) {
digi->repeated[ct] = 1;
digi->lastrepeat = ct;
} else {
digi->repeated[ct] = 0;
}
digi->calls[ct] = fsa->fsa_digipeater[ct];
ct++;
}
}
/*
* Must bind first - autobinding in this may or may not work. If
* the socket is already bound, check to see if the device has
* been filled in, error if it hasn't.
*/
if (sock_flag(sk, SOCK_ZAPPED)) {
/* check if we can remove this feature. It is broken. */
printk(KERN_WARNING "ax25_connect(): %s uses autobind, please contact jreuter@yaina.de\n",
current->comm);
if ((err = ax25_rt_autobind(ax25, &fsa->fsa_ax25.sax25_call)) < 0) {
kfree(digi);
goto out_release;
}
ax25_fillin_cb(ax25, ax25->ax25_dev);
ax25_cb_add(ax25);
} else {
if (ax25->ax25_dev == NULL) {
kfree(digi);
err = -EHOSTUNREACH;
goto out_release;
}
}
if (sk->sk_type == SOCK_SEQPACKET &&
(ax25t=ax25_find_cb(&ax25->source_addr, &fsa->fsa_ax25.sax25_call, digi,
ax25->ax25_dev->dev))) {
kfree(digi);
err = -EADDRINUSE; /* Already such a connection */
ax25_cb_put(ax25t);
goto out_release;
}
ax25->dest_addr = fsa->fsa_ax25.sax25_call;
ax25->digipeat = digi;
/* First the easy one */
if (sk->sk_type != SOCK_SEQPACKET) {
sock->state = SS_CONNECTED;
sk->sk_state = TCP_ESTABLISHED;
goto out_release;
}
/* Move to connecting socket, ax.25 lapb WAIT_UA.. */
sock->state = SS_CONNECTING;
sk->sk_state = TCP_SYN_SENT;
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
ax25_std_establish_data_link(ax25);
break;
#ifdef CONFIG_AX25_DAMA_SLAVE
case AX25_PROTO_DAMA_SLAVE:
ax25->modulus = AX25_MODULUS;
ax25->window = ax25->ax25_dev->values[AX25_VALUES_WINDOW];
if (ax25->ax25_dev->dama.slave)
ax25_ds_establish_data_link(ax25);
else
ax25_std_establish_data_link(ax25);
break;
#endif
}
ax25->state = AX25_STATE_1;
ax25_start_heartbeat(ax25);
/* Now the loop */
if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
err = -EINPROGRESS;
goto out_release;
}
if (sk->sk_state == TCP_SYN_SENT) {
DEFINE_WAIT(wait);
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
if (sk->sk_state != TCP_SYN_SENT)
break;
if (!signal_pending(current)) {
release_sock(sk);
schedule();
lock_sock(sk);
continue;
}
err = -ERESTARTSYS;
break;
}
finish_wait(sk_sleep(sk), &wait);
if (err)
goto out_release;
}
if (sk->sk_state != TCP_ESTABLISHED) {
/* Not in ABM, not in WAIT_UA -> failed */
sock->state = SS_UNCONNECTED;
err = sock_error(sk); /* Always set at this point */
goto out_release;
}
sock->state = SS_CONNECTED;
err = 0;
out_release:
release_sock(sk);
return err;
}
static int ax25_accept(struct socket *sock, struct socket *newsock, int flags)
{
struct sk_buff *skb;
struct sock *newsk;
DEFINE_WAIT(wait);
struct sock *sk;
int err = 0;
if (sock->state != SS_UNCONNECTED)
return -EINVAL;
if ((sk = sock->sk) == NULL)
return -EINVAL;
lock_sock(sk);
if (sk->sk_type != SOCK_SEQPACKET) {
err = -EOPNOTSUPP;
goto out;
}
if (sk->sk_state != TCP_LISTEN) {
err = -EINVAL;
goto out;
}
/*
* The read queue this time is holding sockets ready to use
* hooked into the SABM we saved
*/
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
skb = skb_dequeue(&sk->sk_receive_queue);
if (skb)
break;
if (flags & O_NONBLOCK) {
err = -EWOULDBLOCK;
break;
}
if (!signal_pending(current)) {
release_sock(sk);
schedule();
lock_sock(sk);
continue;
}
err = -ERESTARTSYS;
break;
}
finish_wait(sk_sleep(sk), &wait);
if (err)
goto out;
newsk = skb->sk;
sock_graft(newsk, newsock);
/* Now attach up the new socket */
kfree_skb(skb);
sk->sk_ack_backlog--;
newsock->state = SS_CONNECTED;
out:
release_sock(sk);
return err;
}
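/*
 * Report the local or peer address.  The peer is only defined on an
 * established connection; for the local side the bound device address is
 * exposed as a pseudo first digipeater.
 */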
static int ax25_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)uaddr;
struct sock *sk = sock->sk;
unsigned char ndigi, i;
ax25_cb *ax25;
int err = 0;
memset(fsa, 0, sizeof(*fsa));
lock_sock(sk);
ax25 = ax25_sk(sk);
if (peer != 0) {
if (sk->sk_state != TCP_ESTABLISHED) {
err = -ENOTCONN;
goto out;
}
fsa->fsa_ax25.sax25_family = AF_AX25;
fsa->fsa_ax25.sax25_call = ax25->dest_addr;
if (ax25->digipeat != NULL) {
ndigi = ax25->digipeat->ndigi;
fsa->fsa_ax25.sax25_ndigis = ndigi;
for (i = 0; i < ndigi; i++)
fsa->fsa_digipeater[i] =
ax25->digipeat->calls[i];
}
} else {
fsa->fsa_ax25.sax25_family = AF_AX25;
fsa->fsa_ax25.sax25_call = ax25->source_addr;
fsa->fsa_ax25.sax25_ndigis = 1;
if (ax25->ax25_dev != NULL) {
memcpy(&fsa->fsa_digipeater[0],
ax25->ax25_dev->dev->dev_addr, AX25_ADDR_LEN);
} else {
fsa->fsa_digipeater[0] = null_ax25_address;
}
}
*uaddr_len = sizeof (struct full_sockaddr_ax25);
out:
release_sock(sk);
return err;
}
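/*
 * Transmit user data.  Connected (SOCK_SEQPACKET) traffic is handed to
 * the LAPB machine via ax25_output(); datagram traffic gets an AX.25
 * header built here and leaves immediately as a UI frame.
 */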
static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sockaddr_ax25 *usax = (struct sockaddr_ax25 *)msg->msg_name;
struct sock *sk = sock->sk;
struct sockaddr_ax25 sax;
struct sk_buff *skb;
ax25_digi dtmp, *dp;
ax25_cb *ax25;
size_t size;
int lv, err, addr_len = msg->msg_namelen;
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
return -EINVAL;
lock_sock(sk);
ax25 = ax25_sk(sk);
if (sock_flag(sk, SOCK_ZAPPED)) {
err = -EADDRNOTAVAIL;
goto out;
}
if (sk->sk_shutdown & SEND_SHUTDOWN) {
send_sig(SIGPIPE, current, 0);
err = -EPIPE;
goto out;
}
if (ax25->ax25_dev == NULL) {
err = -ENETUNREACH;
goto out;
}
if (len > ax25->ax25_dev->dev->mtu) {
err = -EMSGSIZE;
goto out;
}
if (usax != NULL) {
if (usax->sax25_family != AF_AX25) {
err = -EINVAL;
goto out;
}
if (addr_len == sizeof(struct sockaddr_ax25))
/* ax25_sendmsg(): uses obsolete socket structure */
;
else if (addr_len != sizeof(struct full_sockaddr_ax25))
/* support for old structure may go away some time
* ax25_sendmsg(): uses old (6 digipeater)
* socket structure.
*/
if ((addr_len < sizeof(struct sockaddr_ax25) + sizeof(ax25_address) * 6) ||
(addr_len > sizeof(struct full_sockaddr_ax25))) {
err = -EINVAL;
goto out;
}
if (addr_len > sizeof(struct sockaddr_ax25) && usax->sax25_ndigis != 0) {
int ct = 0;
struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)usax;
/* Valid number of digipeaters ? */
if (usax->sax25_ndigis < 1 || usax->sax25_ndigis > AX25_MAX_DIGIS) {
err = -EINVAL;
goto out;
}
dtmp.ndigi = usax->sax25_ndigis;
while (ct < usax->sax25_ndigis) {
dtmp.repeated[ct] = 0;
dtmp.calls[ct] = fsa->fsa_digipeater[ct];
ct++;
}
dtmp.lastrepeat = 0;
}
sax = *usax;
if (sk->sk_type == SOCK_SEQPACKET &&
ax25cmp(&ax25->dest_addr, &sax.sax25_call)) {
err = -EISCONN;
goto out;
}
if (usax->sax25_ndigis == 0)
dp = NULL;
else
dp = &dtmp;
} else {
/*
* FIXME: 1003.1g - if the socket is like this because
* it has become closed (not started closed) and is VC
* we ought to SIGPIPE, EPIPE
*/
if (sk->sk_state != TCP_ESTABLISHED) {
err = -ENOTCONN;
goto out;
}
sax.sax25_family = AF_AX25;
sax.sax25_call = ax25->dest_addr;
dp = ax25->digipeat;
}
/* Build a packet */
/* Assume the worst case */
size = len + ax25->ax25_dev->dev->hard_header_len;
skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT, &err);
if (skb == NULL)
goto out;
skb_reserve(skb, size - len);
/* User data follows immediately after the AX.25 data */
if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
err = -EFAULT;
kfree_skb(skb);
goto out;
}
skb_reset_network_header(skb);
/* Add the PID if one is not supplied by the user in the skb */
if (!ax25->pidincl)
*skb_push(skb, 1) = sk->sk_protocol;
if (sk->sk_type == SOCK_SEQPACKET) {
/* Connected mode sockets go via the LAPB machine */
if (sk->sk_state != TCP_ESTABLISHED) {
kfree_skb(skb);
err = -ENOTCONN;
goto out;
}
/* Shove it onto the queue and kick */
ax25_output(ax25, ax25->paclen, skb);
err = len;
goto out;
}
skb_push(skb, 1 + ax25_addr_size(dp));
/* Build an AX.25 header */
lv = ax25_addr_build(skb->data, &ax25->source_addr, &sax.sax25_call,
dp, AX25_COMMAND, AX25_MODULUS);
skb_set_transport_header(skb, lv);
*skb_transport_header(skb) = AX25_UI;
/* Datagram frames go straight out of the door as UI */
ax25_queue_xmit(skb, ax25->ax25_dev->dev);
err = len;
out:
release_sock(sk);
return err;
}
static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
int copied;
int err = 0;
lock_sock(sk);
/*
* This works for seqpacket too. The receiver has ordered the
* queue for us! We do one quick check first though
*/
if (sk->sk_type == SOCK_SEQPACKET && sk->sk_state != TCP_ESTABLISHED) {
err = -ENOTCONN;
goto out;
}
/* Now we can treat all alike */
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto out;
if (!ax25_sk(sk)->pidincl)
skb_pull(skb, 1); /* Remove PID */
skb_reset_transport_header(skb);
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (msg->msg_namelen != 0) {
struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name;
ax25_digi digi;
ax25_address src;
const unsigned char *mac = skb_mac_header(skb);
ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,
&digi, NULL, NULL);
sax->sax25_family = AF_AX25;
/* We set this correctly, even though we may not let the
application know the digi calls further down (because it
did NOT ask to know them). This could get political... */
sax->sax25_ndigis = digi.ndigi;
sax->sax25_call = src;
if (sax->sax25_ndigis != 0) {
int ct;
struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)sax;
for (ct = 0; ct < digi.ndigi; ct++)
fsa->fsa_digipeater[ct] = digi.calls[ct];
}
msg->msg_namelen = sizeof(struct full_sockaddr_ax25);
}
skb_free_datagram(sk, skb);
err = copied;
out:
release_sock(sk);
return err;
}
static int ax25_shutdown(struct socket *sk, int how)
{
/* FIXME - generate DM and RNR states */
return -EOPNOTSUPP;
}
static int ax25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
void __user *argp = (void __user *)arg;
int res = 0;
lock_sock(sk);
switch (cmd) {
case TIOCOUTQ: {
long amount;
amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
if (amount < 0)
amount = 0;
res = put_user(amount, (int __user *)argp);
break;
}
case TIOCINQ: {
struct sk_buff *skb;
long amount = 0L;
/* These two are safe on a single CPU system as only user tasks fiddle here */
if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
amount = skb->len;
res = put_user(amount, (int __user *) argp);
break;
}
case SIOCGSTAMP:
res = sock_get_timestamp(sk, argp);
break;
case SIOCGSTAMPNS:
res = sock_get_timestampns(sk, argp);
break;
case SIOCAX25ADDUID: /* Add a uid to the uid/call map table */
case SIOCAX25DELUID: /* Delete a uid from the uid/call map table */
case SIOCAX25GETUID: {
struct sockaddr_ax25 sax25;
if (copy_from_user(&sax25, argp, sizeof(sax25))) {
res = -EFAULT;
break;
}
res = ax25_uid_ioctl(cmd, &sax25);
break;
}
case SIOCAX25NOUID: { /* Set the default policy (default/bar) */
long amount;
if (!capable(CAP_NET_ADMIN)) {
res = -EPERM;
break;
}
if (get_user(amount, (long __user *)argp)) {
res = -EFAULT;
break;
}
if (amount > AX25_NOUID_BLOCK) {
res = -EINVAL;
break;
}
ax25_uid_policy = amount;
res = 0;
break;
}
case SIOCADDRT:
case SIOCDELRT:
case SIOCAX25OPTRT:
if (!capable(CAP_NET_ADMIN)) {
res = -EPERM;
break;
}
res = ax25_rt_ioctl(cmd, argp);
break;
case SIOCAX25CTLCON:
if (!capable(CAP_NET_ADMIN)) {
res = -EPERM;
break;
}
res = ax25_ctl_ioctl(cmd, argp);
break;
case SIOCAX25GETINFO:
case SIOCAX25GETINFOOLD: {
ax25_cb *ax25 = ax25_sk(sk);
struct ax25_info_struct ax25_info;
ax25_info.t1 = ax25->t1 / HZ;
ax25_info.t2 = ax25->t2 / HZ;
ax25_info.t3 = ax25->t3 / HZ;
ax25_info.idle = ax25->idle / (60 * HZ);
ax25_info.n2 = ax25->n2;
ax25_info.t1timer = ax25_display_timer(&ax25->t1timer) / HZ;
ax25_info.t2timer = ax25_display_timer(&ax25->t2timer) / HZ;
ax25_info.t3timer = ax25_display_timer(&ax25->t3timer) / HZ;
ax25_info.idletimer = ax25_display_timer(&ax25->idletimer) / (60 * HZ);
ax25_info.n2count = ax25->n2count;
ax25_info.state = ax25->state;
ax25_info.rcv_q = sk_rmem_alloc_get(sk);
ax25_info.snd_q = sk_wmem_alloc_get(sk);
ax25_info.vs = ax25->vs;
ax25_info.vr = ax25->vr;
ax25_info.va = ax25->va;
ax25_info.vs_max = ax25->vs; /* reserved */
ax25_info.paclen = ax25->paclen;
ax25_info.window = ax25->window;
/* old structure? */
if (cmd == SIOCAX25GETINFOOLD) {
static int warned = 0;
if (!warned) {
printk(KERN_INFO "%s uses old SIOCAX25GETINFO\n",
current->comm);
warned=1;
}
if (copy_to_user(argp, &ax25_info, sizeof(struct ax25_info_struct_deprecated))) {
res = -EFAULT;
break;
}
} else {
if (copy_to_user(argp, &ax25_info, sizeof(struct ax25_info_struct))) {
res = -EINVAL;
break;
}
}
res = 0;
break;
}
case SIOCAX25ADDFWD:
case SIOCAX25DELFWD: {
struct ax25_fwd_struct ax25_fwd;
if (!capable(CAP_NET_ADMIN)) {
res = -EPERM;
break;
}
if (copy_from_user(&ax25_fwd, argp, sizeof(ax25_fwd))) {
res = -EFAULT;
break;
}
res = ax25_fwd_ioctl(cmd, &ax25_fwd);
break;
}
case SIOCGIFADDR:
case SIOCSIFADDR:
case SIOCGIFDSTADDR:
case SIOCSIFDSTADDR:
case SIOCGIFBRDADDR:
case SIOCSIFBRDADDR:
case SIOCGIFNETMASK:
case SIOCSIFNETMASK:
case SIOCGIFMETRIC:
case SIOCSIFMETRIC:
res = -EINVAL;
break;
default:
res = -ENOIOCTLCMD;
break;
}
release_sock(sk);
return res;
}
#ifdef CONFIG_PROC_FS
static void *ax25_info_start(struct seq_file *seq, loff_t *pos)
__acquires(ax25_list_lock)
{
spin_lock_bh(&ax25_list_lock);
return seq_hlist_start(&ax25_list, *pos);
}
static void *ax25_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
return seq_hlist_next(v, &ax25_list, pos);
}
static void ax25_info_stop(struct seq_file *seq, void *v)
__releases(ax25_list_lock)
{
spin_unlock_bh(&ax25_list_lock);
}
static int ax25_info_show(struct seq_file *seq, void *v)
{
ax25_cb *ax25 = hlist_entry(v, struct ax25_cb, ax25_node);
char buf[11];
int k;
/*
* New format:
* magic dev src_addr dest_addr,digi1,digi2,.. st vs vr va t1 t1 t2 t2 t3 t3 idle idle n2 n2 rtt window paclen Snd-Q Rcv-Q inode
*/
seq_printf(seq, "%8.8lx %s %s%s ",
(long) ax25,
ax25->ax25_dev == NULL? "???" : ax25->ax25_dev->dev->name,
ax2asc(buf, &ax25->source_addr),
ax25->iamdigi? "*":"");
seq_printf(seq, "%s", ax2asc(buf, &ax25->dest_addr));
for (k=0; (ax25->digipeat != NULL) && (k < ax25->digipeat->ndigi); k++) {
seq_printf(seq, ",%s%s",
ax2asc(buf, &ax25->digipeat->calls[k]),
ax25->digipeat->repeated[k]? "*":"");
}
seq_printf(seq, " %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %d %d",
ax25->state,
ax25->vs, ax25->vr, ax25->va,
ax25_display_timer(&ax25->t1timer) / HZ, ax25->t1 / HZ,
ax25_display_timer(&ax25->t2timer) / HZ, ax25->t2 / HZ,
ax25_display_timer(&ax25->t3timer) / HZ, ax25->t3 / HZ,
ax25_display_timer(&ax25->idletimer) / (60 * HZ),
ax25->idle / (60 * HZ),
ax25->n2count, ax25->n2,
ax25->rtt / HZ,
ax25->window,
ax25->paclen);
if (ax25->sk != NULL) {
seq_printf(seq, " %d %d %lu\n",
sk_wmem_alloc_get(ax25->sk),
sk_rmem_alloc_get(ax25->sk),
sock_i_ino(ax25->sk));
} else {
seq_puts(seq, " * * *\n");
}
return 0;
}
static const struct seq_operations ax25_info_seqops = {
.start = ax25_info_start,
.next = ax25_info_next,
.stop = ax25_info_stop,
.show = ax25_info_show,
};
static int ax25_info_open(struct inode *inode, struct file *file)
{
return seq_open(file, &ax25_info_seqops);
}
static const struct file_operations ax25_info_fops = {
.owner = THIS_MODULE,
.open = ax25_info_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#endif
static const struct net_proto_family ax25_family_ops = {
.family = PF_AX25,
.create = ax25_create,
.owner = THIS_MODULE,
};
static const struct proto_ops ax25_proto_ops = {
.family = PF_AX25,
.owner = THIS_MODULE,
.release = ax25_release,
.bind = ax25_bind,
.connect = ax25_connect,
.socketpair = sock_no_socketpair,
.accept = ax25_accept,
.getname = ax25_getname,
.poll = datagram_poll,
.ioctl = ax25_ioctl,
.listen = ax25_listen,
.shutdown = ax25_shutdown,
.setsockopt = ax25_setsockopt,
.getsockopt = ax25_getsockopt,
.sendmsg = ax25_sendmsg,
.recvmsg = ax25_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
};
/*
* Called by socket.c on kernel start up
*/
static struct packet_type ax25_packet_type __read_mostly = {
.type = cpu_to_be16(ETH_P_AX25),
.func = ax25_kiss_rcv,
};
static struct notifier_block ax25_dev_notifier = {
.notifier_call =ax25_device_event,
};
static int __init ax25_init(void)
{
int rc = proto_register(&ax25_proto, 0);
if (rc != 0)
goto out;
sock_register(&ax25_family_ops);
dev_add_pack(&ax25_packet_type);
register_netdevice_notifier(&ax25_dev_notifier);
proc_create("ax25_route", S_IRUGO, init_net.proc_net,
&ax25_route_fops);
proc_create("ax25", S_IRUGO, init_net.proc_net, &ax25_info_fops);
proc_create("ax25_calls", S_IRUGO, init_net.proc_net, &ax25_uid_fops);
out:
return rc;
}
module_init(ax25_init);
MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
MODULE_DESCRIPTION("The amateur radio AX.25 link layer protocol");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_AX25);
static void __exit ax25_exit(void)
{
remove_proc_entry("ax25_route", init_net.proc_net);
remove_proc_entry("ax25", init_net.proc_net);
remove_proc_entry("ax25_calls", init_net.proc_net);
unregister_netdevice_notifier(&ax25_dev_notifier);
dev_remove_pack(&ax25_packet_type);
sock_unregister(PF_AX25);
proto_unregister(&ax25_proto);
ax25_rt_free();
ax25_uid_free();
ax25_dev_free();
}
module_exit(ax25_exit);
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_5682_0 |
crossvul-cpp_data_good_1596_0 | /*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011 Gunnar Beutner
*/
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <strings.h>
#include <libintl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <libzfs.h>
#include <libshare.h>
#include "libshare_impl.h"
#include "nfs.h"
#include "smb.h"
static sa_share_impl_t find_share(sa_handle_impl_t handle,
const char *sharepath);
static sa_share_impl_t alloc_share(const char *sharepath);
static void free_share(sa_share_impl_t share);
static void parse_sharetab(sa_handle_impl_t impl_handle);
static int process_share(sa_handle_impl_t impl_handle,
sa_share_impl_t impl_share, char *pathname, char *resource,
char *fstype, char *options, char *description,
char *dataset, boolean_t from_sharetab);
static void update_sharetab(sa_handle_impl_t impl_handle);
static int update_zfs_share(sa_share_impl_t impl_handle, const char *proto);
static int update_zfs_shares(sa_handle_impl_t impl_handle, const char *proto);
static int fstypes_count;
static sa_fstype_t *fstypes;
/*
* Invokes the specified callback function for each Solaris share option
* listed in the specified string.
*/
int
foreach_shareopt(const char *shareopts,
nfs_shareopt_callback_t callback, void *cookie)
{
char *shareopts_dup, *opt, *cur, *value;
int was_nul, rc;
if (shareopts == NULL)
return (SA_OK);
shareopts_dup = strdup(shareopts);
if (shareopts_dup == NULL)
return (SA_NO_MEMORY);
opt = shareopts_dup;
was_nul = 0;
while (1) {
cur = opt;
while (*cur != ',' && *cur != '\0')
cur++;
if (*cur == '\0')
was_nul = 1;
*cur = '\0';
if (cur > opt) {
value = strchr(opt, '=');
if (value != NULL) {
*value = '\0';
value++;
}
rc = callback(opt, value, cookie);
if (rc != SA_OK) {
free(shareopts_dup);
return (rc);
}
}
opt = cur + 1;
if (was_nul)
break;
}
free(shareopts_dup);
return (SA_OK);
}
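/*
 * Illustrative sketch (not part of the original file): a minimal callback
 * for the iterator above, printing each option found in a comma-separated
 * list such as "rw=host1,ro".  The callback signature mirrors how
 * foreach_shareopt() invokes it; guarded out of the build.
 */
#if 0 /* example only */
static int
print_shareopt(const char *opt, const char *value, void *cookie)
{
	(void) cookie;
	(void) printf("option '%s' value '%s'\n", opt,
	    value == NULL ? "(none)" : value);
	return (SA_OK);
}

/* usage: foreach_shareopt("rw=host1,ro", print_shareopt, NULL); */
#endif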
sa_fstype_t *
register_fstype(const char *name, const sa_share_ops_t *ops)
{
sa_fstype_t *fstype;
fstype = calloc(sizeof (sa_fstype_t), 1);
if (fstype == NULL)
return (NULL);
fstype->name = name;
fstype->ops = ops;
fstype->fsinfo_index = fstypes_count;
fstypes_count++;
fstype->next = fstypes;
fstypes = fstype;
return (fstype);
}
sa_handle_t
sa_init(int init_service)
{
sa_handle_impl_t impl_handle;
impl_handle = calloc(sizeof (struct sa_handle_impl), 1);
if (impl_handle == NULL)
return (NULL);
impl_handle->zfs_libhandle = libzfs_init();
if (impl_handle->zfs_libhandle != NULL) {
libzfs_print_on_error(impl_handle->zfs_libhandle, B_TRUE);
}
parse_sharetab(impl_handle);
update_zfs_shares(impl_handle, NULL);
return ((sa_handle_t)impl_handle);
}
__attribute__((constructor)) static void
libshare_init(void)
{
libshare_nfs_init();
libshare_smb_init();
}
static void
parse_sharetab(sa_handle_impl_t impl_handle) {
FILE *fp;
char line[512];
char *eol, *pathname, *resource, *fstype, *options, *description;
fp = fopen("/etc/dfs/sharetab", "r");
if (fp == NULL)
return;
while (fgets(line, sizeof (line), fp) != NULL) {
eol = line + strlen(line) - 1;
while (eol >= line) {
if (*eol != '\r' && *eol != '\n')
break;
*eol = '\0';
eol--;
}
pathname = line;
if ((resource = strchr(pathname, '\t')) == NULL)
continue;
*resource = '\0';
resource++;
if ((fstype = strchr(resource, '\t')) == NULL)
continue;
*fstype = '\0';
fstype++;
if ((options = strchr(fstype, '\t')) == NULL)
continue;
*options = '\0';
options++;
		/*
		 * The description column follows the options column;
		 * fstype has already been NUL-terminated at its tab, so
		 * searching it again would always fail.
		 */
		if ((description = strchr(options, '\t')) != NULL) {
*description = '\0';
description++;
}
if (strcmp(resource, "-") == 0)
resource = NULL;
(void) process_share(impl_handle, NULL, pathname, resource,
fstype, options, description, NULL, B_TRUE);
}
fclose(fp);
}
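/*
 * Rewrite /etc/dfs/sharetab from the in-memory share list.  The file is
 * replaced atomically: write to a mkstemp(3) temporary, flush and fsync
 * it, then rename(2) it over the old sharetab.
 */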
static void
update_sharetab(sa_handle_impl_t impl_handle)
{
sa_share_impl_t impl_share;
int temp_fd;
FILE *temp_fp;
char tempfile[] = "/etc/dfs/sharetab.XXXXXX";
sa_fstype_t *fstype;
const char *resource;
if (mkdir("/etc/dfs", 0755) < 0 && errno != EEXIST) {
return;
}
temp_fd = mkstemp(tempfile);
if (temp_fd < 0)
return;
temp_fp = fdopen(temp_fd, "w");
if (temp_fp == NULL)
return;
impl_share = impl_handle->shares;
while (impl_share != NULL) {
fstype = fstypes;
while (fstype != NULL) {
if (FSINFO(impl_share, fstype)->active &&
FSINFO(impl_share, fstype)->shareopts != NULL) {
resource = FSINFO(impl_share, fstype)->resource;
if (resource == NULL)
resource = "-";
fprintf(temp_fp, "%s\t%s\t%s\t%s\n",
impl_share->sharepath, resource,
fstype->name,
FSINFO(impl_share, fstype)->shareopts);
}
fstype = fstype->next;
}
impl_share = impl_share->next;
}
fflush(temp_fp);
fsync(temp_fd);
fclose(temp_fp);
rename(tempfile, "/etc/dfs/sharetab");
}
typedef struct update_cookie_s {
sa_handle_impl_t handle;
const char *proto;
} update_cookie_t;
static int
update_zfs_shares_cb(zfs_handle_t *zhp, void *pcookie)
{
update_cookie_t *udata = (update_cookie_t *)pcookie;
char mountpoint[ZFS_MAXPROPLEN];
char shareopts[ZFS_MAXPROPLEN];
char *dataset;
zfs_type_t type = zfs_get_type(zhp);
if (type == ZFS_TYPE_FILESYSTEM &&
zfs_iter_filesystems(zhp, update_zfs_shares_cb, pcookie) != 0) {
zfs_close(zhp);
return (1);
}
if (type != ZFS_TYPE_FILESYSTEM) {
zfs_close(zhp);
return (0);
}
if (zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mountpoint,
sizeof (mountpoint), NULL, NULL, 0, B_FALSE) != 0) {
zfs_close(zhp);
return (0);
}
dataset = (char *)zfs_get_name(zhp);
if (dataset == NULL) {
zfs_close(zhp);
return (0);
}
if (!zfs_is_mounted(zhp, NULL)) {
zfs_close(zhp);
return (0);
}
if ((udata->proto == NULL || strcmp(udata->proto, "nfs") == 0) &&
zfs_prop_get(zhp, ZFS_PROP_SHARENFS, shareopts,
sizeof (shareopts), NULL, NULL, 0, B_FALSE) == 0 &&
strcmp(shareopts, "off") != 0) {
(void) process_share(udata->handle, NULL, mountpoint, NULL,
"nfs", shareopts, NULL, dataset, B_FALSE);
}
if ((udata->proto == NULL || strcmp(udata->proto, "smb") == 0) &&
zfs_prop_get(zhp, ZFS_PROP_SHARESMB, shareopts,
sizeof (shareopts), NULL, NULL, 0, B_FALSE) == 0 &&
strcmp(shareopts, "off") != 0) {
(void) process_share(udata->handle, NULL, mountpoint, NULL,
"smb", shareopts, NULL, dataset, B_FALSE);
}
zfs_close(zhp);
return (0);
}
static int
update_zfs_share(sa_share_impl_t impl_share, const char *proto)
{
sa_handle_impl_t impl_handle = impl_share->handle;
zfs_handle_t *zhp;
update_cookie_t udata;
if (impl_handle->zfs_libhandle == NULL)
return (SA_SYSTEM_ERR);
assert(impl_share->dataset != NULL);
	zhp = zfs_open(impl_handle->zfs_libhandle, impl_share->dataset,
	    ZFS_TYPE_FILESYSTEM);
if (zhp == NULL)
return (SA_SYSTEM_ERR);
udata.handle = impl_handle;
udata.proto = proto;
(void) update_zfs_shares_cb(zhp, &udata);
return (SA_OK);
}
static int
update_zfs_shares(sa_handle_impl_t impl_handle, const char *proto)
{
update_cookie_t udata;
if (impl_handle->zfs_libhandle == NULL)
return (SA_SYSTEM_ERR);
udata.handle = impl_handle;
udata.proto = proto;
(void) zfs_iter_root(impl_handle->zfs_libhandle, update_zfs_shares_cb,
&udata);
return (SA_OK);
}
static int
process_share(sa_handle_impl_t impl_handle, sa_share_impl_t impl_share,
char *pathname, char *resource, char *proto,
char *options, char *description, char *dataset,
boolean_t from_sharetab)
{
struct stat statbuf;
int rc;
char *resource_dup = NULL, *dataset_dup = NULL;
boolean_t new_share;
sa_fstype_t *fstype;
new_share = B_FALSE;
if (impl_share == NULL)
impl_share = find_share(impl_handle, pathname);
if (impl_share == NULL) {
if (lstat(pathname, &statbuf) != 0 ||
!S_ISDIR(statbuf.st_mode))
return (SA_BAD_PATH);
impl_share = alloc_share(pathname);
if (impl_share == NULL) {
rc = SA_NO_MEMORY;
goto err;
}
new_share = B_TRUE;
}
if (dataset != NULL) {
dataset_dup = strdup(dataset);
if (dataset_dup == NULL) {
rc = SA_NO_MEMORY;
goto err;
}
}
free(impl_share->dataset);
impl_share->dataset = dataset_dup;
rc = SA_INVALID_PROTOCOL;
fstype = fstypes;
while (fstype != NULL) {
if (strcmp(fstype->name, proto) == 0) {
if (resource != NULL) {
resource_dup = strdup(resource);
if (resource_dup == NULL) {
rc = SA_NO_MEMORY;
goto err;
}
}
free(FSINFO(impl_share, fstype)->resource);
FSINFO(impl_share, fstype)->resource = resource_dup;
rc = fstype->ops->update_shareopts(impl_share,
resource, options);
if (rc == SA_OK && from_sharetab)
FSINFO(impl_share, fstype)->active = B_TRUE;
break;
}
fstype = fstype->next;
}
if (rc != SA_OK)
goto err;
if (new_share) {
impl_share->handle = impl_handle;
impl_share->next = impl_handle->shares;
impl_handle->shares = impl_share;
}
err:
if (rc != SA_OK) {
if (new_share)
free_share(impl_share);
}
return (rc);
}
void
sa_fini(sa_handle_t handle)
{
sa_handle_impl_t impl_handle = (sa_handle_impl_t)handle;
sa_share_impl_t impl_share, next;
sa_share_impl_t *pcurr;
if (impl_handle == NULL)
return;
	/*
	 * clean up shares whose dataset property is NULL, i.e. shares
	 * that appear in sharetab but whose ZFS dataset we couldn't
	 * find.
	 */
pcurr = &(impl_handle->shares);
impl_share = *pcurr;
while (impl_share != NULL) {
next = impl_share->next;
if (impl_share->dataset == NULL) {
/* remove item from the linked list */
*pcurr = next;
sa_disable_share(impl_share, NULL);
free_share(impl_share);
} else {
pcurr = &(impl_share->next);
}
impl_share = next;
}
update_sharetab(impl_handle);
if (impl_handle->zfs_libhandle != NULL)
libzfs_fini(impl_handle->zfs_libhandle);
impl_share = impl_handle->shares;
while (impl_share != NULL) {
next = impl_share->next;
free_share(impl_share);
impl_share = next;
}
free(impl_handle);
}
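/*
 * Lifecycle sketch (illustrative only, compiled out).  This
 * implementation ignores the init_service argument, so 0 is passed
 * here; the error handling is a minimal assumption.
 */
#if 0
	sa_handle_t handle = sa_init(0);

	if (handle != NULL) {
		/* query or toggle shares via sa_enable_share() etc. */
		sa_fini(handle);
	}
#endif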
static sa_share_impl_t
find_share(sa_handle_impl_t impl_handle, const char *sharepath)
{
sa_share_impl_t impl_share;
impl_share = impl_handle->shares;
while (impl_share != NULL) {
if (strcmp(impl_share->sharepath, sharepath) == 0) {
break;
}
impl_share = impl_share->next;
}
return (impl_share);
}
sa_share_t
sa_find_share(sa_handle_t handle, char *sharepath)
{
return ((sa_share_t)find_share((sa_handle_impl_t)handle, sharepath));
}
int
sa_enable_share(sa_share_t share, char *protocol)
{
sa_share_impl_t impl_share = (sa_share_impl_t)share;
int rc, ret;
boolean_t found_protocol;
sa_fstype_t *fstype;
#ifdef DEBUG
fprintf(stderr, "sa_enable_share: share->sharepath=%s, protocol=%s\n",
impl_share->sharepath, protocol);
#endif
assert(impl_share->handle != NULL);
ret = SA_OK;
found_protocol = B_FALSE;
fstype = fstypes;
while (fstype != NULL) {
if (protocol == NULL || strcmp(fstype->name, protocol) == 0) {
update_zfs_share(impl_share, fstype->name);
rc = fstype->ops->enable_share(impl_share);
if (rc != SA_OK)
ret = rc;
else
FSINFO(impl_share, fstype)->active = B_TRUE;
found_protocol = B_TRUE;
}
fstype = fstype->next;
}
update_sharetab(impl_share->handle);
return (found_protocol ? ret : SA_INVALID_PROTOCOL);
}
int
sa_disable_share(sa_share_t share, char *protocol)
{
sa_share_impl_t impl_share = (sa_share_impl_t)share;
int rc, ret;
boolean_t found_protocol;
sa_fstype_t *fstype;
#ifdef DEBUG
fprintf(stderr, "sa_disable_share: share->sharepath=%s, protocol=%s\n",
impl_share->sharepath, protocol);
#endif
ret = SA_OK;
found_protocol = B_FALSE;
fstype = fstypes;
while (fstype != NULL) {
if (protocol == NULL || strcmp(fstype->name, protocol) == 0) {
rc = fstype->ops->disable_share(impl_share);
if (rc == SA_OK) {
fstype->ops->clear_shareopts(impl_share);
FSINFO(impl_share, fstype)->active = B_FALSE;
} else
ret = rc;
found_protocol = B_TRUE;
}
fstype = fstype->next;
}
update_sharetab(impl_share->handle);
return (found_protocol ? ret : SA_INVALID_PROTOCOL);
}
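/*
 * Sketch (illustrative only, compiled out): enabling NFS on a known
 * share path.  The path is hypothetical and "handle" is assumed to come
 * from sa_init(); sa_find_share() returns NULL when the path is not in
 * the share list.
 */
#if 0
	int rc;
	sa_share_t share = sa_find_share(handle, "/tank/export");

	if (share != NULL && (rc = sa_enable_share(share, "nfs")) != SA_OK)
		(void) fprintf(stderr, "%s\n", sa_errorstr(rc));
#endif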
/*
* sa_errorstr(err)
*
* convert an error value to an error string
*/
char *
sa_errorstr(int err)
{
static char errstr[32];
char *ret = NULL;
switch (err) {
case SA_OK:
ret = dgettext(TEXT_DOMAIN, "ok");
break;
case SA_NO_SUCH_PATH:
ret = dgettext(TEXT_DOMAIN, "path doesn't exist");
break;
case SA_NO_MEMORY:
ret = dgettext(TEXT_DOMAIN, "no memory");
break;
case SA_DUPLICATE_NAME:
ret = dgettext(TEXT_DOMAIN, "name in use");
break;
case SA_BAD_PATH:
ret = dgettext(TEXT_DOMAIN, "bad path");
break;
case SA_NO_SUCH_GROUP:
ret = dgettext(TEXT_DOMAIN, "no such group");
break;
case SA_CONFIG_ERR:
ret = dgettext(TEXT_DOMAIN, "configuration error");
break;
case SA_SYSTEM_ERR:
ret = dgettext(TEXT_DOMAIN, "system error");
break;
case SA_SYNTAX_ERR:
ret = dgettext(TEXT_DOMAIN, "syntax error");
break;
case SA_NO_PERMISSION:
ret = dgettext(TEXT_DOMAIN, "no permission");
break;
case SA_BUSY:
ret = dgettext(TEXT_DOMAIN, "busy");
break;
case SA_NO_SUCH_PROP:
ret = dgettext(TEXT_DOMAIN, "no such property");
break;
case SA_INVALID_NAME:
ret = dgettext(TEXT_DOMAIN, "invalid name");
break;
case SA_INVALID_PROTOCOL:
ret = dgettext(TEXT_DOMAIN, "invalid protocol");
break;
case SA_NOT_ALLOWED:
ret = dgettext(TEXT_DOMAIN, "operation not allowed");
break;
case SA_BAD_VALUE:
ret = dgettext(TEXT_DOMAIN, "bad property value");
break;
case SA_INVALID_SECURITY:
ret = dgettext(TEXT_DOMAIN, "invalid security type");
break;
case SA_NO_SUCH_SECURITY:
ret = dgettext(TEXT_DOMAIN, "security type not found");
break;
case SA_VALUE_CONFLICT:
ret = dgettext(TEXT_DOMAIN, "property value conflict");
break;
case SA_NOT_IMPLEMENTED:
ret = dgettext(TEXT_DOMAIN, "not implemented");
break;
case SA_INVALID_PATH:
ret = dgettext(TEXT_DOMAIN, "invalid path");
break;
case SA_NOT_SUPPORTED:
ret = dgettext(TEXT_DOMAIN, "operation not supported");
break;
case SA_PROP_SHARE_ONLY:
ret = dgettext(TEXT_DOMAIN, "property not valid for group");
break;
case SA_NOT_SHARED:
ret = dgettext(TEXT_DOMAIN, "not shared");
break;
case SA_NO_SUCH_RESOURCE:
ret = dgettext(TEXT_DOMAIN, "no such resource");
break;
case SA_RESOURCE_REQUIRED:
ret = dgettext(TEXT_DOMAIN, "resource name required");
break;
case SA_MULTIPLE_ERROR:
ret = dgettext(TEXT_DOMAIN, "errors from multiple protocols");
break;
case SA_PATH_IS_SUBDIR:
ret = dgettext(TEXT_DOMAIN, "path is a subpath of share");
break;
case SA_PATH_IS_PARENTDIR:
ret = dgettext(TEXT_DOMAIN, "path is parent of a share");
break;
case SA_NO_SECTION:
ret = dgettext(TEXT_DOMAIN, "protocol requires a section");
break;
case SA_NO_PROPERTIES:
ret = dgettext(TEXT_DOMAIN, "properties not found");
break;
case SA_NO_SUCH_SECTION:
ret = dgettext(TEXT_DOMAIN, "section not found");
break;
case SA_PASSWORD_ENC:
ret = dgettext(TEXT_DOMAIN, "passwords must be encrypted");
break;
case SA_SHARE_EXISTS:
ret = dgettext(TEXT_DOMAIN, "path or file is already shared");
break;
default:
(void) snprintf(errstr, sizeof (errstr),
dgettext(TEXT_DOMAIN, "unknown %d"), err);
ret = errstr;
}
return (ret);
}
int
sa_parse_legacy_options(sa_group_t group, char *options, char *proto)
{
sa_fstype_t *fstype;
#ifdef DEBUG
fprintf(stderr, "sa_parse_legacy_options: options=%s, proto=%s\n",
options, proto);
#endif
fstype = fstypes;
while (fstype != NULL) {
if (strcmp(fstype->name, proto) != 0) {
fstype = fstype->next;
continue;
}
return (fstype->ops->validate_shareopts(options));
}
return (SA_INVALID_PROTOCOL);
}
boolean_t
sa_needs_refresh(sa_handle_t handle)
{
return (B_TRUE);
}
libzfs_handle_t *
sa_get_zfs_handle(sa_handle_t handle)
{
sa_handle_impl_t impl_handle = (sa_handle_impl_t)handle;
if (impl_handle == NULL)
return (NULL);
return (impl_handle->zfs_libhandle);
}
static sa_share_impl_t
alloc_share(const char *sharepath)
{
sa_share_impl_t impl_share;
	impl_share = calloc(1, sizeof (struct sa_share_impl));
if (impl_share == NULL)
return (NULL);
impl_share->sharepath = strdup(sharepath);
if (impl_share->sharepath == NULL) {
free(impl_share);
return (NULL);
}
	impl_share->fsinfo = calloc(fstypes_count, sizeof (sa_share_fsinfo_t));
if (impl_share->fsinfo == NULL) {
free(impl_share->sharepath);
free(impl_share);
return (NULL);
}
return (impl_share);
}
static void
free_share(sa_share_impl_t impl_share)
{
sa_fstype_t *fstype;
fstype = fstypes;
while (fstype != NULL) {
fstype->ops->clear_shareopts(impl_share);
free(FSINFO(impl_share, fstype)->resource);
fstype = fstype->next;
}
free(impl_share->sharepath);
free(impl_share->dataset);
free(impl_share->fsinfo);
free(impl_share);
}
int
sa_zfs_process_share(sa_handle_t handle, sa_group_t group, sa_share_t share,
char *mountpoint, char *proto, zprop_source_t source, char *shareopts,
char *sourcestr, char *dataset)
{
sa_handle_impl_t impl_handle = (sa_handle_impl_t)handle;
sa_share_impl_t impl_share = (sa_share_impl_t)share;
#ifdef DEBUG
fprintf(stderr, "sa_zfs_process_share: mountpoint=%s, proto=%s, "
"shareopts=%s, sourcestr=%s, dataset=%s\n", mountpoint, proto,
shareopts, sourcestr, dataset);
#endif
return (process_share(impl_handle, impl_share, mountpoint, NULL,
proto, shareopts, NULL, dataset, B_FALSE));
}
void
sa_update_sharetab_ts(sa_handle_t handle)
{
sa_handle_impl_t impl_handle = (sa_handle_impl_t)handle;
update_sharetab(impl_handle);
}
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_1596_0 |
crossvul-cpp_data_bad_5057_0 | /*
* Timers abstract layer
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/string.h>
#include <sound/core.h>
#include <sound/timer.h>
#include <sound/control.h>
#include <sound/info.h>
#include <sound/minors.h>
#include <sound/initval.h>
#include <linux/kmod.h>
#if IS_ENABLED(CONFIG_SND_HRTIMER)
#define DEFAULT_TIMER_LIMIT 4
#else
#define DEFAULT_TIMER_LIMIT 1
#endif
static int timer_limit = DEFAULT_TIMER_LIMIT;
static int timer_tstamp_monotonic = 1;
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, Takashi Iwai <tiwai@suse.de>");
MODULE_DESCRIPTION("ALSA timer interface");
MODULE_LICENSE("GPL");
module_param(timer_limit, int, 0444);
MODULE_PARM_DESC(timer_limit, "Maximum global timers in system.");
module_param(timer_tstamp_monotonic, int, 0444);
MODULE_PARM_DESC(timer_tstamp_monotonic, "Use posix monotonic clock source for timestamps (default).");
MODULE_ALIAS_CHARDEV(CONFIG_SND_MAJOR, SNDRV_MINOR_TIMER);
MODULE_ALIAS("devname:snd/timer");
struct snd_timer_user {
struct snd_timer_instance *timeri;
int tread; /* enhanced read with timestamps and events */
unsigned long ticks;
unsigned long overrun;
int qhead;
int qtail;
int qused;
int queue_size;
bool disconnected;
struct snd_timer_read *queue;
struct snd_timer_tread *tqueue;
spinlock_t qlock;
unsigned long last_resolution;
unsigned int filter;
struct timespec tstamp; /* trigger tstamp */
wait_queue_head_t qchange_sleep;
struct fasync_struct *fasync;
struct mutex ioctl_lock;
};
/* list of timers */
static LIST_HEAD(snd_timer_list);
/* list of slave instances */
static LIST_HEAD(snd_timer_slave_list);
/* lock for slave active lists */
static DEFINE_SPINLOCK(slave_active_lock);
static DEFINE_MUTEX(register_mutex);
static int snd_timer_free(struct snd_timer *timer);
static int snd_timer_dev_free(struct snd_device *device);
static int snd_timer_dev_register(struct snd_device *device);
static int snd_timer_dev_disconnect(struct snd_device *device);
static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_left);
/*
* create a timer instance with the given owner string.
* when timer is not NULL, increments the module counter
*/
static struct snd_timer_instance *snd_timer_instance_new(char *owner,
struct snd_timer *timer)
{
struct snd_timer_instance *timeri;
timeri = kzalloc(sizeof(*timeri), GFP_KERNEL);
if (timeri == NULL)
return NULL;
timeri->owner = kstrdup(owner, GFP_KERNEL);
if (! timeri->owner) {
kfree(timeri);
return NULL;
}
INIT_LIST_HEAD(&timeri->open_list);
INIT_LIST_HEAD(&timeri->active_list);
INIT_LIST_HEAD(&timeri->ack_list);
INIT_LIST_HEAD(&timeri->slave_list_head);
INIT_LIST_HEAD(&timeri->slave_active_head);
timeri->timer = timer;
if (timer && !try_module_get(timer->module)) {
kfree(timeri->owner);
kfree(timeri);
return NULL;
}
return timeri;
}
/*
* find a timer instance from the given timer id
*/
static struct snd_timer *snd_timer_find(struct snd_timer_id *tid)
{
struct snd_timer *timer = NULL;
list_for_each_entry(timer, &snd_timer_list, device_list) {
if (timer->tmr_class != tid->dev_class)
continue;
if ((timer->tmr_class == SNDRV_TIMER_CLASS_CARD ||
timer->tmr_class == SNDRV_TIMER_CLASS_PCM) &&
(timer->card == NULL ||
timer->card->number != tid->card))
continue;
if (timer->tmr_device != tid->device)
continue;
if (timer->tmr_subdevice != tid->subdevice)
continue;
return timer;
}
return NULL;
}
#ifdef CONFIG_MODULES
static void snd_timer_request(struct snd_timer_id *tid)
{
switch (tid->dev_class) {
case SNDRV_TIMER_CLASS_GLOBAL:
if (tid->device < timer_limit)
request_module("snd-timer-%i", tid->device);
break;
case SNDRV_TIMER_CLASS_CARD:
case SNDRV_TIMER_CLASS_PCM:
if (tid->card < snd_ecards_limit)
request_module("snd-card-%i", tid->card);
break;
default:
break;
}
}
#endif
/*
 * look for a master instance matching the slave id of the given slave.
* when found, relink the open_link of the slave.
*
* call this with register_mutex down.
*/
static void snd_timer_check_slave(struct snd_timer_instance *slave)
{
struct snd_timer *timer;
struct snd_timer_instance *master;
/* FIXME: it's really dumb to look up all entries.. */
list_for_each_entry(timer, &snd_timer_list, device_list) {
list_for_each_entry(master, &timer->open_list_head, open_list) {
if (slave->slave_class == master->slave_class &&
slave->slave_id == master->slave_id) {
list_move_tail(&slave->open_list,
&master->slave_list_head);
spin_lock_irq(&slave_active_lock);
slave->master = master;
slave->timer = master->timer;
spin_unlock_irq(&slave_active_lock);
return;
}
}
}
}
/*
 * look for slave instances matching the slave id of the given master.
* when found, relink the open_link of slaves.
*
* call this with register_mutex down.
*/
static void snd_timer_check_master(struct snd_timer_instance *master)
{
struct snd_timer_instance *slave, *tmp;
/* check all pending slaves */
list_for_each_entry_safe(slave, tmp, &snd_timer_slave_list, open_list) {
if (slave->slave_class == master->slave_class &&
slave->slave_id == master->slave_id) {
list_move_tail(&slave->open_list, &master->slave_list_head);
spin_lock_irq(&slave_active_lock);
spin_lock(&master->timer->lock);
slave->master = master;
slave->timer = master->timer;
if (slave->flags & SNDRV_TIMER_IFLG_RUNNING)
list_add_tail(&slave->active_list,
&master->slave_active_head);
spin_unlock(&master->timer->lock);
spin_unlock_irq(&slave_active_lock);
}
}
}
/*
* open a timer instance
 * when opening a master, the slave id must be given here.
*/
int snd_timer_open(struct snd_timer_instance **ti,
char *owner, struct snd_timer_id *tid,
unsigned int slave_id)
{
struct snd_timer *timer;
struct snd_timer_instance *timeri = NULL;
if (tid->dev_class == SNDRV_TIMER_CLASS_SLAVE) {
/* open a slave instance */
if (tid->dev_sclass <= SNDRV_TIMER_SCLASS_NONE ||
tid->dev_sclass > SNDRV_TIMER_SCLASS_OSS_SEQUENCER) {
pr_debug("ALSA: timer: invalid slave class %i\n",
tid->dev_sclass);
return -EINVAL;
}
mutex_lock(®ister_mutex);
timeri = snd_timer_instance_new(owner, NULL);
if (!timeri) {
mutex_unlock(®ister_mutex);
return -ENOMEM;
}
timeri->slave_class = tid->dev_sclass;
timeri->slave_id = tid->device;
timeri->flags |= SNDRV_TIMER_IFLG_SLAVE;
list_add_tail(&timeri->open_list, &snd_timer_slave_list);
snd_timer_check_slave(timeri);
mutex_unlock(®ister_mutex);
*ti = timeri;
return 0;
}
/* open a master instance */
mutex_lock(®ister_mutex);
timer = snd_timer_find(tid);
#ifdef CONFIG_MODULES
if (!timer) {
mutex_unlock(®ister_mutex);
snd_timer_request(tid);
mutex_lock(®ister_mutex);
timer = snd_timer_find(tid);
}
#endif
if (!timer) {
mutex_unlock(®ister_mutex);
return -ENODEV;
}
if (!list_empty(&timer->open_list_head)) {
timeri = list_entry(timer->open_list_head.next,
struct snd_timer_instance, open_list);
if (timeri->flags & SNDRV_TIMER_IFLG_EXCLUSIVE) {
mutex_unlock(®ister_mutex);
return -EBUSY;
}
}
timeri = snd_timer_instance_new(owner, timer);
if (!timeri) {
mutex_unlock(®ister_mutex);
return -ENOMEM;
}
/* take a card refcount for safe disconnection */
if (timer->card)
get_device(&timer->card->card_dev);
timeri->slave_class = tid->dev_sclass;
timeri->slave_id = slave_id;
if (list_empty(&timer->open_list_head) && timer->hw.open)
timer->hw.open(timer);
list_add_tail(&timeri->open_list, &timer->open_list_head);
snd_timer_check_master(timeri);
mutex_unlock(®ister_mutex);
*ti = timeri;
return 0;
}
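/*
 * Sketch (illustrative only, compiled out): opening a slave instance
 * bound to an application-class master with a matching slave_id.  The
 * owner string and slave_id value are hypothetical.
 */
#if 0
	struct snd_timer_instance *ti;
	struct snd_timer_id tid = {
		.dev_class  = SNDRV_TIMER_CLASS_SLAVE,
		.dev_sclass = SNDRV_TIMER_SCLASS_APPLICATION,
		.device     = 1234,	/* must equal the master's slave_id */
	};
	int err = snd_timer_open(&ti, "example", &tid, 0);
#endif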
/*
* close a timer instance
*/
int snd_timer_close(struct snd_timer_instance *timeri)
{
struct snd_timer *timer = NULL;
struct snd_timer_instance *slave, *tmp;
if (snd_BUG_ON(!timeri))
return -ENXIO;
mutex_lock(®ister_mutex);
list_del(&timeri->open_list);
/* force to stop the timer */
snd_timer_stop(timeri);
timer = timeri->timer;
if (timer) {
/* wait, until the active callback is finished */
spin_lock_irq(&timer->lock);
while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) {
spin_unlock_irq(&timer->lock);
udelay(10);
spin_lock_irq(&timer->lock);
}
spin_unlock_irq(&timer->lock);
/* remove slave links */
spin_lock_irq(&slave_active_lock);
spin_lock(&timer->lock);
list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head,
open_list) {
list_move_tail(&slave->open_list, &snd_timer_slave_list);
slave->master = NULL;
slave->timer = NULL;
list_del_init(&slave->ack_list);
list_del_init(&slave->active_list);
}
spin_unlock(&timer->lock);
spin_unlock_irq(&slave_active_lock);
/* slave doesn't need to release timer resources below */
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
timer = NULL;
}
if (timeri->private_free)
timeri->private_free(timeri);
kfree(timeri->owner);
kfree(timeri);
if (timer) {
if (list_empty(&timer->open_list_head) && timer->hw.close)
timer->hw.close(timer);
/* release a card refcount for safe disconnection */
if (timer->card)
put_device(&timer->card->card_dev);
module_put(timer->module);
}
mutex_unlock(®ister_mutex);
return 0;
}
unsigned long snd_timer_resolution(struct snd_timer_instance *timeri)
{
struct snd_timer * timer;
if (timeri == NULL)
return 0;
if ((timer = timeri->timer) != NULL) {
if (timer->hw.c_resolution)
return timer->hw.c_resolution(timer);
return timer->hw.resolution;
}
return 0;
}
static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
{
struct snd_timer *timer;
unsigned long resolution = 0;
struct snd_timer_instance *ts;
struct timespec tstamp;
if (timer_tstamp_monotonic)
ktime_get_ts(&tstamp);
else
getnstimeofday(&tstamp);
if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_START ||
event > SNDRV_TIMER_EVENT_PAUSE))
return;
if (event == SNDRV_TIMER_EVENT_START ||
event == SNDRV_TIMER_EVENT_CONTINUE)
resolution = snd_timer_resolution(ti);
if (ti->ccallback)
ti->ccallback(ti, event, &tstamp, resolution);
if (ti->flags & SNDRV_TIMER_IFLG_SLAVE)
return;
timer = ti->timer;
if (timer == NULL)
return;
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
return;
list_for_each_entry(ts, &ti->slave_active_head, active_list)
if (ts->ccallback)
ts->ccallback(ts, event + 100, &tstamp, resolution);
}
/* start/continue a master timer */
static int snd_timer_start1(struct snd_timer_instance *timeri,
bool start, unsigned long ticks)
{
struct snd_timer *timer;
int result;
unsigned long flags;
timer = timeri->timer;
if (!timer)
return -EINVAL;
spin_lock_irqsave(&timer->lock, flags);
if (timer->card && timer->card->shutdown) {
result = -ENODEV;
goto unlock;
}
if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
SNDRV_TIMER_IFLG_START)) {
result = -EBUSY;
goto unlock;
}
if (start)
timeri->ticks = timeri->cticks = ticks;
else if (!timeri->cticks)
timeri->cticks = 1;
timeri->pticks = 0;
list_move_tail(&timeri->active_list, &timer->active_list_head);
if (timer->running) {
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
goto __start_now;
timer->flags |= SNDRV_TIMER_FLG_RESCHED;
timeri->flags |= SNDRV_TIMER_IFLG_START;
result = 1; /* delayed start */
} else {
if (start)
timer->sticks = ticks;
timer->hw.start(timer);
__start_now:
timer->running++;
timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
result = 0;
}
snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START :
SNDRV_TIMER_EVENT_CONTINUE);
unlock:
spin_unlock_irqrestore(&timer->lock, flags);
return result;
}
/* start/continue a slave timer */
static int snd_timer_start_slave(struct snd_timer_instance *timeri,
bool start)
{
unsigned long flags;
spin_lock_irqsave(&slave_active_lock, flags);
if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
spin_unlock_irqrestore(&slave_active_lock, flags);
return -EBUSY;
}
timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
if (timeri->master && timeri->timer) {
spin_lock(&timeri->timer->lock);
list_add_tail(&timeri->active_list,
&timeri->master->slave_active_head);
snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START :
SNDRV_TIMER_EVENT_CONTINUE);
spin_unlock(&timeri->timer->lock);
}
spin_unlock_irqrestore(&slave_active_lock, flags);
return 1; /* delayed start */
}
/* stop/pause a master timer */
static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop)
{
struct snd_timer *timer;
int result = 0;
unsigned long flags;
timer = timeri->timer;
if (!timer)
return -EINVAL;
spin_lock_irqsave(&timer->lock, flags);
if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
SNDRV_TIMER_IFLG_START))) {
result = -EBUSY;
goto unlock;
}
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
if (timer->card && timer->card->shutdown)
goto unlock;
if (stop) {
timeri->cticks = timeri->ticks;
timeri->pticks = 0;
}
if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
!(--timer->running)) {
timer->hw.stop(timer);
if (timer->flags & SNDRV_TIMER_FLG_RESCHED) {
timer->flags &= ~SNDRV_TIMER_FLG_RESCHED;
snd_timer_reschedule(timer, 0);
if (timer->flags & SNDRV_TIMER_FLG_CHANGE) {
timer->flags &= ~SNDRV_TIMER_FLG_CHANGE;
timer->hw.start(timer);
}
}
}
timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
	snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
			  SNDRV_TIMER_EVENT_PAUSE);
unlock:
spin_unlock_irqrestore(&timer->lock, flags);
return result;
}
/* stop/pause a slave timer */
static int snd_timer_stop_slave(struct snd_timer_instance *timeri, bool stop)
{
unsigned long flags;
spin_lock_irqsave(&slave_active_lock, flags);
if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
spin_unlock_irqrestore(&slave_active_lock, flags);
return -EBUSY;
}
timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
if (timeri->timer) {
spin_lock(&timeri->timer->lock);
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
		snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
				  SNDRV_TIMER_EVENT_PAUSE);
spin_unlock(&timeri->timer->lock);
}
spin_unlock_irqrestore(&slave_active_lock, flags);
return 0;
}
/*
* start the timer instance
*/
int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
{
if (timeri == NULL || ticks < 1)
return -EINVAL;
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_start_slave(timeri, true);
else
return snd_timer_start1(timeri, true, ticks);
}
/*
* stop the timer instance.
*
* do not call this from the timer callback!
*/
int snd_timer_stop(struct snd_timer_instance *timeri)
{
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_stop_slave(timeri, true);
else
return snd_timer_stop1(timeri, true);
}
/*
* start again.. the tick is kept.
*/
int snd_timer_continue(struct snd_timer_instance *timeri)
{
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_start_slave(timeri, false);
else
return snd_timer_start1(timeri, false, 0);
}
/*
* pause.. remember the ticks left
*/
int snd_timer_pause(struct snd_timer_instance * timeri)
{
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_stop_slave(timeri, false);
else
return snd_timer_stop1(timeri, false);
}
/*
* reschedule the timer
*
* start pending instances and check the scheduling ticks.
 * when the scheduling ticks change, set the CHANGE flag to reprogram the timer.
*/
static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_left)
{
struct snd_timer_instance *ti;
unsigned long ticks = ~0UL;
list_for_each_entry(ti, &timer->active_list_head, active_list) {
if (ti->flags & SNDRV_TIMER_IFLG_START) {
ti->flags &= ~SNDRV_TIMER_IFLG_START;
ti->flags |= SNDRV_TIMER_IFLG_RUNNING;
timer->running++;
}
if (ti->flags & SNDRV_TIMER_IFLG_RUNNING) {
if (ticks > ti->cticks)
ticks = ti->cticks;
}
}
if (ticks == ~0UL) {
timer->flags &= ~SNDRV_TIMER_FLG_RESCHED;
return;
}
if (ticks > timer->hw.ticks)
ticks = timer->hw.ticks;
if (ticks_left != ticks)
timer->flags |= SNDRV_TIMER_FLG_CHANGE;
timer->sticks = ticks;
}
/*
* timer tasklet
*
*/
static void snd_timer_tasklet(unsigned long arg)
{
struct snd_timer *timer = (struct snd_timer *) arg;
struct snd_timer_instance *ti;
struct list_head *p;
unsigned long resolution, ticks;
unsigned long flags;
if (timer->card && timer->card->shutdown)
return;
spin_lock_irqsave(&timer->lock, flags);
/* now process all callbacks */
while (!list_empty(&timer->sack_list_head)) {
p = timer->sack_list_head.next; /* get first item */
ti = list_entry(p, struct snd_timer_instance, ack_list);
/* remove from ack_list and make empty */
list_del_init(p);
ticks = ti->pticks;
ti->pticks = 0;
resolution = ti->resolution;
ti->flags |= SNDRV_TIMER_IFLG_CALLBACK;
spin_unlock(&timer->lock);
if (ti->callback)
ti->callback(ti, resolution, ticks);
spin_lock(&timer->lock);
ti->flags &= ~SNDRV_TIMER_IFLG_CALLBACK;
}
spin_unlock_irqrestore(&timer->lock, flags);
}
/*
* timer interrupt
*
* ticks_left is usually equal to timer->sticks.
*
*/
void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
{
struct snd_timer_instance *ti, *ts, *tmp;
unsigned long resolution, ticks;
struct list_head *p, *ack_list_head;
unsigned long flags;
int use_tasklet = 0;
if (timer == NULL)
return;
if (timer->card && timer->card->shutdown)
return;
spin_lock_irqsave(&timer->lock, flags);
/* remember the current resolution */
if (timer->hw.c_resolution)
resolution = timer->hw.c_resolution(timer);
else
resolution = timer->hw.resolution;
	/* loop for all active instances
	 * Here we cannot use list_for_each_entry because the active_list of a
	 * processed instance is relinked to an ack list before the callback
	 * is called.
	 */
list_for_each_entry_safe(ti, tmp, &timer->active_list_head,
active_list) {
if (!(ti->flags & SNDRV_TIMER_IFLG_RUNNING))
continue;
ti->pticks += ticks_left;
ti->resolution = resolution;
if (ti->cticks < ticks_left)
ti->cticks = 0;
else
ti->cticks -= ticks_left;
if (ti->cticks) /* not expired */
continue;
if (ti->flags & SNDRV_TIMER_IFLG_AUTO) {
ti->cticks = ti->ticks;
} else {
ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
--timer->running;
list_del_init(&ti->active_list);
}
if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) ||
(ti->flags & SNDRV_TIMER_IFLG_FAST))
ack_list_head = &timer->ack_list_head;
else
ack_list_head = &timer->sack_list_head;
if (list_empty(&ti->ack_list))
list_add_tail(&ti->ack_list, ack_list_head);
list_for_each_entry(ts, &ti->slave_active_head, active_list) {
ts->pticks = ti->pticks;
ts->resolution = resolution;
if (list_empty(&ts->ack_list))
list_add_tail(&ts->ack_list, ack_list_head);
}
}
if (timer->flags & SNDRV_TIMER_FLG_RESCHED)
snd_timer_reschedule(timer, timer->sticks);
if (timer->running) {
if (timer->hw.flags & SNDRV_TIMER_HW_STOP) {
timer->hw.stop(timer);
timer->flags |= SNDRV_TIMER_FLG_CHANGE;
}
if (!(timer->hw.flags & SNDRV_TIMER_HW_AUTO) ||
(timer->flags & SNDRV_TIMER_FLG_CHANGE)) {
/* restart timer */
timer->flags &= ~SNDRV_TIMER_FLG_CHANGE;
timer->hw.start(timer);
}
} else {
timer->hw.stop(timer);
}
/* now process all fast callbacks */
while (!list_empty(&timer->ack_list_head)) {
p = timer->ack_list_head.next; /* get first item */
ti = list_entry(p, struct snd_timer_instance, ack_list);
/* remove from ack_list and make empty */
list_del_init(p);
ticks = ti->pticks;
ti->pticks = 0;
ti->flags |= SNDRV_TIMER_IFLG_CALLBACK;
spin_unlock(&timer->lock);
if (ti->callback)
ti->callback(ti, resolution, ticks);
spin_lock(&timer->lock);
ti->flags &= ~SNDRV_TIMER_IFLG_CALLBACK;
}
/* do we have any slow callbacks? */
use_tasklet = !list_empty(&timer->sack_list_head);
spin_unlock_irqrestore(&timer->lock, flags);
if (use_tasklet)
tasklet_schedule(&timer->task_queue);
}
/*
*/
int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid,
struct snd_timer **rtimer)
{
struct snd_timer *timer;
int err;
static struct snd_device_ops ops = {
.dev_free = snd_timer_dev_free,
.dev_register = snd_timer_dev_register,
.dev_disconnect = snd_timer_dev_disconnect,
};
if (snd_BUG_ON(!tid))
return -EINVAL;
if (rtimer)
*rtimer = NULL;
timer = kzalloc(sizeof(*timer), GFP_KERNEL);
if (!timer)
return -ENOMEM;
timer->tmr_class = tid->dev_class;
timer->card = card;
timer->tmr_device = tid->device;
timer->tmr_subdevice = tid->subdevice;
if (id)
strlcpy(timer->id, id, sizeof(timer->id));
INIT_LIST_HEAD(&timer->device_list);
INIT_LIST_HEAD(&timer->open_list_head);
INIT_LIST_HEAD(&timer->active_list_head);
INIT_LIST_HEAD(&timer->ack_list_head);
INIT_LIST_HEAD(&timer->sack_list_head);
spin_lock_init(&timer->lock);
tasklet_init(&timer->task_queue, snd_timer_tasklet,
(unsigned long)timer);
if (card != NULL) {
timer->module = card->module;
err = snd_device_new(card, SNDRV_DEV_TIMER, timer, &ops);
if (err < 0) {
snd_timer_free(timer);
return err;
}
}
if (rtimer)
*rtimer = timer;
return 0;
}
static int snd_timer_free(struct snd_timer *timer)
{
if (!timer)
return 0;
mutex_lock(®ister_mutex);
if (! list_empty(&timer->open_list_head)) {
struct list_head *p, *n;
struct snd_timer_instance *ti;
pr_warn("ALSA: timer %p is busy?\n", timer);
list_for_each_safe(p, n, &timer->open_list_head) {
list_del_init(p);
ti = list_entry(p, struct snd_timer_instance, open_list);
ti->timer = NULL;
}
}
list_del(&timer->device_list);
mutex_unlock(®ister_mutex);
if (timer->private_free)
timer->private_free(timer);
kfree(timer);
return 0;
}
static int snd_timer_dev_free(struct snd_device *device)
{
struct snd_timer *timer = device->device_data;
return snd_timer_free(timer);
}
static int snd_timer_dev_register(struct snd_device *dev)
{
struct snd_timer *timer = dev->device_data;
struct snd_timer *timer1;
if (snd_BUG_ON(!timer || !timer->hw.start || !timer->hw.stop))
return -ENXIO;
if (!(timer->hw.flags & SNDRV_TIMER_HW_SLAVE) &&
!timer->hw.resolution && timer->hw.c_resolution == NULL)
return -EINVAL;
mutex_lock(®ister_mutex);
list_for_each_entry(timer1, &snd_timer_list, device_list) {
if (timer1->tmr_class > timer->tmr_class)
break;
if (timer1->tmr_class < timer->tmr_class)
continue;
if (timer1->card && timer->card) {
if (timer1->card->number > timer->card->number)
break;
if (timer1->card->number < timer->card->number)
continue;
}
if (timer1->tmr_device > timer->tmr_device)
break;
if (timer1->tmr_device < timer->tmr_device)
continue;
if (timer1->tmr_subdevice > timer->tmr_subdevice)
break;
if (timer1->tmr_subdevice < timer->tmr_subdevice)
continue;
/* conflicts.. */
mutex_unlock(®ister_mutex);
return -EBUSY;
}
list_add_tail(&timer->device_list, &timer1->device_list);
mutex_unlock(®ister_mutex);
return 0;
}
static int snd_timer_dev_disconnect(struct snd_device *device)
{
struct snd_timer *timer = device->device_data;
struct snd_timer_instance *ti;
mutex_lock(®ister_mutex);
list_del_init(&timer->device_list);
/* wake up pending sleepers */
list_for_each_entry(ti, &timer->open_list_head, open_list) {
if (ti->disconnect)
ti->disconnect(ti);
}
mutex_unlock(®ister_mutex);
return 0;
}
void snd_timer_notify(struct snd_timer *timer, int event, struct timespec *tstamp)
{
unsigned long flags;
unsigned long resolution = 0;
struct snd_timer_instance *ti, *ts;
if (timer->card && timer->card->shutdown)
return;
if (! (timer->hw.flags & SNDRV_TIMER_HW_SLAVE))
return;
if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_MSTART ||
event > SNDRV_TIMER_EVENT_MRESUME))
return;
spin_lock_irqsave(&timer->lock, flags);
if (event == SNDRV_TIMER_EVENT_MSTART ||
event == SNDRV_TIMER_EVENT_MCONTINUE ||
event == SNDRV_TIMER_EVENT_MRESUME) {
if (timer->hw.c_resolution)
resolution = timer->hw.c_resolution(timer);
else
resolution = timer->hw.resolution;
}
list_for_each_entry(ti, &timer->active_list_head, active_list) {
if (ti->ccallback)
ti->ccallback(ti, event, tstamp, resolution);
list_for_each_entry(ts, &ti->slave_active_head, active_list)
if (ts->ccallback)
ts->ccallback(ts, event, tstamp, resolution);
}
spin_unlock_irqrestore(&timer->lock, flags);
}
/*
* exported functions for global timers
*/
int snd_timer_global_new(char *id, int device, struct snd_timer **rtimer)
{
struct snd_timer_id tid;
tid.dev_class = SNDRV_TIMER_CLASS_GLOBAL;
tid.dev_sclass = SNDRV_TIMER_SCLASS_NONE;
tid.card = -1;
tid.device = device;
tid.subdevice = 0;
return snd_timer_new(NULL, id, &tid, rtimer);
}
int snd_timer_global_free(struct snd_timer *timer)
{
return snd_timer_free(timer);
}
int snd_timer_global_register(struct snd_timer *timer)
{
struct snd_device dev;
memset(&dev, 0, sizeof(dev));
dev.device_data = timer;
return snd_timer_dev_register(&dev);
}
/*
* System timer
*/
struct snd_timer_system_private {
struct timer_list tlist;
unsigned long last_expires;
unsigned long last_jiffies;
unsigned long correction;
};
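/*
 * The system timer is driven by jiffies and may fire late;
 * "correction" accumulates the jiffies lost to late expiry so that
 * snd_timer_s_start() can shorten the next interval accordingly.
 */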
static void snd_timer_s_function(unsigned long data)
{
struct snd_timer *timer = (struct snd_timer *)data;
struct snd_timer_system_private *priv = timer->private_data;
unsigned long jiff = jiffies;
if (time_after(jiff, priv->last_expires))
priv->correction += (long)jiff - (long)priv->last_expires;
snd_timer_interrupt(timer, (long)jiff - (long)priv->last_jiffies);
}
static int snd_timer_s_start(struct snd_timer * timer)
{
struct snd_timer_system_private *priv;
unsigned long njiff;
priv = (struct snd_timer_system_private *) timer->private_data;
njiff = (priv->last_jiffies = jiffies);
if (priv->correction > timer->sticks - 1) {
priv->correction -= timer->sticks - 1;
njiff++;
} else {
njiff += timer->sticks - priv->correction;
priv->correction = 0;
}
priv->last_expires = njiff;
mod_timer(&priv->tlist, njiff);
return 0;
}
static int snd_timer_s_stop(struct snd_timer * timer)
{
struct snd_timer_system_private *priv;
unsigned long jiff;
priv = (struct snd_timer_system_private *) timer->private_data;
del_timer(&priv->tlist);
jiff = jiffies;
if (time_before(jiff, priv->last_expires))
timer->sticks = priv->last_expires - jiff;
else
timer->sticks = 1;
priv->correction = 0;
return 0;
}
static int snd_timer_s_close(struct snd_timer *timer)
{
struct snd_timer_system_private *priv;
priv = (struct snd_timer_system_private *)timer->private_data;
del_timer_sync(&priv->tlist);
return 0;
}
static struct snd_timer_hardware snd_timer_system =
{
.flags = SNDRV_TIMER_HW_FIRST | SNDRV_TIMER_HW_TASKLET,
.resolution = 1000000000L / HZ,
.ticks = 10000000L,
.close = snd_timer_s_close,
.start = snd_timer_s_start,
.stop = snd_timer_s_stop
};
static void snd_timer_free_system(struct snd_timer *timer)
{
kfree(timer->private_data);
}
static int snd_timer_register_system(void)
{
struct snd_timer *timer;
struct snd_timer_system_private *priv;
int err;
err = snd_timer_global_new("system", SNDRV_TIMER_GLOBAL_SYSTEM, &timer);
if (err < 0)
return err;
strcpy(timer->name, "system timer");
timer->hw = snd_timer_system;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (priv == NULL) {
snd_timer_free(timer);
return -ENOMEM;
}
setup_timer(&priv->tlist, snd_timer_s_function, (unsigned long) timer);
timer->private_data = priv;
timer->private_free = snd_timer_free_system;
return snd_timer_global_register(timer);
}
#ifdef CONFIG_SND_PROC_FS
/*
* Info interface
*/
static void snd_timer_proc_read(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_timer *timer;
struct snd_timer_instance *ti;
mutex_lock(®ister_mutex);
list_for_each_entry(timer, &snd_timer_list, device_list) {
if (timer->card && timer->card->shutdown)
continue;
switch (timer->tmr_class) {
case SNDRV_TIMER_CLASS_GLOBAL:
snd_iprintf(buffer, "G%i: ", timer->tmr_device);
break;
case SNDRV_TIMER_CLASS_CARD:
snd_iprintf(buffer, "C%i-%i: ",
timer->card->number, timer->tmr_device);
break;
case SNDRV_TIMER_CLASS_PCM:
snd_iprintf(buffer, "P%i-%i-%i: ", timer->card->number,
timer->tmr_device, timer->tmr_subdevice);
break;
default:
snd_iprintf(buffer, "?%i-%i-%i-%i: ", timer->tmr_class,
timer->card ? timer->card->number : -1,
timer->tmr_device, timer->tmr_subdevice);
}
snd_iprintf(buffer, "%s :", timer->name);
if (timer->hw.resolution)
snd_iprintf(buffer, " %lu.%03luus (%lu ticks)",
timer->hw.resolution / 1000,
timer->hw.resolution % 1000,
timer->hw.ticks);
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
snd_iprintf(buffer, " SLAVE");
snd_iprintf(buffer, "\n");
list_for_each_entry(ti, &timer->open_list_head, open_list)
snd_iprintf(buffer, " Client %s : %s\n",
ti->owner ? ti->owner : "unknown",
ti->flags & (SNDRV_TIMER_IFLG_START |
SNDRV_TIMER_IFLG_RUNNING)
? "running" : "stopped");
}
mutex_unlock(®ister_mutex);
}
static struct snd_info_entry *snd_timer_proc_entry;
static void __init snd_timer_proc_init(void)
{
struct snd_info_entry *entry;
entry = snd_info_create_module_entry(THIS_MODULE, "timers", NULL);
if (entry != NULL) {
entry->c.text.read = snd_timer_proc_read;
if (snd_info_register(entry) < 0) {
snd_info_free_entry(entry);
entry = NULL;
}
}
snd_timer_proc_entry = entry;
}
static void __exit snd_timer_proc_done(void)
{
snd_info_free_entry(snd_timer_proc_entry);
}
#else /* !CONFIG_SND_PROC_FS */
#define snd_timer_proc_init()
#define snd_timer_proc_done()
#endif
/*
* USER SPACE interface
*/
static void snd_timer_user_interrupt(struct snd_timer_instance *timeri,
unsigned long resolution,
unsigned long ticks)
{
struct snd_timer_user *tu = timeri->callback_data;
struct snd_timer_read *r;
int prev;
spin_lock(&tu->qlock);
if (tu->qused > 0) {
prev = tu->qtail == 0 ? tu->queue_size - 1 : tu->qtail - 1;
r = &tu->queue[prev];
if (r->resolution == resolution) {
r->ticks += ticks;
goto __wake;
}
}
if (tu->qused >= tu->queue_size) {
tu->overrun++;
} else {
r = &tu->queue[tu->qtail++];
tu->qtail %= tu->queue_size;
r->resolution = resolution;
r->ticks = ticks;
tu->qused++;
}
__wake:
spin_unlock(&tu->qlock);
kill_fasync(&tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
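/*
 * Append one tread event to the queue; interrupt-path callers hold
 * tu->qlock around this.
 */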
static void snd_timer_user_append_to_tqueue(struct snd_timer_user *tu,
struct snd_timer_tread *tread)
{
if (tu->qused >= tu->queue_size) {
tu->overrun++;
} else {
memcpy(&tu->tqueue[tu->qtail++], tread, sizeof(*tread));
tu->qtail %= tu->queue_size;
tu->qused++;
}
}
static void snd_timer_user_ccallback(struct snd_timer_instance *timeri,
int event,
struct timespec *tstamp,
unsigned long resolution)
{
struct snd_timer_user *tu = timeri->callback_data;
struct snd_timer_tread r1;
unsigned long flags;
if (event >= SNDRV_TIMER_EVENT_START &&
event <= SNDRV_TIMER_EVENT_PAUSE)
tu->tstamp = *tstamp;
if ((tu->filter & (1 << event)) == 0 || !tu->tread)
return;
r1.event = event;
r1.tstamp = *tstamp;
r1.val = resolution;
spin_lock_irqsave(&tu->qlock, flags);
snd_timer_user_append_to_tqueue(tu, &r1);
spin_unlock_irqrestore(&tu->qlock, flags);
kill_fasync(&tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
static void snd_timer_user_disconnect(struct snd_timer_instance *timeri)
{
struct snd_timer_user *tu = timeri->callback_data;
tu->disconnected = true;
wake_up(&tu->qchange_sleep);
}
static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri,
unsigned long resolution,
unsigned long ticks)
{
struct snd_timer_user *tu = timeri->callback_data;
struct snd_timer_tread *r, r1;
struct timespec tstamp;
int prev, append = 0;
memset(&tstamp, 0, sizeof(tstamp));
spin_lock(&tu->qlock);
if ((tu->filter & ((1 << SNDRV_TIMER_EVENT_RESOLUTION) |
(1 << SNDRV_TIMER_EVENT_TICK))) == 0) {
spin_unlock(&tu->qlock);
return;
}
if (tu->last_resolution != resolution || ticks > 0) {
if (timer_tstamp_monotonic)
ktime_get_ts(&tstamp);
else
getnstimeofday(&tstamp);
}
if ((tu->filter & (1 << SNDRV_TIMER_EVENT_RESOLUTION)) &&
tu->last_resolution != resolution) {
r1.event = SNDRV_TIMER_EVENT_RESOLUTION;
r1.tstamp = tstamp;
r1.val = resolution;
snd_timer_user_append_to_tqueue(tu, &r1);
tu->last_resolution = resolution;
append++;
}
if ((tu->filter & (1 << SNDRV_TIMER_EVENT_TICK)) == 0)
goto __wake;
if (ticks == 0)
goto __wake;
if (tu->qused > 0) {
prev = tu->qtail == 0 ? tu->queue_size - 1 : tu->qtail - 1;
r = &tu->tqueue[prev];
if (r->event == SNDRV_TIMER_EVENT_TICK) {
r->tstamp = tstamp;
r->val += ticks;
append++;
goto __wake;
}
}
r1.event = SNDRV_TIMER_EVENT_TICK;
r1.tstamp = tstamp;
r1.val = ticks;
snd_timer_user_append_to_tqueue(tu, &r1);
append++;
__wake:
spin_unlock(&tu->qlock);
if (append == 0)
return;
kill_fasync(&tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
static int snd_timer_user_open(struct inode *inode, struct file *file)
{
struct snd_timer_user *tu;
int err;
err = nonseekable_open(inode, file);
if (err < 0)
return err;
tu = kzalloc(sizeof(*tu), GFP_KERNEL);
if (tu == NULL)
return -ENOMEM;
spin_lock_init(&tu->qlock);
init_waitqueue_head(&tu->qchange_sleep);
mutex_init(&tu->ioctl_lock);
tu->ticks = 1;
tu->queue_size = 128;
tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
GFP_KERNEL);
if (tu->queue == NULL) {
kfree(tu);
return -ENOMEM;
}
file->private_data = tu;
return 0;
}
static int snd_timer_user_release(struct inode *inode, struct file *file)
{
struct snd_timer_user *tu;
if (file->private_data) {
tu = file->private_data;
file->private_data = NULL;
mutex_lock(&tu->ioctl_lock);
if (tu->timeri)
snd_timer_close(tu->timeri);
mutex_unlock(&tu->ioctl_lock);
kfree(tu->queue);
kfree(tu->tqueue);
kfree(tu);
}
return 0;
}
static void snd_timer_user_zero_id(struct snd_timer_id *id)
{
id->dev_class = SNDRV_TIMER_CLASS_NONE;
id->dev_sclass = SNDRV_TIMER_SCLASS_NONE;
id->card = -1;
id->device = -1;
id->subdevice = -1;
}
static void snd_timer_user_copy_id(struct snd_timer_id *id, struct snd_timer *timer)
{
id->dev_class = timer->tmr_class;
id->dev_sclass = SNDRV_TIMER_SCLASS_NONE;
id->card = timer->card ? timer->card->number : -1;
id->device = timer->tmr_device;
id->subdevice = timer->tmr_subdevice;
}
static int snd_timer_user_next_device(struct snd_timer_id __user *_tid)
{
struct snd_timer_id id;
struct snd_timer *timer;
struct list_head *p;
if (copy_from_user(&id, _tid, sizeof(id)))
return -EFAULT;
mutex_lock(®ister_mutex);
if (id.dev_class < 0) { /* first item */
if (list_empty(&snd_timer_list))
snd_timer_user_zero_id(&id);
else {
timer = list_entry(snd_timer_list.next,
struct snd_timer, device_list);
snd_timer_user_copy_id(&id, timer);
}
} else {
switch (id.dev_class) {
case SNDRV_TIMER_CLASS_GLOBAL:
id.device = id.device < 0 ? 0 : id.device + 1;
list_for_each(p, &snd_timer_list) {
timer = list_entry(p, struct snd_timer, device_list);
if (timer->tmr_class > SNDRV_TIMER_CLASS_GLOBAL) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_device >= id.device) {
snd_timer_user_copy_id(&id, timer);
break;
}
}
if (p == &snd_timer_list)
snd_timer_user_zero_id(&id);
break;
case SNDRV_TIMER_CLASS_CARD:
case SNDRV_TIMER_CLASS_PCM:
			if (id.card < 0) {
				id.card = 0;
			} else if (id.device < 0) {
				id.device = 0;
			} else if (id.subdevice < 0) {
				id.subdevice = 0;
			} else {
				id.subdevice++;
			}
list_for_each(p, &snd_timer_list) {
timer = list_entry(p, struct snd_timer, device_list);
if (timer->tmr_class > id.dev_class) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_class < id.dev_class)
continue;
if (timer->card->number > id.card) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->card->number < id.card)
continue;
if (timer->tmr_device > id.device) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_device < id.device)
continue;
if (timer->tmr_subdevice > id.subdevice) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_subdevice < id.subdevice)
continue;
snd_timer_user_copy_id(&id, timer);
break;
}
if (p == &snd_timer_list)
snd_timer_user_zero_id(&id);
break;
default:
snd_timer_user_zero_id(&id);
}
}
mutex_unlock(®ister_mutex);
if (copy_to_user(_tid, &id, sizeof(*_tid)))
return -EFAULT;
return 0;
}
static int snd_timer_user_ginfo(struct file *file,
struct snd_timer_ginfo __user *_ginfo)
{
struct snd_timer_ginfo *ginfo;
struct snd_timer_id tid;
struct snd_timer *t;
struct list_head *p;
int err = 0;
ginfo = memdup_user(_ginfo, sizeof(*ginfo));
if (IS_ERR(ginfo))
return PTR_ERR(ginfo);
tid = ginfo->tid;
memset(ginfo, 0, sizeof(*ginfo));
ginfo->tid = tid;
mutex_lock(®ister_mutex);
t = snd_timer_find(&tid);
if (t != NULL) {
ginfo->card = t->card ? t->card->number : -1;
if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
ginfo->flags |= SNDRV_TIMER_FLG_SLAVE;
strlcpy(ginfo->id, t->id, sizeof(ginfo->id));
strlcpy(ginfo->name, t->name, sizeof(ginfo->name));
ginfo->resolution = t->hw.resolution;
if (t->hw.resolution_min > 0) {
ginfo->resolution_min = t->hw.resolution_min;
ginfo->resolution_max = t->hw.resolution_max;
}
list_for_each(p, &t->open_list_head) {
ginfo->clients++;
}
} else {
err = -ENODEV;
}
mutex_unlock(®ister_mutex);
if (err >= 0 && copy_to_user(_ginfo, ginfo, sizeof(*ginfo)))
err = -EFAULT;
kfree(ginfo);
return err;
}
static int timer_set_gparams(struct snd_timer_gparams *gparams)
{
struct snd_timer *t;
int err;
mutex_lock(®ister_mutex);
t = snd_timer_find(&gparams->tid);
if (!t) {
err = -ENODEV;
goto _error;
}
if (!list_empty(&t->open_list_head)) {
err = -EBUSY;
goto _error;
}
if (!t->hw.set_period) {
err = -ENOSYS;
goto _error;
}
err = t->hw.set_period(t, gparams->period_num, gparams->period_den);
_error:
mutex_unlock(®ister_mutex);
return err;
}
static int snd_timer_user_gparams(struct file *file,
struct snd_timer_gparams __user *_gparams)
{
struct snd_timer_gparams gparams;
if (copy_from_user(&gparams, _gparams, sizeof(gparams)))
return -EFAULT;
return timer_set_gparams(&gparams);
}
static int snd_timer_user_gstatus(struct file *file,
struct snd_timer_gstatus __user *_gstatus)
{
struct snd_timer_gstatus gstatus;
struct snd_timer_id tid;
struct snd_timer *t;
int err = 0;
if (copy_from_user(&gstatus, _gstatus, sizeof(gstatus)))
return -EFAULT;
tid = gstatus.tid;
memset(&gstatus, 0, sizeof(gstatus));
gstatus.tid = tid;
mutex_lock(®ister_mutex);
t = snd_timer_find(&tid);
if (t != NULL) {
if (t->hw.c_resolution)
gstatus.resolution = t->hw.c_resolution(t);
else
gstatus.resolution = t->hw.resolution;
if (t->hw.precise_resolution) {
t->hw.precise_resolution(t, &gstatus.resolution_num,
&gstatus.resolution_den);
} else {
gstatus.resolution_num = gstatus.resolution;
gstatus.resolution_den = 1000000000uL;
}
} else {
err = -ENODEV;
}
mutex_unlock(®ister_mutex);
if (err >= 0 && copy_to_user(_gstatus, &gstatus, sizeof(gstatus)))
err = -EFAULT;
return err;
}
static int snd_timer_user_tselect(struct file *file,
struct snd_timer_select __user *_tselect)
{
struct snd_timer_user *tu;
struct snd_timer_select tselect;
char str[32];
int err = 0;
tu = file->private_data;
if (tu->timeri) {
snd_timer_close(tu->timeri);
tu->timeri = NULL;
}
if (copy_from_user(&tselect, _tselect, sizeof(tselect))) {
err = -EFAULT;
goto __err;
}
sprintf(str, "application %i", current->pid);
if (tselect.id.dev_class != SNDRV_TIMER_CLASS_SLAVE)
tselect.id.dev_sclass = SNDRV_TIMER_SCLASS_APPLICATION;
err = snd_timer_open(&tu->timeri, str, &tselect.id, current->pid);
if (err < 0)
goto __err;
kfree(tu->queue);
tu->queue = NULL;
kfree(tu->tqueue);
tu->tqueue = NULL;
if (tu->tread) {
tu->tqueue = kmalloc(tu->queue_size * sizeof(struct snd_timer_tread),
GFP_KERNEL);
if (tu->tqueue == NULL)
err = -ENOMEM;
} else {
tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
GFP_KERNEL);
if (tu->queue == NULL)
err = -ENOMEM;
}
if (err < 0) {
snd_timer_close(tu->timeri);
tu->timeri = NULL;
} else {
tu->timeri->flags |= SNDRV_TIMER_IFLG_FAST;
tu->timeri->callback = tu->tread
? snd_timer_user_tinterrupt : snd_timer_user_interrupt;
tu->timeri->ccallback = snd_timer_user_ccallback;
tu->timeri->callback_data = (void *)tu;
tu->timeri->disconnect = snd_timer_user_disconnect;
}
__err:
return err;
}
static int snd_timer_user_info(struct file *file,
struct snd_timer_info __user *_info)
{
struct snd_timer_user *tu;
struct snd_timer_info *info;
struct snd_timer *t;
int err = 0;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
t = tu->timeri->timer;
if (!t)
return -EBADFD;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (! info)
return -ENOMEM;
info->card = t->card ? t->card->number : -1;
if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
info->flags |= SNDRV_TIMER_FLG_SLAVE;
strlcpy(info->id, t->id, sizeof(info->id));
strlcpy(info->name, t->name, sizeof(info->name));
info->resolution = t->hw.resolution;
if (copy_to_user(_info, info, sizeof(*_info)))
err = -EFAULT;
kfree(info);
return err;
}
static int snd_timer_user_params(struct file *file,
struct snd_timer_params __user *_params)
{
struct snd_timer_user *tu;
struct snd_timer_params params;
struct snd_timer *t;
struct snd_timer_read *tr;
struct snd_timer_tread *ttr;
int err;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
t = tu->timeri->timer;
if (!t)
return -EBADFD;
if (copy_from_user(¶ms, _params, sizeof(params)))
return -EFAULT;
if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE) && params.ticks < 1) {
err = -EINVAL;
goto _end;
}
if (params.queue_size > 0 &&
(params.queue_size < 32 || params.queue_size > 1024)) {
err = -EINVAL;
goto _end;
}
if (params.filter & ~((1<<SNDRV_TIMER_EVENT_RESOLUTION)|
(1<<SNDRV_TIMER_EVENT_TICK)|
(1<<SNDRV_TIMER_EVENT_START)|
(1<<SNDRV_TIMER_EVENT_STOP)|
(1<<SNDRV_TIMER_EVENT_CONTINUE)|
(1<<SNDRV_TIMER_EVENT_PAUSE)|
(1<<SNDRV_TIMER_EVENT_SUSPEND)|
(1<<SNDRV_TIMER_EVENT_RESUME)|
(1<<SNDRV_TIMER_EVENT_MSTART)|
(1<<SNDRV_TIMER_EVENT_MSTOP)|
(1<<SNDRV_TIMER_EVENT_MCONTINUE)|
(1<<SNDRV_TIMER_EVENT_MPAUSE)|
(1<<SNDRV_TIMER_EVENT_MSUSPEND)|
(1<<SNDRV_TIMER_EVENT_MRESUME))) {
err = -EINVAL;
goto _end;
}
snd_timer_stop(tu->timeri);
spin_lock_irq(&t->lock);
tu->timeri->flags &= ~(SNDRV_TIMER_IFLG_AUTO|
SNDRV_TIMER_IFLG_EXCLUSIVE|
SNDRV_TIMER_IFLG_EARLY_EVENT);
if (params.flags & SNDRV_TIMER_PSFLG_AUTO)
tu->timeri->flags |= SNDRV_TIMER_IFLG_AUTO;
if (params.flags & SNDRV_TIMER_PSFLG_EXCLUSIVE)
tu->timeri->flags |= SNDRV_TIMER_IFLG_EXCLUSIVE;
if (params.flags & SNDRV_TIMER_PSFLG_EARLY_EVENT)
tu->timeri->flags |= SNDRV_TIMER_IFLG_EARLY_EVENT;
spin_unlock_irq(&t->lock);
if (params.queue_size > 0 &&
(unsigned int)tu->queue_size != params.queue_size) {
if (tu->tread) {
ttr = kmalloc(params.queue_size * sizeof(*ttr),
GFP_KERNEL);
if (ttr) {
kfree(tu->tqueue);
tu->queue_size = params.queue_size;
tu->tqueue = ttr;
}
} else {
tr = kmalloc(params.queue_size * sizeof(*tr),
GFP_KERNEL);
if (tr) {
kfree(tu->queue);
tu->queue_size = params.queue_size;
tu->queue = tr;
}
}
}
tu->qhead = tu->qtail = tu->qused = 0;
if (tu->timeri->flags & SNDRV_TIMER_IFLG_EARLY_EVENT) {
if (tu->tread) {
struct snd_timer_tread tread;
tread.event = SNDRV_TIMER_EVENT_EARLY;
tread.tstamp.tv_sec = 0;
tread.tstamp.tv_nsec = 0;
tread.val = 0;
snd_timer_user_append_to_tqueue(tu, &tread);
} else {
struct snd_timer_read *r = &tu->queue[0];
r->resolution = 0;
r->ticks = 0;
tu->qused++;
tu->qtail++;
}
}
tu->filter = params.filter;
tu->ticks = params.ticks;
err = 0;
_end:
if (copy_to_user(_params, ¶ms, sizeof(params)))
return -EFAULT;
return err;
}
static int snd_timer_user_status(struct file *file,
struct snd_timer_status __user *_status)
{
struct snd_timer_user *tu;
struct snd_timer_status status;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
memset(&status, 0, sizeof(status));
status.tstamp = tu->tstamp;
status.resolution = snd_timer_resolution(tu->timeri);
status.lost = tu->timeri->lost;
status.overrun = tu->overrun;
spin_lock_irq(&tu->qlock);
status.queue = tu->qused;
spin_unlock_irq(&tu->qlock);
if (copy_to_user(_status, &status, sizeof(status)))
return -EFAULT;
return 0;
}
static int snd_timer_user_start(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
snd_timer_stop(tu->timeri);
tu->timeri->lost = 0;
tu->last_resolution = 0;
return (err = snd_timer_start(tu->timeri, tu->ticks)) < 0 ? err : 0;
}
static int snd_timer_user_stop(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
return (err = snd_timer_stop(tu->timeri)) < 0 ? err : 0;
}
static int snd_timer_user_continue(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
tu->timeri->lost = 0;
return (err = snd_timer_continue(tu->timeri)) < 0 ? err : 0;
}
static int snd_timer_user_pause(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
return (err = snd_timer_pause(tu->timeri)) < 0 ? err : 0;
}
enum {
SNDRV_TIMER_IOCTL_START_OLD = _IO('T', 0x20),
SNDRV_TIMER_IOCTL_STOP_OLD = _IO('T', 0x21),
SNDRV_TIMER_IOCTL_CONTINUE_OLD = _IO('T', 0x22),
SNDRV_TIMER_IOCTL_PAUSE_OLD = _IO('T', 0x23),
};
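/*
 * Note: the *_OLD values above are the historical ioctl numbers; the
 * dispatcher below accepts both the old and the current commands and
 * routes them to the same handlers, so user space built against
 * either set of numbers keeps working.
 */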
static long __snd_timer_user_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct snd_timer_user *tu;
void __user *argp = (void __user *)arg;
int __user *p = argp;
tu = file->private_data;
switch (cmd) {
case SNDRV_TIMER_IOCTL_PVERSION:
return put_user(SNDRV_TIMER_VERSION, p) ? -EFAULT : 0;
case SNDRV_TIMER_IOCTL_NEXT_DEVICE:
return snd_timer_user_next_device(argp);
case SNDRV_TIMER_IOCTL_TREAD:
{
int xarg;
if (tu->timeri) /* too late */
return -EBUSY;
if (get_user(xarg, p))
return -EFAULT;
tu->tread = xarg ? 1 : 0;
return 0;
}
case SNDRV_TIMER_IOCTL_GINFO:
return snd_timer_user_ginfo(file, argp);
case SNDRV_TIMER_IOCTL_GPARAMS:
return snd_timer_user_gparams(file, argp);
case SNDRV_TIMER_IOCTL_GSTATUS:
return snd_timer_user_gstatus(file, argp);
case SNDRV_TIMER_IOCTL_SELECT:
return snd_timer_user_tselect(file, argp);
case SNDRV_TIMER_IOCTL_INFO:
return snd_timer_user_info(file, argp);
case SNDRV_TIMER_IOCTL_PARAMS:
return snd_timer_user_params(file, argp);
case SNDRV_TIMER_IOCTL_STATUS:
return snd_timer_user_status(file, argp);
case SNDRV_TIMER_IOCTL_START:
case SNDRV_TIMER_IOCTL_START_OLD:
return snd_timer_user_start(file);
case SNDRV_TIMER_IOCTL_STOP:
case SNDRV_TIMER_IOCTL_STOP_OLD:
return snd_timer_user_stop(file);
case SNDRV_TIMER_IOCTL_CONTINUE:
case SNDRV_TIMER_IOCTL_CONTINUE_OLD:
return snd_timer_user_continue(file);
case SNDRV_TIMER_IOCTL_PAUSE:
case SNDRV_TIMER_IOCTL_PAUSE_OLD:
return snd_timer_user_pause(file);
}
return -ENOTTY;
}
static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct snd_timer_user *tu = file->private_data;
long ret;
mutex_lock(&tu->ioctl_lock);
ret = __snd_timer_user_ioctl(file, cmd, arg);
mutex_unlock(&tu->ioctl_lock);
return ret;
}
static int snd_timer_user_fasync(int fd, struct file * file, int on)
{
struct snd_timer_user *tu;
tu = file->private_data;
return fasync_helper(fd, file, on, &tu->fasync);
}
static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
size_t count, loff_t *offset)
{
struct snd_timer_user *tu;
long result = 0, unit;
int qhead;
int err = 0;
tu = file->private_data;
unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read);
spin_lock_irq(&tu->qlock);
while ((long)count - result >= unit) {
while (!tu->qused) {
wait_queue_t wait;
if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
err = -EAGAIN;
goto _error;
}
set_current_state(TASK_INTERRUPTIBLE);
init_waitqueue_entry(&wait, current);
add_wait_queue(&tu->qchange_sleep, &wait);
spin_unlock_irq(&tu->qlock);
schedule();
spin_lock_irq(&tu->qlock);
remove_wait_queue(&tu->qchange_sleep, &wait);
if (tu->disconnected) {
err = -ENODEV;
goto _error;
}
if (signal_pending(current)) {
err = -ERESTARTSYS;
goto _error;
}
}
qhead = tu->qhead++;
tu->qhead %= tu->queue_size;
spin_unlock_irq(&tu->qlock);
if (tu->tread) {
if (copy_to_user(buffer, &tu->tqueue[qhead],
sizeof(struct snd_timer_tread)))
err = -EFAULT;
} else {
if (copy_to_user(buffer, &tu->queue[qhead],
sizeof(struct snd_timer_read)))
err = -EFAULT;
}
spin_lock_irq(&tu->qlock);
tu->qused--;
if (err < 0)
goto _error;
result += unit;
buffer += unit;
}
_error:
spin_unlock_irq(&tu->qlock);
return result > 0 ? result : err;
}
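/*
 * User-space sketch (guarded out, not kernel code) of consuming timer
 * events through the read path above.  It assumes the classic
 * snd_timer_read record layout, i.e. SNDRV_TIMER_IOCTL_TREAD was not
 * enabled on this fd, and that the timer was selected, parameterized
 * and started beforehand.
 */
#if 0
#include <stdio.h>
#include <unistd.h>
#include <sound/asound.h>

static void drain_events(int fd)
{
	struct snd_timer_read ev;

	/* one fixed-size record per read(); blocks unless O_NONBLOCK */
	while (read(fd, &ev, sizeof(ev)) == (ssize_t)sizeof(ev))
		printf("resolution %u ns, ticks %u\n",
		       ev.resolution, ev.ticks);
}
#endif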
static unsigned int snd_timer_user_poll(struct file *file, poll_table * wait)
{
unsigned int mask;
struct snd_timer_user *tu;
tu = file->private_data;
poll_wait(file, &tu->qchange_sleep, wait);
mask = 0;
if (tu->qused)
mask |= POLLIN | POLLRDNORM;
if (tu->disconnected)
mask |= POLLERR;
return mask;
}
#ifdef CONFIG_COMPAT
#include "timer_compat.c"
#else
#define snd_timer_user_ioctl_compat NULL
#endif
static const struct file_operations snd_timer_f_ops =
{
.owner = THIS_MODULE,
.read = snd_timer_user_read,
.open = snd_timer_user_open,
.release = snd_timer_user_release,
.llseek = no_llseek,
.poll = snd_timer_user_poll,
.unlocked_ioctl = snd_timer_user_ioctl,
.compat_ioctl = snd_timer_user_ioctl_compat,
.fasync = snd_timer_user_fasync,
};
/* unregister the system timer */
static void snd_timer_free_all(void)
{
struct snd_timer *timer, *n;
list_for_each_entry_safe(timer, n, &snd_timer_list, device_list)
snd_timer_free(timer);
}
static struct device timer_dev;
/*
* ENTRY functions
*/
static int __init alsa_timer_init(void)
{
int err;
snd_device_initialize(&timer_dev, NULL);
dev_set_name(&timer_dev, "timer");
#ifdef SNDRV_OSS_INFO_DEV_TIMERS
snd_oss_info_register(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1,
"system timer");
#endif
err = snd_timer_register_system();
if (err < 0) {
pr_err("ALSA: unable to register system timer (%i)\n", err);
put_device(&timer_dev);
return err;
}
err = snd_register_device(SNDRV_DEVICE_TYPE_TIMER, NULL, 0,
&snd_timer_f_ops, NULL, &timer_dev);
if (err < 0) {
pr_err("ALSA: unable to register timer device (%i)\n", err);
snd_timer_free_all();
put_device(&timer_dev);
return err;
}
snd_timer_proc_init();
return 0;
}
static void __exit alsa_timer_exit(void)
{
snd_unregister_device(&timer_dev);
snd_timer_free_all();
put_device(&timer_dev);
snd_timer_proc_done();
#ifdef SNDRV_OSS_INFO_DEV_TIMERS
snd_oss_info_unregister(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1);
#endif
}
module_init(alsa_timer_init)
module_exit(alsa_timer_exit)
EXPORT_SYMBOL(snd_timer_open);
EXPORT_SYMBOL(snd_timer_close);
EXPORT_SYMBOL(snd_timer_resolution);
EXPORT_SYMBOL(snd_timer_start);
EXPORT_SYMBOL(snd_timer_stop);
EXPORT_SYMBOL(snd_timer_continue);
EXPORT_SYMBOL(snd_timer_pause);
EXPORT_SYMBOL(snd_timer_new);
EXPORT_SYMBOL(snd_timer_notify);
EXPORT_SYMBOL(snd_timer_global_new);
EXPORT_SYMBOL(snd_timer_global_free);
EXPORT_SYMBOL(snd_timer_global_register);
EXPORT_SYMBOL(snd_timer_interrupt);
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_5057_0 |
crossvul-cpp_data_good_1793_0 | /* -*- mode: c; c-file-style: "bsd"; indent-tabs-mode: t -*- */
/*
* Copyright 1993 OpenVision Technologies, Inc., All Rights Reserved
*/
#include <gssrpc/rpc.h>
#include <krb5.h>
#include <errno.h>
#include <kadm5/admin.h>
#include <kadm5/kadm_rpc.h>
#include <kadm5/admin_xdr.h>
#include <stdlib.h>
#include <string.h>
static bool_t
_xdr_kadm5_principal_ent_rec(XDR *xdrs, kadm5_principal_ent_rec *objp,
int v);
static bool_t
_xdr_kadm5_policy_ent_rec(XDR *xdrs, kadm5_policy_ent_rec *objp, int vers);
/*
* Function: xdr_ui_4
*
* Purpose: XDR function which serves as a wrapper for xdr_u_int32,
* to prevent compiler warnings about type clashes between u_int32
* and krb5_ui_4.
*/
bool_t xdr_ui_4(XDR *xdrs, krb5_ui_4 *objp)
{
/* Assumes that krb5_ui_4 and u_int32 are both four bytes long.
This should not be a harmful assumption. */
return xdr_u_int32(xdrs, (uint32_t *) objp);
}
/*
* Function: xdr_nullstring
*
* Purpose: XDR function for "strings" that are either NULL-terminated
* or NULL.
*/
bool_t xdr_nullstring(XDR *xdrs, char **objp)
{
u_int size;
if (xdrs->x_op == XDR_ENCODE) {
if (*objp == NULL)
size = 0;
else
size = strlen(*objp) + 1;
}
if (! xdr_u_int(xdrs, &size)) {
return FALSE;
}
switch (xdrs->x_op) {
case XDR_DECODE:
if (size == 0) {
*objp = NULL;
return TRUE;
} else if (*objp == NULL) {
*objp = (char *) mem_alloc(size);
if (*objp == NULL) {
errno = ENOMEM;
return FALSE;
}
}
if (!xdr_opaque(xdrs, *objp, size))
return FALSE;
/* Check that the unmarshalled bytes are a C string. */
if ((*objp)[size - 1] != '\0')
return FALSE;
if (memchr(*objp, '\0', size - 1) != NULL)
return FALSE;
return TRUE;
case XDR_ENCODE:
if (size != 0)
return (xdr_opaque(xdrs, *objp, size));
return TRUE;
case XDR_FREE:
if (*objp != NULL)
mem_free(*objp, size);
*objp = NULL;
return TRUE;
}
return FALSE;
}
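/*
 * Hedged sketch of exercising xdr_nullstring through a memory stream;
 * the 256-byte buffer is an arbitrary assumption, and a real caller
 * would size it from known message bounds.  It shows the encode and
 * decode halves pairing up, with the decode side allocating.
 */
#if 0
static bool_t nullstring_roundtrip(const char *in, char **out)
{
	XDR enc, dec;
	char buf[256];
	char *tmp = (char *) in;	/* may legitimately be NULL */

	xdrmem_create(&enc, buf, sizeof(buf), XDR_ENCODE);
	if (!xdr_nullstring(&enc, &tmp))
		return FALSE;
	xdrmem_create(&dec, buf, sizeof(buf), XDR_DECODE);
	*out = NULL;			/* decode allocates, or leaves NULL */
	return xdr_nullstring(&dec, out);
}
#endif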
/*
* Function: xdr_nulltype
*
* Purpose: XDR function for arbitrary pointer types that are either
* NULL or contain data.
*/
bool_t xdr_nulltype(XDR *xdrs, void **objp, xdrproc_t proc)
{
bool_t null;
switch (xdrs->x_op) {
case XDR_DECODE:
if (!xdr_bool(xdrs, &null))
return FALSE;
if (null) {
*objp = NULL;
return TRUE;
}
return (*proc)(xdrs, objp);
case XDR_ENCODE:
if (*objp == NULL)
null = TRUE;
else
null = FALSE;
if (!xdr_bool(xdrs, &null))
return FALSE;
if (null == FALSE)
return (*proc)(xdrs, objp);
return TRUE;
case XDR_FREE:
if (*objp)
return (*proc)(xdrs, objp);
return TRUE;
}
return FALSE;
}
bool_t
xdr_krb5_timestamp(XDR *xdrs, krb5_timestamp *objp)
{
/* This assumes that int32 and krb5_timestamp are the same size.
This shouldn't be a problem, since we've got a unit test which
checks for this. */
if (!xdr_int32(xdrs, (int32_t *) objp)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_krb5_kvno(XDR *xdrs, krb5_kvno *objp)
{
return xdr_u_int(xdrs, objp);
}
bool_t
xdr_krb5_deltat(XDR *xdrs, krb5_deltat *objp)
{
/* This assumes that int32 and krb5_deltat are the same size.
This shouldn't be a problem, since we've got a unit test which
checks for this. */
if (!xdr_int32(xdrs, (int32_t *) objp)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_krb5_flags(XDR *xdrs, krb5_flags *objp)
{
/* This assumes that int32 and krb5_flags are the same size.
This shouldn't be a problem, since we've got a unit test which
checks for this. */
if (!xdr_int32(xdrs, (int32_t *) objp)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_krb5_ui_4(XDR *xdrs, krb5_ui_4 *objp)
{
if (!xdr_u_int32(xdrs, (uint32_t *) objp)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_krb5_int16(XDR *xdrs, krb5_int16 *objp)
{
int tmp;
tmp = (int) *objp;
if (!xdr_int(xdrs, &tmp))
return(FALSE);
*objp = (krb5_int16) tmp;
return(TRUE);
}
/*
* Function: xdr_krb5_ui_2
*
* Purpose: XDR function which serves as a wrapper for xdr_u_int,
* to prevent compiler warnings about type clashes between u_int
* and krb5_ui_2.
*/
bool_t
xdr_krb5_ui_2(XDR *xdrs, krb5_ui_2 *objp)
{
unsigned int tmp;
tmp = (unsigned int) *objp;
if (!xdr_u_int(xdrs, &tmp))
return(FALSE);
*objp = (krb5_ui_2) tmp;
return(TRUE);
}
static bool_t xdr_krb5_boolean(XDR *xdrs, krb5_boolean *kbool)
{
bool_t val;
switch (xdrs->x_op) {
case XDR_DECODE:
if (!xdr_bool(xdrs, &val))
return FALSE;
*kbool = (val == FALSE) ? FALSE : TRUE;
return TRUE;
case XDR_ENCODE:
val = *kbool ? TRUE : FALSE;
return xdr_bool(xdrs, &val);
case XDR_FREE:
return TRUE;
}
return FALSE;
}
bool_t xdr_krb5_key_data_nocontents(XDR *xdrs, krb5_key_data *objp)
{
/*
* Note that this function intentionally DOES NOT transfer key
* length or contents! xdr_krb5_key_data in adb_xdr.c does, but
* that is only for use within the server-side library.
*/
unsigned int tmp;
if (xdrs->x_op == XDR_DECODE)
memset(objp, 0, sizeof(krb5_key_data));
if (!xdr_krb5_int16(xdrs, &objp->key_data_ver)) {
return (FALSE);
}
if (!xdr_krb5_ui_2(xdrs, &objp->key_data_kvno)) {
return (FALSE);
}
if (!xdr_krb5_int16(xdrs, &objp->key_data_type[0])) {
return (FALSE);
}
if (objp->key_data_ver > 1) {
if (!xdr_krb5_int16(xdrs, &objp->key_data_type[1])) {
return (FALSE);
}
}
/*
* kadm5_get_principal on the server side allocates and returns
* key contents when asked. Even though this function refuses to
* transmit that data, it still has to *free* the data at the
* appropriate time to avoid a memory leak.
*/
if (xdrs->x_op == XDR_FREE) {
tmp = (unsigned int) objp->key_data_length[0];
if (!xdr_bytes(xdrs, (char **) &objp->key_data_contents[0],
&tmp, ~0))
return FALSE;
tmp = (unsigned int) objp->key_data_length[1];
if (!xdr_bytes(xdrs, (char **) &objp->key_data_contents[1],
&tmp, ~0))
return FALSE;
}
return (TRUE);
}
bool_t
xdr_krb5_key_salt_tuple(XDR *xdrs, krb5_key_salt_tuple *objp)
{
if (!xdr_krb5_enctype(xdrs, &objp->ks_enctype))
return FALSE;
if (!xdr_krb5_salttype(xdrs, &objp->ks_salttype))
return FALSE;
return TRUE;
}
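/*
 * xdr_krb5_tl_data below serializes the list as a boolean "more" flag
 * followed by (int16 type, counted byte string) per element, with a
 * final FALSE flag as the terminator.  Note that the decode path
 * prepends each element, so the rebuilt list comes out in reverse
 * order relative to the encoded stream.
 */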
bool_t xdr_krb5_tl_data(XDR *xdrs, krb5_tl_data **tl_data_head)
{
krb5_tl_data *tl, *tl2;
bool_t more;
unsigned int len;
switch (xdrs->x_op) {
case XDR_FREE:
tl = tl2 = *tl_data_head;
while (tl) {
tl2 = tl->tl_data_next;
free(tl->tl_data_contents);
free(tl);
tl = tl2;
}
*tl_data_head = NULL;
break;
case XDR_ENCODE:
tl = *tl_data_head;
while (1) {
more = (tl != NULL);
if (!xdr_bool(xdrs, &more))
return FALSE;
if (tl == NULL)
break;
if (!xdr_krb5_int16(xdrs, &tl->tl_data_type))
return FALSE;
len = tl->tl_data_length;
if (!xdr_bytes(xdrs, (char **) &tl->tl_data_contents, &len, ~0))
return FALSE;
tl = tl->tl_data_next;
}
break;
case XDR_DECODE:
tl = NULL;
while (1) {
if (!xdr_bool(xdrs, &more))
return FALSE;
if (more == FALSE)
break;
tl2 = (krb5_tl_data *) malloc(sizeof(krb5_tl_data));
if (tl2 == NULL)
return FALSE;
memset(tl2, 0, sizeof(krb5_tl_data));
if (!xdr_krb5_int16(xdrs, &tl2->tl_data_type))
return FALSE;
if (!xdr_bytes(xdrs, (char **)&tl2->tl_data_contents, &len, ~0))
return FALSE;
tl2->tl_data_length = len;
tl2->tl_data_next = tl;
tl = tl2;
}
*tl_data_head = tl;
break;
}
return TRUE;
}
bool_t
xdr_kadm5_ret_t(XDR *xdrs, kadm5_ret_t *objp)
{
uint32_t tmp;
if (xdrs->x_op == XDR_ENCODE)
tmp = (uint32_t) *objp;
if (!xdr_u_int32(xdrs, &tmp))
return (FALSE);
if (xdrs->x_op == XDR_DECODE)
*objp = (kadm5_ret_t) tmp;
return (TRUE);
}
bool_t xdr_kadm5_principal_ent_rec(XDR *xdrs,
kadm5_principal_ent_rec *objp)
{
return _xdr_kadm5_principal_ent_rec(xdrs, objp, KADM5_API_VERSION_3);
}
static bool_t
_xdr_kadm5_principal_ent_rec(XDR *xdrs, kadm5_principal_ent_rec *objp,
int v)
{
unsigned int n;
if (!xdr_krb5_principal(xdrs, &objp->principal)) {
return (FALSE);
}
if (!xdr_krb5_timestamp(xdrs, &objp->princ_expire_time)) {
return (FALSE);
}
if (!xdr_krb5_timestamp(xdrs, &objp->last_pwd_change)) {
return (FALSE);
}
if (!xdr_krb5_timestamp(xdrs, &objp->pw_expiration)) {
return (FALSE);
}
if (!xdr_krb5_deltat(xdrs, &objp->max_life)) {
return (FALSE);
}
if (!xdr_nulltype(xdrs, (void **) &objp->mod_name,
xdr_krb5_principal)) {
return (FALSE);
}
if (!xdr_krb5_timestamp(xdrs, &objp->mod_date)) {
return (FALSE);
}
if (!xdr_krb5_flags(xdrs, &objp->attributes)) {
return (FALSE);
}
if (!xdr_krb5_kvno(xdrs, &objp->kvno)) {
return (FALSE);
}
if (!xdr_krb5_kvno(xdrs, &objp->mkvno)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->policy)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->aux_attributes)) {
return (FALSE);
}
if (!xdr_krb5_deltat(xdrs, &objp->max_renewable_life)) {
return (FALSE);
}
if (!xdr_krb5_timestamp(xdrs, &objp->last_success)) {
return (FALSE);
}
if (!xdr_krb5_timestamp(xdrs, &objp->last_failed)) {
return (FALSE);
}
if (!xdr_krb5_kvno(xdrs, &objp->fail_auth_count)) {
return (FALSE);
}
if (!xdr_krb5_int16(xdrs, &objp->n_key_data)) {
return (FALSE);
}
if (!xdr_krb5_int16(xdrs, &objp->n_tl_data)) {
return (FALSE);
}
if (!xdr_nulltype(xdrs, (void **) &objp->tl_data,
xdr_krb5_tl_data)) {
return FALSE;
}
n = objp->n_key_data;
if (!xdr_array(xdrs, (caddr_t *) &objp->key_data,
&n, ~0, sizeof(krb5_key_data),
xdr_krb5_key_data_nocontents)) {
return (FALSE);
}
return (TRUE);
}
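/*
 * Policy records have grown fields across API versions, so the
 * decoder below gates the newer fields on the caller-supplied version
 * and pre-zeroes them when decoding; a caller talking to an old peer
 * therefore sees well-defined zero values rather than stale memory.
 */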
static bool_t
_xdr_kadm5_policy_ent_rec(XDR *xdrs, kadm5_policy_ent_rec *objp, int vers)
{
if (!xdr_nullstring(xdrs, &objp->policy)) {
return (FALSE);
}
/* these all used to be u_int32, but it's stupid for sized types
to be exposed at the api, and they're the same as longs on the
wire. */
if (!xdr_long(xdrs, &objp->pw_min_life)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->pw_max_life)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->pw_min_length)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->pw_min_classes)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->pw_history_num)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->policy_refcnt)) {
return (FALSE);
}
if (xdrs->x_op == XDR_DECODE) {
objp->pw_max_fail = 0;
objp->pw_failcnt_interval = 0;
objp->pw_lockout_duration = 0;
objp->attributes = 0;
objp->max_life = 0;
objp->max_renewable_life = 0;
objp->allowed_keysalts = NULL;
objp->n_tl_data = 0;
objp->tl_data = NULL;
}
if (vers >= KADM5_API_VERSION_3) {
if (!xdr_krb5_kvno(xdrs, &objp->pw_max_fail))
return (FALSE);
if (!xdr_krb5_deltat(xdrs, &objp->pw_failcnt_interval))
return (FALSE);
if (!xdr_krb5_deltat(xdrs, &objp->pw_lockout_duration))
return (FALSE);
}
if (vers >= KADM5_API_VERSION_4) {
if (!xdr_krb5_flags(xdrs, &objp->attributes)) {
return (FALSE);
}
if (!xdr_krb5_deltat(xdrs, &objp->max_life)) {
return (FALSE);
}
if (!xdr_krb5_deltat(xdrs, &objp->max_renewable_life)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->allowed_keysalts)) {
return (FALSE);
}
if (!xdr_krb5_int16(xdrs, &objp->n_tl_data)) {
return (FALSE);
}
if (!xdr_nulltype(xdrs, (void **) &objp->tl_data,
xdr_krb5_tl_data)) {
return FALSE;
}
}
return (TRUE);
}
bool_t
xdr_kadm5_policy_ent_rec(XDR *xdrs, kadm5_policy_ent_rec *objp)
{
return _xdr_kadm5_policy_ent_rec(xdrs, objp, KADM5_API_VERSION_4);
}
bool_t
xdr_cprinc_arg(XDR *xdrs, cprinc_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!_xdr_kadm5_principal_ent_rec(xdrs, &objp->rec,
objp->api_version)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->mask)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->passwd)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_cprinc3_arg(XDR *xdrs, cprinc3_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!_xdr_kadm5_principal_ent_rec(xdrs, &objp->rec,
objp->api_version)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->mask)) {
return (FALSE);
}
if (!xdr_array(xdrs, (caddr_t *)&objp->ks_tuple,
(unsigned int *)&objp->n_ks_tuple, ~0,
sizeof(krb5_key_salt_tuple),
xdr_krb5_key_salt_tuple)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->passwd)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_generic_ret(XDR *xdrs, generic_ret *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_kadm5_ret_t(xdrs, &objp->code)) {
return (FALSE);
}
return(TRUE);
}
bool_t
xdr_dprinc_arg(XDR *xdrs, dprinc_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_mprinc_arg(XDR *xdrs, mprinc_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!_xdr_kadm5_principal_ent_rec(xdrs, &objp->rec,
objp->api_version)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->mask)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_rprinc_arg(XDR *xdrs, rprinc_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->src)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->dest)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_gprincs_arg(XDR *xdrs, gprincs_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->exp)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_gprincs_ret(XDR *xdrs, gprincs_ret *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_kadm5_ret_t(xdrs, &objp->code)) {
return (FALSE);
}
if (objp->code == KADM5_OK) {
if (!xdr_int(xdrs, &objp->count)) {
return (FALSE);
}
if (!xdr_array(xdrs, (caddr_t *) &objp->princs,
(unsigned int *) &objp->count, ~0,
sizeof(char *), xdr_nullstring)) {
return (FALSE);
}
}
return (TRUE);
}
bool_t
xdr_chpass_arg(XDR *xdrs, chpass_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->pass)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_chpass3_arg(XDR *xdrs, chpass3_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
if (!xdr_krb5_boolean(xdrs, &objp->keepold)) {
return (FALSE);
}
if (!xdr_array(xdrs, (caddr_t *)&objp->ks_tuple,
(unsigned int*)&objp->n_ks_tuple, ~0,
sizeof(krb5_key_salt_tuple),
xdr_krb5_key_salt_tuple)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->pass)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_setv4key_arg(XDR *xdrs, setv4key_arg *objp)
{
unsigned int n_keys = 1;
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
if (!xdr_array(xdrs, (caddr_t *) &objp->keyblock,
&n_keys, ~0,
sizeof(krb5_keyblock), xdr_krb5_keyblock)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_setkey_arg(XDR *xdrs, setkey_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
if (!xdr_array(xdrs, (caddr_t *) &objp->keyblocks,
(unsigned int *) &objp->n_keys, ~0,
sizeof(krb5_keyblock), xdr_krb5_keyblock)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_setkey3_arg(XDR *xdrs, setkey3_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
if (!xdr_krb5_boolean(xdrs, &objp->keepold)) {
return (FALSE);
}
if (!xdr_array(xdrs, (caddr_t *) &objp->ks_tuple,
(unsigned int *) &objp->n_ks_tuple, ~0,
sizeof(krb5_key_salt_tuple), xdr_krb5_key_salt_tuple)) {
return (FALSE);
}
if (!xdr_array(xdrs, (caddr_t *) &objp->keyblocks,
(unsigned int *) &objp->n_keys, ~0,
sizeof(krb5_keyblock), xdr_krb5_keyblock)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_chrand_arg(XDR *xdrs, chrand_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_chrand3_arg(XDR *xdrs, chrand3_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
if (!xdr_krb5_boolean(xdrs, &objp->keepold)) {
return (FALSE);
}
if (!xdr_array(xdrs, (caddr_t *)&objp->ks_tuple,
(unsigned int*)&objp->n_ks_tuple, ~0,
sizeof(krb5_key_salt_tuple),
xdr_krb5_key_salt_tuple)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_chrand_ret(XDR *xdrs, chrand_ret *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_kadm5_ret_t(xdrs, &objp->code)) {
return (FALSE);
}
if (objp->code == KADM5_OK) {
if (!xdr_array(xdrs, (char **)&objp->keys,
(unsigned int *)&objp->n_keys, ~0,
sizeof(krb5_keyblock), xdr_krb5_keyblock))
return FALSE;
}
return (TRUE);
}
bool_t
xdr_gprinc_arg(XDR *xdrs, gprinc_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->mask)) {
return FALSE;
}
return (TRUE);
}
bool_t
xdr_gprinc_ret(XDR *xdrs, gprinc_ret *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_kadm5_ret_t(xdrs, &objp->code)) {
return (FALSE);
}
if(objp->code == KADM5_OK) {
if (!_xdr_kadm5_principal_ent_rec(xdrs, &objp->rec,
objp->api_version)) {
return (FALSE);
}
}
return (TRUE);
}
bool_t
xdr_cpol_arg(XDR *xdrs, cpol_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!_xdr_kadm5_policy_ent_rec(xdrs, &objp->rec,
objp->api_version)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->mask)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_dpol_arg(XDR *xdrs, dpol_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->name)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_mpol_arg(XDR *xdrs, mpol_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!_xdr_kadm5_policy_ent_rec(xdrs, &objp->rec,
objp->api_version)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->mask)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_gpol_arg(XDR *xdrs, gpol_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->name)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_gpol_ret(XDR *xdrs, gpol_ret *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_kadm5_ret_t(xdrs, &objp->code)) {
return (FALSE);
}
if(objp->code == KADM5_OK) {
if (!_xdr_kadm5_policy_ent_rec(xdrs, &objp->rec,
objp->api_version))
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_gpols_arg(XDR *xdrs, gpols_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->exp)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_gpols_ret(XDR *xdrs, gpols_ret *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_kadm5_ret_t(xdrs, &objp->code)) {
return (FALSE);
}
if (objp->code == KADM5_OK) {
if (!xdr_int(xdrs, &objp->count)) {
return (FALSE);
}
if (!xdr_array(xdrs, (caddr_t *) &objp->pols,
(unsigned int *) &objp->count, ~0,
sizeof(char *), xdr_nullstring)) {
return (FALSE);
}
}
return (TRUE);
}
bool_t xdr_getprivs_ret(XDR *xdrs, getprivs_ret *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (! xdr_kadm5_ret_t(xdrs, &objp->code) ||
! xdr_long(xdrs, &objp->privs))
return FALSE;
return TRUE;
}
bool_t
xdr_purgekeys_arg(XDR *xdrs, purgekeys_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
if (!xdr_int(xdrs, &objp->keepkvno)) {
return FALSE;
}
return (TRUE);
}
bool_t
xdr_gstrings_arg(XDR *xdrs, gstrings_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_gstrings_ret(XDR *xdrs, gstrings_ret *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_kadm5_ret_t(xdrs, &objp->code)) {
return (FALSE);
}
if (objp->code == KADM5_OK) {
if (!xdr_int(xdrs, &objp->count)) {
return (FALSE);
}
if (!xdr_array(xdrs, (caddr_t *) &objp->strings,
(unsigned int *) &objp->count, ~0,
sizeof(krb5_string_attr),
xdr_krb5_string_attr)) {
return (FALSE);
}
}
return (TRUE);
}
bool_t
xdr_sstring_arg(XDR *xdrs, sstring_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->key)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->value)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_krb5_principal(XDR *xdrs, krb5_principal *objp)
{
int ret;
char *p = NULL;
krb5_principal pr = NULL;
static krb5_context context = NULL;
/* using a static context here is ugly, but should work
ok, and the other solutions are even uglier */
if (!context &&
kadm5_init_krb5_context(&context))
return(FALSE);
switch(xdrs->x_op) {
case XDR_ENCODE:
if (*objp) {
if((ret = krb5_unparse_name(context, *objp, &p)) != 0)
return FALSE;
}
if(!xdr_nullstring(xdrs, &p))
return FALSE;
if (p) free(p);
break;
case XDR_DECODE:
if(!xdr_nullstring(xdrs, &p))
return FALSE;
if (p) {
ret = krb5_parse_name(context, p, &pr);
if(ret != 0)
return FALSE;
*objp = pr;
free(p);
} else
*objp = NULL;
break;
case XDR_FREE:
if(*objp != NULL)
krb5_free_principal(context, *objp);
*objp = NULL;
break;
}
return TRUE;
}
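/*
 * Hedged sketch showing that xdr_krb5_principal transports principals
 * in unparsed string form: a round-trip through a memory stream hands
 * back a freshly parsed copy.  The 512-byte buffer is an assumption,
 * and the function's internal static context is used rather than any
 * caller-owned krb5_context.
 */
#if 0
static bool_t princ_roundtrip(krb5_principal in, krb5_principal *out)
{
	XDR enc, dec;
	char buf[512];

	xdrmem_create(&enc, buf, sizeof(buf), XDR_ENCODE);
	if (!xdr_krb5_principal(&enc, &in))
		return FALSE;
	xdrmem_create(&dec, buf, sizeof(buf), XDR_DECODE);
	*out = NULL;
	return xdr_krb5_principal(&dec, out);
}
#endif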
bool_t
xdr_krb5_octet(XDR *xdrs, krb5_octet *objp)
{
if (!xdr_u_char(xdrs, objp))
return (FALSE);
return (TRUE);
}
bool_t
xdr_krb5_enctype(XDR *xdrs, krb5_enctype *objp)
{
/*
* This used to be xdr_krb5_keytype, but keytypes and enctypes have
* been merged into only enctypes. However, randkey_principal
* already ensures that only a key of ENCTYPE_DES_CBC_CRC will be
* returned to v1 clients, and ENCTYPE_DES_CBC_CRC has the same
* value as KEYTYPE_DES used too, which is what all v1 clients
* expect. Therefore, IMHO, just encoding whatever enctype we get
* is safe.
*/
if (!xdr_int32(xdrs, (int32_t *) objp))
return (FALSE);
return (TRUE);
}
bool_t
xdr_krb5_salttype(XDR *xdrs, krb5_int32 *objp)
{
if (!xdr_int32(xdrs, (int32_t *) objp))
return FALSE;
return TRUE;
}
bool_t
xdr_krb5_keyblock(XDR *xdrs, krb5_keyblock *objp)
{
/* XXX This only works because free_keyblock assumes ->contents
is allocated by malloc() */
if(!xdr_krb5_enctype(xdrs, &objp->enctype))
return FALSE;
if(!xdr_bytes(xdrs, (char **) &objp->contents, (unsigned int *)
&objp->length, ~0))
return FALSE;
return TRUE;
}
bool_t
xdr_krb5_string_attr(XDR *xdrs, krb5_string_attr *objp)
{
if (!xdr_nullstring(xdrs, &objp->key))
return FALSE;
if (!xdr_nullstring(xdrs, &objp->value))
return FALSE;
if (xdrs->x_op == XDR_DECODE &&
(objp->key == NULL || objp->value == NULL))
return FALSE;
return TRUE;
}
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_1793_0 |
crossvul-cpp_data_good_5692_0 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
* Copyright Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
* Copyright Darryl Miles G7LED (dlm@g7led.demon.co.uk)
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/stat.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h> /* For TIOCINQ/OUTQ */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/netrom.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/arp.h>
#include <linux/init.h>
static int nr_ndevs = 4;
int sysctl_netrom_default_path_quality = NR_DEFAULT_QUAL;
int sysctl_netrom_obsolescence_count_initialiser = NR_DEFAULT_OBS;
int sysctl_netrom_network_ttl_initialiser = NR_DEFAULT_TTL;
int sysctl_netrom_transport_timeout = NR_DEFAULT_T1;
int sysctl_netrom_transport_maximum_tries = NR_DEFAULT_N2;
int sysctl_netrom_transport_acknowledge_delay = NR_DEFAULT_T2;
int sysctl_netrom_transport_busy_delay = NR_DEFAULT_T4;
int sysctl_netrom_transport_requested_window_size = NR_DEFAULT_WINDOW;
int sysctl_netrom_transport_no_activity_timeout = NR_DEFAULT_IDLE;
int sysctl_netrom_routing_control = NR_DEFAULT_ROUTING;
int sysctl_netrom_link_fails_count = NR_DEFAULT_FAILS;
int sysctl_netrom_reset_circuit = NR_DEFAULT_RESET;
static unsigned short circuit = 0x101;
static HLIST_HEAD(nr_list);
static DEFINE_SPINLOCK(nr_list_lock);
static const struct proto_ops nr_proto_ops;
/*
* NETROM network devices are virtual network devices encapsulating NETROM
* frames into AX.25 which will be sent through an AX.25 device, so form a
* special "super class" of normal net devices; split their locks off into a
* separate class since they always nest.
*/
static struct lock_class_key nr_netdev_xmit_lock_key;
static struct lock_class_key nr_netdev_addr_lock_key;
static void nr_set_lockdep_one(struct net_device *dev,
struct netdev_queue *txq,
void *_unused)
{
lockdep_set_class(&txq->_xmit_lock, &nr_netdev_xmit_lock_key);
}
static void nr_set_lockdep_key(struct net_device *dev)
{
lockdep_set_class(&dev->addr_list_lock, &nr_netdev_addr_lock_key);
netdev_for_each_tx_queue(dev, nr_set_lockdep_one, NULL);
}
/*
* Socket removal during an interrupt is now safe.
*/
static void nr_remove_socket(struct sock *sk)
{
spin_lock_bh(&nr_list_lock);
sk_del_node_init(sk);
spin_unlock_bh(&nr_list_lock);
}
/*
* Kill all bound sockets on a dropped device.
*/
static void nr_kill_by_device(struct net_device *dev)
{
struct sock *s;
spin_lock_bh(&nr_list_lock);
sk_for_each(s, &nr_list)
if (nr_sk(s)->device == dev)
nr_disconnect(s, ENETUNREACH);
spin_unlock_bh(&nr_list_lock);
}
/*
* Handle device status changes.
*/
static int nr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct net_device *dev = (struct net_device *)ptr;
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
if (event != NETDEV_DOWN)
return NOTIFY_DONE;
nr_kill_by_device(dev);
nr_rt_device_down(dev);
return NOTIFY_DONE;
}
/*
* Add a socket to the bound sockets list.
*/
static void nr_insert_socket(struct sock *sk)
{
spin_lock_bh(&nr_list_lock);
sk_add_node(sk, &nr_list);
spin_unlock_bh(&nr_list_lock);
}
/*
* Find a socket that wants to accept the Connect Request we just
* received.  On success the socket is returned bh-locked; the caller
* must bh_unlock_sock() it when done.
*/
static struct sock *nr_find_listener(ax25_address *addr)
{
struct sock *s;
spin_lock_bh(&nr_list_lock);
sk_for_each(s, &nr_list)
if (!ax25cmp(&nr_sk(s)->source_addr, addr) &&
s->sk_state == TCP_LISTEN) {
bh_lock_sock(s);
goto found;
}
s = NULL;
found:
spin_unlock_bh(&nr_list_lock);
return s;
}
/*
* Find a connected NET/ROM socket given my circuit IDs.
*/
static struct sock *nr_find_socket(unsigned char index, unsigned char id)
{
struct sock *s;
spin_lock_bh(&nr_list_lock);
sk_for_each(s, &nr_list) {
struct nr_sock *nr = nr_sk(s);
if (nr->my_index == index && nr->my_id == id) {
bh_lock_sock(s);
goto found;
}
}
s = NULL;
found:
spin_unlock_bh(&nr_list_lock);
return s;
}
/*
* Find a connected NET/ROM socket given their circuit IDs.
*/
static struct sock *nr_find_peer(unsigned char index, unsigned char id,
ax25_address *dest)
{
struct sock *s;
spin_lock_bh(&nr_list_lock);
sk_for_each(s, &nr_list) {
struct nr_sock *nr = nr_sk(s);
if (nr->your_index == index && nr->your_id == id &&
!ax25cmp(&nr->dest_addr, dest)) {
bh_lock_sock(s);
goto found;
}
}
s = NULL;
found:
spin_unlock_bh(&nr_list_lock);
return s;
}
/*
* Find next free circuit ID.  The 16-bit id wraps via unsigned short
* arithmetic, and values whose index or id byte is zero are skipped
* because 0 is not a valid circuit component on the wire.
*/
static unsigned short nr_find_next_circuit(void)
{
unsigned short id = circuit;
unsigned char i, j;
struct sock *sk;
for (;;) {
i = id / 256;
j = id % 256;
if (i != 0 && j != 0) {
if ((sk=nr_find_socket(i, j)) == NULL)
break;
bh_unlock_sock(sk);
}
id++;
}
return id;
}
/*
* Deferred destroy.
*/
void nr_destroy_socket(struct sock *);
/*
* Handler for deferred kills.
*/
static void nr_destroy_timer(unsigned long data)
{
struct sock *sk=(struct sock *)data;
bh_lock_sock(sk);
sock_hold(sk);
nr_destroy_socket(sk);
bh_unlock_sock(sk);
sock_put(sk);
}
/*
* This is called from user mode and the timers. Thus it protects itself
* against interrupt users but doesn't worry about being called during
* work. Once it is removed from the queue no interrupt or bottom half
* will touch it and we are (fairly 8-) ) safe.
*/
void nr_destroy_socket(struct sock *sk)
{
struct sk_buff *skb;
nr_remove_socket(sk);
nr_stop_heartbeat(sk);
nr_stop_t1timer(sk);
nr_stop_t2timer(sk);
nr_stop_t4timer(sk);
nr_stop_idletimer(sk);
nr_clear_queues(sk); /* Flush the queues */
while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if (skb->sk != sk) { /* A pending connection */
/* Queue the unaccepted socket for death */
sock_set_flag(skb->sk, SOCK_DEAD);
nr_start_heartbeat(skb->sk);
nr_sk(skb->sk)->state = NR_STATE_0;
}
kfree_skb(skb);
}
if (sk_has_allocations(sk)) {
/* Defer: outstanding buffers */
sk->sk_timer.function = nr_destroy_timer;
sk->sk_timer.expires = jiffies + 2 * HZ;
add_timer(&sk->sk_timer);
} else
sock_put(sk);
}
/*
* Handling for system calls applied via the various interfaces to a
* NET/ROM socket object.
*/
static int nr_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct nr_sock *nr = nr_sk(sk);
unsigned long opt;
if (level != SOL_NETROM)
return -ENOPROTOOPT;
if (optlen < sizeof(unsigned int))
return -EINVAL;
if (get_user(opt, (unsigned int __user *)optval))
return -EFAULT;
switch (optname) {
case NETROM_T1:
if (opt < 1 || opt > ULONG_MAX / HZ)
return -EINVAL;
nr->t1 = opt * HZ;
return 0;
case NETROM_T2:
if (opt < 1 || opt > ULONG_MAX / HZ)
return -EINVAL;
nr->t2 = opt * HZ;
return 0;
case NETROM_N2:
if (opt < 1 || opt > 31)
return -EINVAL;
nr->n2 = opt;
return 0;
case NETROM_T4:
if (opt < 1 || opt > ULONG_MAX / HZ)
return -EINVAL;
nr->t4 = opt * HZ;
return 0;
case NETROM_IDLE:
if (opt > ULONG_MAX / (60 * HZ))
return -EINVAL;
nr->idle = opt * 60 * HZ;
return 0;
default:
return -ENOPROTOOPT;
}
}
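/*
 * User-space sketch (guarded out, not kernel code) of the option
 * handling above.  NETROM_T1 comes from <linux/netrom.h>; SOL_NETROM
 * is assumed to be visible through the usual socket headers on Linux.
 * Values are in seconds and the kernel scales them by HZ internally.
 */
#if 0
#include <sys/socket.h>
#include <linux/netrom.h>

static int nr_set_t1_seconds(int fd, unsigned int secs)
{
	return setsockopt(fd, SOL_NETROM, NETROM_T1, &secs, sizeof(secs));
}
#endif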
static int nr_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct nr_sock *nr = nr_sk(sk);
int val = 0;
int len;
if (level != SOL_NETROM)
return -ENOPROTOOPT;
if (get_user(len, optlen))
return -EFAULT;
if (len < 0)
return -EINVAL;
switch (optname) {
case NETROM_T1:
val = nr->t1 / HZ;
break;
case NETROM_T2:
val = nr->t2 / HZ;
break;
case NETROM_N2:
val = nr->n2;
break;
case NETROM_T4:
val = nr->t4 / HZ;
break;
case NETROM_IDLE:
val = nr->idle / (60 * HZ);
break;
default:
return -ENOPROTOOPT;
}
len = min_t(unsigned int, len, sizeof(int));
if (put_user(len, optlen))
return -EFAULT;
return copy_to_user(optval, &val, len) ? -EFAULT : 0;
}
static int nr_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
lock_sock(sk);
if (sk->sk_state != TCP_LISTEN) {
memset(&nr_sk(sk)->user_addr, 0, AX25_ADDR_LEN);
sk->sk_max_ack_backlog = backlog;
sk->sk_state = TCP_LISTEN;
release_sock(sk);
return 0;
}
release_sock(sk);
return -EOPNOTSUPP;
}
static struct proto nr_proto = {
.name = "NETROM",
.owner = THIS_MODULE,
.obj_size = sizeof(struct nr_sock),
};
static int nr_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
struct nr_sock *nr;
if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
if (sock->type != SOCK_SEQPACKET || protocol != 0)
return -ESOCKTNOSUPPORT;
sk = sk_alloc(net, PF_NETROM, GFP_ATOMIC, &nr_proto);
if (sk == NULL)
return -ENOMEM;
nr = nr_sk(sk);
sock_init_data(sock, sk);
sock->ops = &nr_proto_ops;
sk->sk_protocol = protocol;
skb_queue_head_init(&nr->ack_queue);
skb_queue_head_init(&nr->reseq_queue);
skb_queue_head_init(&nr->frag_queue);
nr_init_timers(sk);
nr->t1 =
msecs_to_jiffies(sysctl_netrom_transport_timeout);
nr->t2 =
msecs_to_jiffies(sysctl_netrom_transport_acknowledge_delay);
nr->n2 =
msecs_to_jiffies(sysctl_netrom_transport_maximum_tries);
nr->t4 =
msecs_to_jiffies(sysctl_netrom_transport_busy_delay);
nr->idle =
msecs_to_jiffies(sysctl_netrom_transport_no_activity_timeout);
nr->window = sysctl_netrom_transport_requested_window_size;
nr->bpqext = 1;
nr->state = NR_STATE_0;
return 0;
}
static struct sock *nr_make_new(struct sock *osk)
{
struct sock *sk;
struct nr_sock *nr, *onr;
if (osk->sk_type != SOCK_SEQPACKET)
return NULL;
sk = sk_alloc(sock_net(osk), PF_NETROM, GFP_ATOMIC, osk->sk_prot);
if (sk == NULL)
return NULL;
nr = nr_sk(sk);
sock_init_data(NULL, sk);
sk->sk_type = osk->sk_type;
sk->sk_priority = osk->sk_priority;
sk->sk_protocol = osk->sk_protocol;
sk->sk_rcvbuf = osk->sk_rcvbuf;
sk->sk_sndbuf = osk->sk_sndbuf;
sk->sk_state = TCP_ESTABLISHED;
sock_copy_flags(sk, osk);
skb_queue_head_init(&nr->ack_queue);
skb_queue_head_init(&nr->reseq_queue);
skb_queue_head_init(&nr->frag_queue);
nr_init_timers(sk);
onr = nr_sk(osk);
nr->t1 = onr->t1;
nr->t2 = onr->t2;
nr->n2 = onr->n2;
nr->t4 = onr->t4;
nr->idle = onr->idle;
nr->window = onr->window;
nr->device = onr->device;
nr->bpqext = onr->bpqext;
return sk;
}
static int nr_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct nr_sock *nr;
if (sk == NULL) return 0;
sock_hold(sk);
sock_orphan(sk);
lock_sock(sk);
nr = nr_sk(sk);
switch (nr->state) {
case NR_STATE_0:
case NR_STATE_1:
case NR_STATE_2:
nr_disconnect(sk, 0);
nr_destroy_socket(sk);
break;
case NR_STATE_3:
nr_clear_queues(sk);
nr->n2count = 0;
nr_write_internal(sk, NR_DISCREQ);
nr_start_t1timer(sk);
nr_stop_t2timer(sk);
nr_stop_t4timer(sk);
nr_stop_idletimer(sk);
nr->state = NR_STATE_2;
sk->sk_state = TCP_CLOSE;
sk->sk_shutdown |= SEND_SHUTDOWN;
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DESTROY);
break;
default:
break;
}
sock->sk = NULL;
release_sock(sk);
sock_put(sk);
return 0;
}
static int nr_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sock *sk = sock->sk;
struct nr_sock *nr = nr_sk(sk);
struct full_sockaddr_ax25 *addr = (struct full_sockaddr_ax25 *)uaddr;
struct net_device *dev;
ax25_uid_assoc *user;
ax25_address *source;
lock_sock(sk);
if (!sock_flag(sk, SOCK_ZAPPED)) {
release_sock(sk);
return -EINVAL;
}
if (addr_len < sizeof(struct sockaddr_ax25) || addr_len > sizeof(struct full_sockaddr_ax25)) {
release_sock(sk);
return -EINVAL;
}
if (addr_len < (addr->fsa_ax25.sax25_ndigis * sizeof(ax25_address) + sizeof(struct sockaddr_ax25))) {
release_sock(sk);
return -EINVAL;
}
if (addr->fsa_ax25.sax25_family != AF_NETROM) {
release_sock(sk);
return -EINVAL;
}
if ((dev = nr_dev_get(&addr->fsa_ax25.sax25_call)) == NULL) {
release_sock(sk);
return -EADDRNOTAVAIL;
}
/*
* Only the super user can set an arbitrary user callsign.
*/
if (addr->fsa_ax25.sax25_ndigis == 1) {
if (!capable(CAP_NET_BIND_SERVICE)) {
dev_put(dev);
release_sock(sk);
return -EPERM;
}
nr->user_addr = addr->fsa_digipeater[0];
nr->source_addr = addr->fsa_ax25.sax25_call;
} else {
source = &addr->fsa_ax25.sax25_call;
user = ax25_findbyuid(current_euid());
if (user) {
nr->user_addr = user->call;
ax25_uid_put(user);
} else {
if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) {
release_sock(sk);
dev_put(dev);
return -EPERM;
}
nr->user_addr = *source;
}
nr->source_addr = *source;
}
nr->device = dev;
nr_insert_socket(sk);
sock_reset_flag(sk, SOCK_ZAPPED);
dev_put(dev);
release_sock(sk);
return 0;
}
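/*
 * User-space sketch (guarded out) of binding a NET/ROM socket to a
 * local callsign.  ax25_aton_entry() and the <netax25/...> headers
 * come from the libax25 user-space package and are assumptions here;
 * filling the ax25_call bytes by hand works just as well.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <netax25/ax25.h>
#include <netax25/axlib.h>

static int nr_bind_call(int fd, const char *callsign)
{
	struct full_sockaddr_ax25 addr;

	memset(&addr, 0, sizeof(addr));
	addr.fsa_ax25.sax25_family = AF_NETROM;
	addr.fsa_ax25.sax25_ndigis = 0;
	if (ax25_aton_entry(callsign, addr.fsa_ax25.sax25_call.ax25_call) < 0)
		return -1;
	return bind(fd, (struct sockaddr *) &addr, sizeof(addr));
}
#endif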
static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags)
{
struct sock *sk = sock->sk;
struct nr_sock *nr = nr_sk(sk);
struct sockaddr_ax25 *addr = (struct sockaddr_ax25 *)uaddr;
ax25_address *source = NULL;
ax25_uid_assoc *user;
struct net_device *dev;
int err = 0;
lock_sock(sk);
if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
sock->state = SS_CONNECTED;
goto out_release; /* Connect completed during a ERESTARTSYS event */
}
if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
sock->state = SS_UNCONNECTED;
err = -ECONNREFUSED;
goto out_release;
}
if (sk->sk_state == TCP_ESTABLISHED) {
err = -EISCONN; /* No reconnect on a seqpacket socket */
goto out_release;
}
sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
if (addr_len != sizeof(struct sockaddr_ax25) && addr_len != sizeof(struct full_sockaddr_ax25)) {
err = -EINVAL;
goto out_release;
}
if (addr->sax25_family != AF_NETROM) {
err = -EINVAL;
goto out_release;
}
if (sock_flag(sk, SOCK_ZAPPED)) { /* Must bind first - autobinding in this may or may not work */
sock_reset_flag(sk, SOCK_ZAPPED);
if ((dev = nr_dev_first()) == NULL) {
err = -ENETUNREACH;
goto out_release;
}
source = (ax25_address *)dev->dev_addr;
user = ax25_findbyuid(current_euid());
if (user) {
nr->user_addr = user->call;
ax25_uid_put(user);
} else {
if (ax25_uid_policy && !capable(CAP_NET_ADMIN)) {
dev_put(dev);
err = -EPERM;
goto out_release;
}
nr->user_addr = *source;
}
nr->source_addr = *source;
nr->device = dev;
dev_put(dev);
nr_insert_socket(sk); /* Finish the bind */
}
nr->dest_addr = addr->sax25_call;
release_sock(sk);
circuit = nr_find_next_circuit();
lock_sock(sk);
nr->my_index = circuit / 256;
nr->my_id = circuit % 256;
circuit++;
/* Move to connecting socket, start sending Connect Requests */
sock->state = SS_CONNECTING;
sk->sk_state = TCP_SYN_SENT;
nr_establish_data_link(sk);
nr->state = NR_STATE_1;
nr_start_heartbeat(sk);
/* Now the loop */
if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
err = -EINPROGRESS;
goto out_release;
}
/*
* A Connect Ack with Choke or timeout or failed routing will go to
* closed.
*/
if (sk->sk_state == TCP_SYN_SENT) {
DEFINE_WAIT(wait);
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
if (sk->sk_state != TCP_SYN_SENT)
break;
if (!signal_pending(current)) {
release_sock(sk);
schedule();
lock_sock(sk);
continue;
}
err = -ERESTARTSYS;
break;
}
finish_wait(sk_sleep(sk), &wait);
if (err)
goto out_release;
}
if (sk->sk_state != TCP_ESTABLISHED) {
sock->state = SS_UNCONNECTED;
err = sock_error(sk); /* Always set at this point */
goto out_release;
}
sock->state = SS_CONNECTED;
out_release:
release_sock(sk);
return err;
}
static int nr_accept(struct socket *sock, struct socket *newsock, int flags)
{
struct sk_buff *skb;
struct sock *newsk;
DEFINE_WAIT(wait);
struct sock *sk;
int err = 0;
if ((sk = sock->sk) == NULL)
return -EINVAL;
lock_sock(sk);
if (sk->sk_type != SOCK_SEQPACKET) {
err = -EOPNOTSUPP;
goto out_release;
}
if (sk->sk_state != TCP_LISTEN) {
err = -EINVAL;
goto out_release;
}
/*
* The write queue this time is holding sockets ready to use
* hooked into the SABM we saved
*/
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
skb = skb_dequeue(&sk->sk_receive_queue);
if (skb)
break;
if (flags & O_NONBLOCK) {
err = -EWOULDBLOCK;
break;
}
if (!signal_pending(current)) {
release_sock(sk);
schedule();
lock_sock(sk);
continue;
}
err = -ERESTARTSYS;
break;
}
finish_wait(sk_sleep(sk), &wait);
if (err)
goto out_release;
newsk = skb->sk;
sock_graft(newsk, newsock);
/* Now attach up the new socket */
kfree_skb(skb);
sk_acceptq_removed(sk);
out_release:
release_sock(sk);
return err;
}
static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
struct full_sockaddr_ax25 *sax = (struct full_sockaddr_ax25 *)uaddr;
struct sock *sk = sock->sk;
struct nr_sock *nr = nr_sk(sk);
lock_sock(sk);
if (peer != 0) {
if (sk->sk_state != TCP_ESTABLISHED) {
release_sock(sk);
return -ENOTCONN;
}
sax->fsa_ax25.sax25_family = AF_NETROM;
sax->fsa_ax25.sax25_ndigis = 1;
sax->fsa_ax25.sax25_call = nr->user_addr;
memset(sax->fsa_digipeater, 0, sizeof(sax->fsa_digipeater));
sax->fsa_digipeater[0] = nr->dest_addr;
*uaddr_len = sizeof(struct full_sockaddr_ax25);
} else {
sax->fsa_ax25.sax25_family = AF_NETROM;
sax->fsa_ax25.sax25_ndigis = 0;
sax->fsa_ax25.sax25_call = nr->source_addr;
*uaddr_len = sizeof(struct sockaddr_ax25);
}
release_sock(sk);
return 0;
}
int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
{
struct sock *sk;
struct sock *make;
struct nr_sock *nr_make;
ax25_address *src, *dest, *user;
unsigned short circuit_index, circuit_id;
unsigned short peer_circuit_index, peer_circuit_id;
unsigned short frametype, flags, window, timeout;
int ret;
skb->sk = NULL; /* Initially we don't know who it's for */
/*
* skb->data points to the netrom frame start: originator and
* destination node addresses (7 bytes each) plus a TTL byte form the
* network header; the five transport header bytes are decoded below.
*/
src = (ax25_address *)(skb->data + 0);
dest = (ax25_address *)(skb->data + 7);
circuit_index = skb->data[15];
circuit_id = skb->data[16];
peer_circuit_index = skb->data[17];
peer_circuit_id = skb->data[18];
frametype = skb->data[19] & 0x0F;
flags = skb->data[19] & 0xF0;
/*
* Check for an incoming IP over NET/ROM frame.
*/
if (frametype == NR_PROTOEXT &&
circuit_index == NR_PROTO_IP && circuit_id == NR_PROTO_IP) {
skb_pull(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN);
skb_reset_transport_header(skb);
return nr_rx_ip(skb, dev);
}
/*
* Find an existing socket connection, based on circuit ID, if it's
* a Connect Request base it on their circuit ID.
*
* Circuit ID 0/0 is not valid but it could still be a "reset" for a
* circuit that no longer exists at the other end ...
*/
sk = NULL;
if (circuit_index == 0 && circuit_id == 0) {
if (frametype == NR_CONNACK && flags == NR_CHOKE_FLAG)
sk = nr_find_peer(peer_circuit_index, peer_circuit_id, src);
} else {
if (frametype == NR_CONNREQ)
sk = nr_find_peer(circuit_index, circuit_id, src);
else
sk = nr_find_socket(circuit_index, circuit_id);
}
if (sk != NULL) {
skb_reset_transport_header(skb);
if (frametype == NR_CONNACK && skb->len == 22)
nr_sk(sk)->bpqext = 1;
else
nr_sk(sk)->bpqext = 0;
ret = nr_process_rx_frame(sk, skb);
bh_unlock_sock(sk);
return ret;
}
/*
* Now it should be a CONNREQ.
*/
if (frametype != NR_CONNREQ) {
/*
* Here it would be nice to be able to send a reset but
* NET/ROM doesn't have one. We've tried to extend the protocol
* by sending NR_CONNACK | NR_CHOKE_FLAGS replies but that
* apparently kills BPQ boxes... :-(
* So now we try to follow the established behaviour of
* G8PZT's Xrouter which is sending packets with command type 7
* as an extension of the protocol.
*/
if (sysctl_netrom_reset_circuit &&
(frametype != NR_RESET || flags != 0))
nr_transmit_reset(skb, 1);
return 0;
}
sk = nr_find_listener(dest);
user = (ax25_address *)(skb->data + 21);
if (sk == NULL || sk_acceptq_is_full(sk) ||
(make = nr_make_new(sk)) == NULL) {
nr_transmit_refusal(skb, 0);
if (sk)
bh_unlock_sock(sk);
return 0;
}
window = skb->data[20];
skb->sk = make;
make->sk_state = TCP_ESTABLISHED;
/* Fill in his circuit details */
nr_make = nr_sk(make);
nr_make->source_addr = *dest;
nr_make->dest_addr = *src;
nr_make->user_addr = *user;
nr_make->your_index = circuit_index;
nr_make->your_id = circuit_id;
bh_unlock_sock(sk);
circuit = nr_find_next_circuit();
bh_lock_sock(sk);
nr_make->my_index = circuit / 256;
nr_make->my_id = circuit % 256;
circuit++;
/* Window negotiation */
if (window < nr_make->window)
nr_make->window = window;
/* L4 timeout negotiation */
if (skb->len == 37) {
timeout = skb->data[36] * 256 + skb->data[35];
if (timeout * HZ < nr_make->t1)
nr_make->t1 = timeout * HZ;
nr_make->bpqext = 1;
} else {
nr_make->bpqext = 0;
}
nr_write_internal(make, NR_CONNACK);
nr_make->condition = 0x00;
nr_make->vs = 0;
nr_make->va = 0;
nr_make->vr = 0;
nr_make->vl = 0;
nr_make->state = NR_STATE_3;
sk_acceptq_added(sk);
skb_queue_head(&sk->sk_receive_queue, skb);
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk, skb->len);
bh_unlock_sock(sk);
nr_insert_socket(make);
nr_start_heartbeat(make);
nr_start_idletimer(make);
return 1;
}
static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct nr_sock *nr = nr_sk(sk);
struct sockaddr_ax25 *usax = (struct sockaddr_ax25 *)msg->msg_name;
int err;
struct sockaddr_ax25 sax;
struct sk_buff *skb;
unsigned char *asmptr;
int size;
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
return -EINVAL;
lock_sock(sk);
if (sock_flag(sk, SOCK_ZAPPED)) {
err = -EADDRNOTAVAIL;
goto out;
}
if (sk->sk_shutdown & SEND_SHUTDOWN) {
send_sig(SIGPIPE, current, 0);
err = -EPIPE;
goto out;
}
if (nr->device == NULL) {
err = -ENETUNREACH;
goto out;
}
if (usax) {
if (msg->msg_namelen < sizeof(sax)) {
err = -EINVAL;
goto out;
}
sax = *usax;
if (ax25cmp(&nr->dest_addr, &sax.sax25_call) != 0) {
err = -EISCONN;
goto out;
}
if (sax.sax25_family != AF_NETROM) {
err = -EINVAL;
goto out;
}
} else {
if (sk->sk_state != TCP_ESTABLISHED) {
err = -ENOTCONN;
goto out;
}
sax.sax25_family = AF_NETROM;
sax.sax25_call = nr->dest_addr;
}
/* Build a packet - the conventional user limit is 236 bytes. We can
do ludicrously large NetROM frames but must not overflow */
if (len > 65536) {
err = -EMSGSIZE;
goto out;
}
size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN;
if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
goto out;
skb_reserve(skb, size - len);
skb_reset_transport_header(skb);
/*
* Push down the NET/ROM header
*/
asmptr = skb_push(skb, NR_TRANSPORT_LEN);
/* Build a NET/ROM Transport header */
*asmptr++ = nr->your_index;
*asmptr++ = nr->your_id;
*asmptr++ = 0; /* To be filled in later */
*asmptr++ = 0; /* Ditto */
*asmptr++ = NR_INFO;
/*
* Put the data on the end
*/
skb_put(skb, len);
/* User data follows immediately after the NET/ROM transport header */
if (memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len)) {
kfree_skb(skb);
err = -EFAULT;
goto out;
}
if (sk->sk_state != TCP_ESTABLISHED) {
kfree_skb(skb);
err = -ENOTCONN;
goto out;
}
nr_output(sk, skb); /* Shove it onto the queue */
err = len;
out:
release_sock(sk);
return err;
}
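/*
 * User-space sketch (guarded out): once connected, NET/ROM behaves as
 * a SOCK_SEQPACKET transport, so plain send()/recv() move one record
 * per call.  The 236-byte figure simply echoes the conventional user
 * limit noted in the nr_sendmsg() comment above.
 */
#if 0
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>

static int nr_send_line(int fd, const char *line)
{
	size_t len = strlen(line);

	if (len > 236)		/* conventional NET/ROM payload limit */
		return -1;
	return send(fd, line, len, 0) == (ssize_t) len ? 0 : -1;
}
#endif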
static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name;
size_t copied;
struct sk_buff *skb;
int er;
/*
* This works for seqpacket too. The receiver has ordered the queue for
* us! We do one quick check first though
*/
lock_sock(sk);
if (sk->sk_state != TCP_ESTABLISHED) {
release_sock(sk);
return -ENOTCONN;
}
/* Now we can treat all alike */
if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) {
release_sock(sk);
return er;
}
skb_reset_transport_header(skb);
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
er = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (er < 0) {
skb_free_datagram(sk, skb);
release_sock(sk);
return er;
}
if (sax != NULL) {
memset(sax, 0, sizeof(*sax));
sax->sax25_family = AF_NETROM;
skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
AX25_ADDR_LEN);
}
msg->msg_namelen = sizeof(*sax);
skb_free_datagram(sk, skb);
release_sock(sk);
return copied;
}
static int nr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
void __user *argp = (void __user *)arg;
int ret;
switch (cmd) {
case TIOCOUTQ: {
long amount;
lock_sock(sk);
amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
if (amount < 0)
amount = 0;
release_sock(sk);
return put_user(amount, (int __user *)argp);
}
case TIOCINQ: {
struct sk_buff *skb;
long amount = 0L;
lock_sock(sk);
/* These two are safe on a single CPU system as only user tasks fiddle here */
if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
amount = skb->len;
release_sock(sk);
return put_user(amount, (int __user *)argp);
}
case SIOCGSTAMP:
lock_sock(sk);
ret = sock_get_timestamp(sk, argp);
release_sock(sk);
return ret;
case SIOCGSTAMPNS:
lock_sock(sk);
ret = sock_get_timestampns(sk, argp);
release_sock(sk);
return ret;
case SIOCGIFADDR:
case SIOCSIFADDR:
case SIOCGIFDSTADDR:
case SIOCSIFDSTADDR:
case SIOCGIFBRDADDR:
case SIOCSIFBRDADDR:
case SIOCGIFNETMASK:
case SIOCSIFNETMASK:
case SIOCGIFMETRIC:
case SIOCSIFMETRIC:
return -EINVAL;
case SIOCADDRT:
case SIOCDELRT:
case SIOCNRDECOBS:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
return nr_rt_ioctl(cmd, argp);
default:
return -ENOIOCTLCMD;
}
return 0;
}
#ifdef CONFIG_PROC_FS
static void *nr_info_start(struct seq_file *seq, loff_t *pos)
{
spin_lock_bh(&nr_list_lock);
return seq_hlist_start_head(&nr_list, *pos);
}
static void *nr_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
return seq_hlist_next(v, &nr_list, pos);
}
static void nr_info_stop(struct seq_file *seq, void *v)
{
spin_unlock_bh(&nr_list_lock);
}
static int nr_info_show(struct seq_file *seq, void *v)
{
struct sock *s = sk_entry(v);
struct net_device *dev;
struct nr_sock *nr;
const char *devname;
char buf[11];
if (v == SEQ_START_TOKEN)
seq_puts(seq,
"user_addr dest_node src_node dev my your st vs vr va t1 t2 t4 idle n2 wnd Snd-Q Rcv-Q inode\n");
else {
bh_lock_sock(s);
nr = nr_sk(s);
if ((dev = nr->device) == NULL)
devname = "???";
else
devname = dev->name;
seq_printf(seq, "%-9s ", ax2asc(buf, &nr->user_addr));
seq_printf(seq, "%-9s ", ax2asc(buf, &nr->dest_addr));
seq_printf(seq,
"%-9s %-3s %02X/%02X %02X/%02X %2d %3d %3d %3d %3lu/%03lu %2lu/%02lu %3lu/%03lu %3lu/%03lu %2d/%02d %3d %5d %5d %ld\n",
ax2asc(buf, &nr->source_addr),
devname,
nr->my_index,
nr->my_id,
nr->your_index,
nr->your_id,
nr->state,
nr->vs,
nr->vr,
nr->va,
ax25_display_timer(&nr->t1timer) / HZ,
nr->t1 / HZ,
ax25_display_timer(&nr->t2timer) / HZ,
nr->t2 / HZ,
ax25_display_timer(&nr->t4timer) / HZ,
nr->t4 / HZ,
ax25_display_timer(&nr->idletimer) / (60 * HZ),
nr->idle / (60 * HZ),
nr->n2count,
nr->n2,
nr->window,
sk_wmem_alloc_get(s),
sk_rmem_alloc_get(s),
s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
bh_unlock_sock(s);
}
return 0;
}
static const struct seq_operations nr_info_seqops = {
.start = nr_info_start,
.next = nr_info_next,
.stop = nr_info_stop,
.show = nr_info_show,
};
static int nr_info_open(struct inode *inode, struct file *file)
{
return seq_open(file, &nr_info_seqops);
}
static const struct file_operations nr_info_fops = {
.owner = THIS_MODULE,
.open = nr_info_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#endif /* CONFIG_PROC_FS */
static const struct net_proto_family nr_family_ops = {
.family = PF_NETROM,
.create = nr_create,
.owner = THIS_MODULE,
};
static const struct proto_ops nr_proto_ops = {
.family = PF_NETROM,
.owner = THIS_MODULE,
.release = nr_release,
.bind = nr_bind,
.connect = nr_connect,
.socketpair = sock_no_socketpair,
.accept = nr_accept,
.getname = nr_getname,
.poll = datagram_poll,
.ioctl = nr_ioctl,
.listen = nr_listen,
.shutdown = sock_no_shutdown,
.setsockopt = nr_setsockopt,
.getsockopt = nr_getsockopt,
.sendmsg = nr_sendmsg,
.recvmsg = nr_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
};
static struct notifier_block nr_dev_notifier = {
.notifier_call = nr_device_event,
};
static struct net_device **dev_nr;
static struct ax25_protocol nr_pid = {
.pid = AX25_P_NETROM,
.func = nr_route_frame
};
static struct ax25_linkfail nr_linkfail_notifier = {
.func = nr_link_failed,
};
static int __init nr_proto_init(void)
{
int i;
int rc = proto_register(&nr_proto, 0);
if (rc != 0)
goto out;
	if (nr_ndevs > 0x7fffffff/sizeof(struct net_device *)) {
		printk(KERN_ERR "NET/ROM: nr_proto_init - nr_ndevs parameter too large\n");
		proto_unregister(&nr_proto);
		return -EINVAL;
	}
	dev_nr = kzalloc(nr_ndevs * sizeof(struct net_device *), GFP_KERNEL);
	if (dev_nr == NULL) {
		printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n");
		proto_unregister(&nr_proto);
		return -ENOMEM;
	}
for (i = 0; i < nr_ndevs; i++) {
char name[IFNAMSIZ];
struct net_device *dev;
sprintf(name, "nr%d", i);
dev = alloc_netdev(0, name, nr_setup);
if (!dev) {
printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device structure\n");
goto fail;
}
dev->base_addr = i;
if (register_netdev(dev)) {
printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register network device\n");
free_netdev(dev);
goto fail;
}
nr_set_lockdep_key(dev);
dev_nr[i] = dev;
}
if (sock_register(&nr_family_ops)) {
printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register socket family\n");
goto fail;
}
register_netdevice_notifier(&nr_dev_notifier);
ax25_register_pid(&nr_pid);
ax25_linkfail_register(&nr_linkfail_notifier);
#ifdef CONFIG_SYSCTL
nr_register_sysctl();
#endif
nr_loopback_init();
proc_create("nr", S_IRUGO, init_net.proc_net, &nr_info_fops);
proc_create("nr_neigh", S_IRUGO, init_net.proc_net, &nr_neigh_fops);
proc_create("nr_nodes", S_IRUGO, init_net.proc_net, &nr_nodes_fops);
out:
return rc;
fail:
while (--i >= 0) {
unregister_netdev(dev_nr[i]);
free_netdev(dev_nr[i]);
}
kfree(dev_nr);
proto_unregister(&nr_proto);
rc = -1;
goto out;
}
module_init(nr_proto_init);
module_param(nr_ndevs, int, 0);
MODULE_PARM_DESC(nr_ndevs, "number of NET/ROM devices");
MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
MODULE_DESCRIPTION("The amateur radio NET/ROM network and transport layer protocol");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_NETROM);
static void __exit nr_exit(void)
{
int i;
remove_proc_entry("nr", init_net.proc_net);
remove_proc_entry("nr_neigh", init_net.proc_net);
remove_proc_entry("nr_nodes", init_net.proc_net);
nr_loopback_clear();
nr_rt_free();
#ifdef CONFIG_SYSCTL
nr_unregister_sysctl();
#endif
ax25_linkfail_release(&nr_linkfail_notifier);
ax25_protocol_release(AX25_P_NETROM);
unregister_netdevice_notifier(&nr_dev_notifier);
sock_unregister(PF_NETROM);
for (i = 0; i < nr_ndevs; i++) {
struct net_device *dev = dev_nr[i];
if (dev) {
unregister_netdev(dev);
free_netdev(dev);
}
}
kfree(dev_nr);
proto_unregister(&nr_proto);
}
module_exit(nr_exit);
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_5692_0 |
crossvul-cpp_data_bad_1694_0 | /*
md.c : Multiple Devices driver for Linux
Copyright (C) 1998, 1999, 2000 Ingo Molnar
completely rewritten, based on the MD driver code from Marc Zyngier
Changes:
- RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
- RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
- boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
- kerneld support by Boris Tobotras <boris@xtalk.msk.su>
- kmod support by: Cyrus Durgin
- RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
- Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
- lots of fixes and improvements to the RAID1/RAID5 and generic
RAID code (such as request based resynchronization):
Neil Brown <neilb@cse.unsw.edu.au>.
- persistent bitmap code
Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
You should have received a copy of the GNU General Public License
(for example /usr/src/linux/COPYING); if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
#include "md.h"
#include "bitmap.h"
#include "md-cluster.h"
#ifndef MODULE
static void autostart_arrays(int part);
#endif
/* pers_list is a list of registered personalities protected
* by pers_lock.
* pers_lock does extra service to protect accesses to
* mddev->thread when the mutex cannot be held.
*/
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);
struct md_cluster_operations *md_cluster_ops;
EXPORT_SYMBOL(md_cluster_ops);
struct module *md_cluster_mod;
EXPORT_SYMBOL(md_cluster_mod);
static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;
static int remove_and_add_spares(struct mddev *mddev,
struct md_rdev *this);
static void mddev_detach(struct mddev *mddev);
/*
* Default number of read corrections we'll attempt on an rdev
* before ejecting it from the array. We divide the read error
* count by 2 for every hour elapsed between read errors.
*/
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
/*
* Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
* is 1000 KB/sec, so the extra system load does not show up that much.
* Increase it if you want to have more _guaranteed_ speed. Note that
* the RAID driver will use the maximum available bandwidth if the IO
* subsystem is idle. There is also an 'absolute maximum' reconstruction
* speed limit - in case reconstruction slows down your system despite
* idle IO detection.
*
* you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
* or /sys/block/mdX/md/sync_speed_{min,max}
*/
static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(struct mddev *mddev)
{
return mddev->sync_speed_min ?
mddev->sync_speed_min : sysctl_speed_limit_min;
}
static inline int speed_max(struct mddev *mddev)
{
return mddev->sync_speed_max ?
mddev->sync_speed_max : sysctl_speed_limit_max;
}
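/*
 * Illustrative userspace sketch (not kernel code, hence #if 0): the
 * sysctl files named in the comment above can simply be written to.
 * Path and units (KB/sec) come from that comment; error handling is
 * deliberately minimal.
 */
#if 0
#include <stdio.h>

static int set_resync_floor_kbps(long kbps)
{
	FILE *f = fopen("/proc/sys/dev/raid/speed_limit_min", "w");

	if (!f)
		return -1;
	fprintf(f, "%ld\n", kbps);
	return fclose(f);
}
#endif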
static struct ctl_table_header *raid_table_header;
static struct ctl_table raid_table[] = {
{
.procname = "speed_limit_min",
.data = &sysctl_speed_limit_min,
.maxlen = sizeof(int),
.mode = S_IRUGO|S_IWUSR,
.proc_handler = proc_dointvec,
},
{
.procname = "speed_limit_max",
.data = &sysctl_speed_limit_max,
.maxlen = sizeof(int),
.mode = S_IRUGO|S_IWUSR,
.proc_handler = proc_dointvec,
},
{ }
};
static struct ctl_table raid_dir_table[] = {
{
.procname = "raid",
.maxlen = 0,
.mode = S_IRUGO|S_IXUGO,
.child = raid_table,
},
{ }
};
static struct ctl_table raid_root_table[] = {
{
.procname = "dev",
.maxlen = 0,
.mode = 0555,
.child = raid_dir_table,
},
{ }
};
static const struct block_device_operations md_fops;
static int start_readonly;
/* bio_alloc_mddev / bio_clone_mddev
 * like bio_alloc / bio_clone, but with a local bio set
 */
struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
struct mddev *mddev)
{
struct bio *b;
if (!mddev || !mddev->bio_set)
return bio_alloc(gfp_mask, nr_iovecs);
b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set);
if (!b)
return NULL;
return b;
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);
struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
struct mddev *mddev)
{
if (!mddev || !mddev->bio_set)
return bio_clone(bio, gfp_mask);
return bio_clone_bioset(bio, gfp_mask, mddev->bio_set);
}
EXPORT_SYMBOL_GPL(bio_clone_mddev);
/*
* We have a system wide 'event count' that is incremented
* on any 'interesting' event, and readers of /proc/mdstat
* can use 'poll' or 'select' to find out when the event
* count increases.
*
* Events are:
* start array, stop array, error, add device, remove device,
* start build, activate spare
*/
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(struct mddev *mddev)
{
atomic_inc(&md_event_count);
wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);
/* Alternate version that can be called from interrupts
* when calling sysfs_notify isn't needed.
*/
static void md_new_event_inintr(struct mddev *mddev)
{
atomic_inc(&md_event_count);
wake_up(&md_event_waiters);
}
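/*
 * Illustrative userspace sketch of the consumer side described in the
 * comment above (not kernel code, hence #if 0).  Assumption: mdstat's
 * poll method reports POLLPRI when md_event_count changes, and a fresh
 * read re-arms the notification.
 */
#if 0
#include <poll.h>
#include <fcntl.h>
#include <unistd.h>

static int wait_for_md_event(void)
{
	char buf[4096];
	struct pollfd pfd;
	int fd = open("/proc/mdstat", O_RDONLY);

	if (fd < 0)
		return -1;
	(void)read(fd, buf, sizeof(buf));	/* consume current state */
	pfd.fd = fd;
	pfd.events = POLLPRI;
	poll(&pfd, 1, -1);			/* blocks until the count bumps */
	close(fd);
	return 0;
}
#endif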
/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
*/
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);
/*
* iterates through all used mddevs in the system.
* We take care to grab the all_mddevs_lock whenever navigating
* the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop still owns
* a reference to the current mddev and must mddev_put it.
*/
#define for_each_mddev(_mddev,_tmp) \
\
for (({ spin_lock(&all_mddevs_lock); \
_tmp = all_mddevs.next; \
_mddev = NULL;}); \
({ if (_tmp != &all_mddevs) \
mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
spin_unlock(&all_mddevs_lock); \
if (_mddev) mddev_put(_mddev); \
_mddev = list_entry(_tmp, struct mddev, all_mddevs); \
_tmp != &all_mddevs;}); \
({ spin_lock(&all_mddevs_lock); \
_tmp = _tmp->next;}) \
)
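/*
 * Typical use of the iterator above (sketch): callers name the cursor
 * but never touch it themselves.
 *
 *	struct mddev *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp) {
 *		...	(the macro holds a reference to mddev across the
 *			 body and drops it on the next iteration)
 *	}
 */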
/* Rather than calling directly into the personality make_request function,
* IO requests come here first so that we can check if the device is
* being suspended pending a reconfiguration.
* We hold a refcount over the call to ->make_request. By the time that
* call has finished, the bio has been linked into some internal structure
* and so is visible to ->quiesce(), so we don't need the refcount any more.
*/
static void md_make_request(struct request_queue *q, struct bio *bio)
{
const int rw = bio_data_dir(bio);
struct mddev *mddev = q->queuedata;
unsigned int sectors;
int cpu;
if (mddev == NULL || mddev->pers == NULL
|| !mddev->ready) {
bio_io_error(bio);
return;
}
if (mddev->ro == 1 && unlikely(rw == WRITE)) {
bio_endio(bio, bio_sectors(bio) == 0 ? 0 : -EROFS);
return;
}
smp_rmb(); /* Ensure implications of 'active' are visible */
rcu_read_lock();
if (mddev->suspended) {
DEFINE_WAIT(__wait);
for (;;) {
prepare_to_wait(&mddev->sb_wait, &__wait,
TASK_UNINTERRUPTIBLE);
if (!mddev->suspended)
break;
rcu_read_unlock();
schedule();
rcu_read_lock();
}
finish_wait(&mddev->sb_wait, &__wait);
}
atomic_inc(&mddev->active_io);
rcu_read_unlock();
/*
* save the sectors now since our bio can
* go away inside make_request
*/
sectors = bio_sectors(bio);
mddev->pers->make_request(mddev, bio);
cpu = part_stat_lock();
part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
part_stat_unlock();
if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
wake_up(&mddev->sb_wait);
}
/* mddev_suspend makes sure no new requests are submitted
* to the device, and that any requests that have been submitted
* are completely handled.
* Once mddev_detach() is called and completes, the module will be
* completely unused.
*/
void mddev_suspend(struct mddev *mddev)
{
BUG_ON(mddev->suspended);
mddev->suspended = 1;
synchronize_rcu();
wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
mddev->pers->quiesce(mddev, 1);
del_timer_sync(&mddev->safemode_timer);
}
EXPORT_SYMBOL_GPL(mddev_suspend);
void mddev_resume(struct mddev *mddev)
{
mddev->suspended = 0;
wake_up(&mddev->sb_wait);
mddev->pers->quiesce(mddev, 0);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
EXPORT_SYMBOL_GPL(mddev_resume);
int mddev_congested(struct mddev *mddev, int bits)
{
struct md_personality *pers = mddev->pers;
int ret = 0;
rcu_read_lock();
if (mddev->suspended)
ret = 1;
else if (pers && pers->congested)
ret = pers->congested(mddev, bits);
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(mddev_congested);
static int md_congested(void *data, int bits)
{
struct mddev *mddev = data;
return mddev_congested(mddev, bits);
}
static int md_mergeable_bvec(struct request_queue *q,
struct bvec_merge_data *bvm,
struct bio_vec *biovec)
{
struct mddev *mddev = q->queuedata;
int ret;
rcu_read_lock();
if (mddev->suspended) {
/* Must always allow one vec */
if (bvm->bi_size == 0)
ret = biovec->bv_len;
else
ret = 0;
} else {
struct md_personality *pers = mddev->pers;
if (pers && pers->mergeable_bvec)
ret = pers->mergeable_bvec(mddev, bvm, biovec);
else
ret = biovec->bv_len;
}
rcu_read_unlock();
return ret;
}
/*
* Generic flush handling for md
*/
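/*
 * Flow sketch: md_flush_request() parks the incoming bio in
 * mddev->flush_bio and queues submit_flushes(), which sends an empty
 * FLUSH bio to every non-faulty in-array rdev.  When the last of those
 * completes, md_submit_flush_data() either ends an empty barrier bio
 * directly or resubmits the payload with REQ_FLUSH cleared.
 */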
static void md_end_flush(struct bio *bio, int err)
{
struct md_rdev *rdev = bio->bi_private;
struct mddev *mddev = rdev->mddev;
rdev_dec_pending(rdev, mddev);
if (atomic_dec_and_test(&mddev->flush_pending)) {
/* The pre-request flush has finished */
queue_work(md_wq, &mddev->flush_work);
}
bio_put(bio);
}
static void md_submit_flush_data(struct work_struct *ws);
static void submit_flushes(struct work_struct *ws)
{
struct mddev *mddev = container_of(ws, struct mddev, flush_work);
struct md_rdev *rdev;
INIT_WORK(&mddev->flush_work, md_submit_flush_data);
atomic_set(&mddev->flush_pending, 1);
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev)
if (rdev->raid_disk >= 0 &&
!test_bit(Faulty, &rdev->flags)) {
/* Take two references, one is dropped
* when request finishes, one after
* we reclaim rcu_read_lock
*/
struct bio *bi;
atomic_inc(&rdev->nr_pending);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
bi->bi_end_io = md_end_flush;
bi->bi_private = rdev;
bi->bi_bdev = rdev->bdev;
atomic_inc(&mddev->flush_pending);
submit_bio(WRITE_FLUSH, bi);
rcu_read_lock();
rdev_dec_pending(rdev, mddev);
}
rcu_read_unlock();
if (atomic_dec_and_test(&mddev->flush_pending))
queue_work(md_wq, &mddev->flush_work);
}
static void md_submit_flush_data(struct work_struct *ws)
{
struct mddev *mddev = container_of(ws, struct mddev, flush_work);
struct bio *bio = mddev->flush_bio;
if (bio->bi_iter.bi_size == 0)
/* an empty barrier - all done */
bio_endio(bio, 0);
else {
bio->bi_rw &= ~REQ_FLUSH;
mddev->pers->make_request(mddev, bio);
}
mddev->flush_bio = NULL;
wake_up(&mddev->sb_wait);
}
void md_flush_request(struct mddev *mddev, struct bio *bio)
{
spin_lock_irq(&mddev->lock);
wait_event_lock_irq(mddev->sb_wait,
!mddev->flush_bio,
mddev->lock);
mddev->flush_bio = bio;
spin_unlock_irq(&mddev->lock);
INIT_WORK(&mddev->flush_work, submit_flushes);
queue_work(md_wq, &mddev->flush_work);
}
EXPORT_SYMBOL(md_flush_request);
void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
struct mddev *mddev = cb->data;
md_wakeup_thread(mddev->thread);
kfree(cb);
}
EXPORT_SYMBOL(md_unplug);
static inline struct mddev *mddev_get(struct mddev *mddev)
{
atomic_inc(&mddev->active);
return mddev;
}
static void mddev_delayed_delete(struct work_struct *ws);
static void mddev_put(struct mddev *mddev)
{
struct bio_set *bs = NULL;
if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
return;
if (!mddev->raid_disks && list_empty(&mddev->disks) &&
mddev->ctime == 0 && !mddev->hold_active) {
/* Array is not configured at all, and not held active,
* so destroy it */
list_del_init(&mddev->all_mddevs);
bs = mddev->bio_set;
mddev->bio_set = NULL;
if (mddev->gendisk) {
/* We did a probe so need to clean up. Call
* queue_work inside the spinlock so that
* flush_workqueue() after mddev_find will
* succeed in waiting for the work to be done.
*/
INIT_WORK(&mddev->del_work, mddev_delayed_delete);
queue_work(md_misc_wq, &mddev->del_work);
} else
kfree(mddev);
}
spin_unlock(&all_mddevs_lock);
if (bs)
bioset_free(bs);
}
void mddev_init(struct mddev *mddev)
{
mutex_init(&mddev->open_mutex);
mutex_init(&mddev->reconfig_mutex);
mutex_init(&mddev->bitmap_info.mutex);
INIT_LIST_HEAD(&mddev->disks);
INIT_LIST_HEAD(&mddev->all_mddevs);
init_timer(&mddev->safemode_timer);
atomic_set(&mddev->active, 1);
atomic_set(&mddev->openers, 0);
atomic_set(&mddev->active_io, 0);
spin_lock_init(&mddev->lock);
atomic_set(&mddev->flush_pending, 0);
init_waitqueue_head(&mddev->sb_wait);
init_waitqueue_head(&mddev->recovery_wait);
mddev->reshape_position = MaxSector;
mddev->reshape_backwards = 0;
mddev->last_sync_action = "none";
mddev->resync_min = 0;
mddev->resync_max = MaxSector;
mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);
static struct mddev *mddev_find(dev_t unit)
{
struct mddev *mddev, *new = NULL;
if (unit && MAJOR(unit) != MD_MAJOR)
unit &= ~((1<<MdpMinorShift)-1);
retry:
spin_lock(&all_mddevs_lock);
if (unit) {
list_for_each_entry(mddev, &all_mddevs, all_mddevs)
if (mddev->unit == unit) {
mddev_get(mddev);
spin_unlock(&all_mddevs_lock);
kfree(new);
return mddev;
}
if (new) {
list_add(&new->all_mddevs, &all_mddevs);
spin_unlock(&all_mddevs_lock);
new->hold_active = UNTIL_IOCTL;
return new;
}
} else if (new) {
/* find an unused unit number */
static int next_minor = 512;
int start = next_minor;
int is_free = 0;
int dev = 0;
while (!is_free) {
dev = MKDEV(MD_MAJOR, next_minor);
next_minor++;
if (next_minor > MINORMASK)
next_minor = 0;
if (next_minor == start) {
/* Oh dear, all in use. */
spin_unlock(&all_mddevs_lock);
kfree(new);
return NULL;
}
is_free = 1;
list_for_each_entry(mddev, &all_mddevs, all_mddevs)
if (mddev->unit == dev) {
is_free = 0;
break;
}
}
new->unit = dev;
new->md_minor = MINOR(dev);
new->hold_active = UNTIL_STOP;
list_add(&new->all_mddevs, &all_mddevs);
spin_unlock(&all_mddevs_lock);
return new;
}
spin_unlock(&all_mddevs_lock);
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new)
return NULL;
new->unit = unit;
if (MAJOR(unit) == MD_MAJOR)
new->md_minor = MINOR(unit);
else
new->md_minor = MINOR(unit) >> MdpMinorShift;
mddev_init(new);
goto retry;
}
static struct attribute_group md_redundancy_group;
void mddev_unlock(struct mddev *mddev)
{
if (mddev->to_remove) {
/* These cannot be removed under reconfig_mutex as
* an access to the files will try to take reconfig_mutex
* while holding the file unremovable, which leads to
* a deadlock.
 * So hold sysfs_active while the removal is happening,
 * and anything else which might set ->to_remove or may
 * otherwise change the sysfs namespace will fail with
* -EBUSY if sysfs_active is still set.
* We set sysfs_active under reconfig_mutex and elsewhere
* test it under the same mutex to ensure its correct value
* is seen.
*/
struct attribute_group *to_remove = mddev->to_remove;
mddev->to_remove = NULL;
mddev->sysfs_active = 1;
mutex_unlock(&mddev->reconfig_mutex);
if (mddev->kobj.sd) {
if (to_remove != &md_redundancy_group)
sysfs_remove_group(&mddev->kobj, to_remove);
if (mddev->pers == NULL ||
mddev->pers->sync_request == NULL) {
sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
if (mddev->sysfs_action)
sysfs_put(mddev->sysfs_action);
mddev->sysfs_action = NULL;
}
}
mddev->sysfs_active = 0;
} else
mutex_unlock(&mddev->reconfig_mutex);
/* As we've dropped the mutex we need a spinlock to
* make sure the thread doesn't disappear
*/
spin_lock(&pers_lock);
md_wakeup_thread(mddev->thread);
spin_unlock(&pers_lock);
}
EXPORT_SYMBOL_GPL(mddev_unlock);
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
{
struct md_rdev *rdev;
rdev_for_each_rcu(rdev, mddev)
if (rdev->desc_nr == nr)
return rdev;
return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);
static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
{
struct md_rdev *rdev;
rdev_for_each(rdev, mddev)
if (rdev->bdev->bd_dev == dev)
return rdev;
return NULL;
}
static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
{
struct md_rdev *rdev;
rdev_for_each_rcu(rdev, mddev)
if (rdev->bdev->bd_dev == dev)
return rdev;
return NULL;
}
static struct md_personality *find_pers(int level, char *clevel)
{
struct md_personality *pers;
list_for_each_entry(pers, &pers_list, list) {
if (level != LEVEL_NONE && pers->level == level)
return pers;
if (strcmp(pers->name, clevel)==0)
return pers;
}
return NULL;
}
/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
{
sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
return MD_NEW_SIZE_SECTORS(num_sectors);
}
static int alloc_disk_sb(struct md_rdev *rdev)
{
rdev->sb_page = alloc_page(GFP_KERNEL);
if (!rdev->sb_page) {
printk(KERN_ALERT "md: out of memory.\n");
return -ENOMEM;
}
return 0;
}
void md_rdev_clear(struct md_rdev *rdev)
{
if (rdev->sb_page) {
put_page(rdev->sb_page);
rdev->sb_loaded = 0;
rdev->sb_page = NULL;
rdev->sb_start = 0;
rdev->sectors = 0;
}
if (rdev->bb_page) {
put_page(rdev->bb_page);
rdev->bb_page = NULL;
}
kfree(rdev->badblocks.page);
rdev->badblocks.page = NULL;
}
EXPORT_SYMBOL_GPL(md_rdev_clear);
static void super_written(struct bio *bio, int error)
{
struct md_rdev *rdev = bio->bi_private;
struct mddev *mddev = rdev->mddev;
if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
printk("md: super_written gets error=%d, uptodate=%d\n",
error, test_bit(BIO_UPTODATE, &bio->bi_flags));
WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
md_error(mddev, rdev);
}
if (atomic_dec_and_test(&mddev->pending_writes))
wake_up(&mddev->sb_wait);
bio_put(bio);
}
void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
sector_t sector, int size, struct page *page)
{
/* write first size bytes of page to sector of rdev
* Increment mddev->pending_writes before returning
* and decrement it on completion, waking up sb_wait
* if zero is reached.
* If an error occurred, call md_error
*/
struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
bio->bi_iter.bi_sector = sector;
bio_add_page(bio, page, size, 0);
bio->bi_private = rdev;
bio->bi_end_io = super_written;
atomic_inc(&mddev->pending_writes);
submit_bio(WRITE_FLUSH_FUA, bio);
}
void md_super_wait(struct mddev *mddev)
{
/* wait for all superblock writes that were scheduled to complete */
wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
}
int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct page *page, int rw, bool metadata_op)
{
struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
int ret;
bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
rdev->meta_bdev : rdev->bdev;
if (metadata_op)
bio->bi_iter.bi_sector = sector + rdev->sb_start;
else if (rdev->mddev->reshape_position != MaxSector &&
(rdev->mddev->reshape_backwards ==
(sector >= rdev->mddev->reshape_position)))
bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
else
bio->bi_iter.bi_sector = sector + rdev->data_offset;
bio_add_page(bio, page, size, 0);
submit_bio_wait(rw, bio);
ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
bio_put(bio);
return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);
static int read_disk_sb(struct md_rdev *rdev, int size)
{
char b[BDEVNAME_SIZE];
if (rdev->sb_loaded)
return 0;
if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
goto fail;
rdev->sb_loaded = 1;
return 0;
fail:
printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
bdevname(rdev->bdev,b));
return -EINVAL;
}
static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
return sb1->set_uuid0 == sb2->set_uuid0 &&
sb1->set_uuid1 == sb2->set_uuid1 &&
sb1->set_uuid2 == sb2->set_uuid2 &&
sb1->set_uuid3 == sb2->set_uuid3;
}
static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
int ret;
mdp_super_t *tmp1, *tmp2;
tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
if (!tmp1 || !tmp2) {
ret = 0;
printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
goto abort;
}
*tmp1 = *sb1;
*tmp2 = *sb2;
/*
* nr_disks is not constant
*/
tmp1->nr_disks = 0;
tmp2->nr_disks = 0;
ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
kfree(tmp1);
kfree(tmp2);
return ret;
}
static u32 md_csum_fold(u32 csum)
{
csum = (csum & 0xffff) + (csum >> 16);
return (csum & 0xffff) + (csum >> 16);
}
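/*
 * Worked example for md_csum_fold(0x1234ffff):
 *	pass 1: 0xffff + 0x1234 = 0x11233
 *	pass 2: 0x1233 + 0x0001 = 0x1234
 * i.e. a ones-complement-style fold of a 32-bit sum into 16 bits.
 */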
static unsigned int calc_sb_csum(mdp_super_t *sb)
{
u64 newcsum = 0;
u32 *sb32 = (u32*)sb;
int i;
unsigned int disk_csum, csum;
disk_csum = sb->sb_csum;
sb->sb_csum = 0;
for (i = 0; i < MD_SB_BYTES/4 ; i++)
newcsum += sb32[i];
csum = (newcsum & 0xffffffff) + (newcsum>>32);
#ifdef CONFIG_ALPHA
/* This used to use csum_partial, which was wrong for several
* reasons including that different results are returned on
* different architectures. It isn't critical that we get exactly
* the same return value as before (we always csum_fold before
* testing, and that removes any differences). However as we
* know that csum_partial always returned a 16bit value on
* alphas, do a fold to maximise conformity to previous behaviour.
*/
sb->sb_csum = md_csum_fold(disk_csum);
#else
sb->sb_csum = disk_csum;
#endif
return csum;
}
/*
* Handle superblock details.
* We want to be able to handle multiple superblock formats
* so we have a common interface to them all, and an array of
* different handlers.
* We rely on user-space to write the initial superblock, and support
* reading and updating of superblocks.
* Interface methods are:
* int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
* loads and validates a superblock on dev.
* if refdev != NULL, compare superblocks on both devices
* Return:
* 0 - dev has a superblock that is compatible with refdev
* 1 - dev has a superblock that is compatible and newer than refdev
* so dev should be used as the refdev in future
* -EINVAL superblock incompatible or invalid
* -othererror e.g. -EIO
*
* int validate_super(struct mddev *mddev, struct md_rdev *dev)
* Verify that dev is acceptable into mddev.
* The first time, mddev->raid_disks will be 0, and data from
* dev should be merged in. Subsequent calls check that dev
* is new enough. Return 0 or -EINVAL
*
* void sync_super(struct mddev *mddev, struct md_rdev *dev)
* Update the superblock for rdev with data in mddev
* This does not write to disc.
*
*/
struct super_type {
char *name;
struct module *owner;
int (*load_super)(struct md_rdev *rdev,
struct md_rdev *refdev,
int minor_version);
int (*validate_super)(struct mddev *mddev,
struct md_rdev *rdev);
void (*sync_super)(struct mddev *mddev,
struct md_rdev *rdev);
unsigned long long (*rdev_size_change)(struct md_rdev *rdev,
sector_t num_sectors);
int (*allow_new_offset)(struct md_rdev *rdev,
unsigned long long new_offset);
};
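/*
 * Dispatch sketch (relies on the super_types[] table defined further
 * below): callers select a handler by on-disk major version, e.g.
 *
 *	err = super_types[mddev->major_version].load_super(rdev, refdev,
 *				mddev->minor_version);
 */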
/*
* Check that the given mddev has no bitmap.
*
* This function is called from the run method of all personalities that do not
* support bitmaps. It prints an error message and returns non-zero if mddev
* has a bitmap. Otherwise, it returns 0.
*
*/
int md_check_no_bitmap(struct mddev *mddev)
{
if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
return 0;
printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
mdname(mddev), mddev->pers->name);
return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);
/*
* load_super for 0.90.0
*/
static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
mdp_super_t *sb;
int ret;
/*
* Calculate the position of the superblock (512byte sectors),
* it's at the end of the disk.
*
* It also happens to be a multiple of 4Kb.
*/
rdev->sb_start = calc_dev_sboffset(rdev);
ret = read_disk_sb(rdev, MD_SB_BYTES);
if (ret) return ret;
ret = -EINVAL;
bdevname(rdev->bdev, b);
sb = page_address(rdev->sb_page);
if (sb->md_magic != MD_SB_MAGIC) {
printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
b);
goto abort;
}
if (sb->major_version != 0 ||
sb->minor_version < 90 ||
sb->minor_version > 91) {
printk(KERN_WARNING "Bad version number %d.%d on %s\n",
sb->major_version, sb->minor_version,
b);
goto abort;
}
if (sb->raid_disks <= 0)
goto abort;
if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
b);
goto abort;
}
rdev->preferred_minor = sb->md_minor;
rdev->data_offset = 0;
rdev->new_data_offset = 0;
rdev->sb_size = MD_SB_BYTES;
rdev->badblocks.shift = -1;
if (sb->level == LEVEL_MULTIPATH)
rdev->desc_nr = -1;
else
rdev->desc_nr = sb->this_disk.number;
if (!refdev) {
ret = 1;
} else {
__u64 ev1, ev2;
mdp_super_t *refsb = page_address(refdev->sb_page);
if (!uuid_equal(refsb, sb)) {
printk(KERN_WARNING "md: %s has different UUID to %s\n",
b, bdevname(refdev->bdev,b2));
goto abort;
}
if (!sb_equal(refsb, sb)) {
printk(KERN_WARNING "md: %s has same UUID"
" but different superblock to %s\n",
b, bdevname(refdev->bdev, b2));
goto abort;
}
ev1 = md_event(sb);
ev2 = md_event(refsb);
if (ev1 > ev2)
ret = 1;
else
ret = 0;
}
rdev->sectors = rdev->sb_start;
/* Limit to 4TB as metadata cannot record more than that.
* (not needed for Linear and RAID0 as metadata doesn't
* record this size)
*/
if (rdev->sectors >= (2ULL << 32) && sb->level >= 1)
rdev->sectors = (2ULL << 32) - 2;
if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
/* "this cannot possibly happen" ... */
ret = -EINVAL;
abort:
return ret;
}
/*
* validate_super for 0.90.0
*/
static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
{
mdp_disk_t *desc;
mdp_super_t *sb = page_address(rdev->sb_page);
__u64 ev1 = md_event(sb);
rdev->raid_disk = -1;
clear_bit(Faulty, &rdev->flags);
clear_bit(In_sync, &rdev->flags);
clear_bit(Bitmap_sync, &rdev->flags);
clear_bit(WriteMostly, &rdev->flags);
if (mddev->raid_disks == 0) {
mddev->major_version = 0;
mddev->minor_version = sb->minor_version;
mddev->patch_version = sb->patch_version;
mddev->external = 0;
mddev->chunk_sectors = sb->chunk_size >> 9;
mddev->ctime = sb->ctime;
mddev->utime = sb->utime;
mddev->level = sb->level;
mddev->clevel[0] = 0;
mddev->layout = sb->layout;
mddev->raid_disks = sb->raid_disks;
mddev->dev_sectors = ((sector_t)sb->size) * 2;
mddev->events = ev1;
mddev->bitmap_info.offset = 0;
mddev->bitmap_info.space = 0;
/* bitmap can use 60 K after the 4K superblocks */
mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
mddev->reshape_backwards = 0;
if (mddev->minor_version >= 91) {
mddev->reshape_position = sb->reshape_position;
mddev->delta_disks = sb->delta_disks;
mddev->new_level = sb->new_level;
mddev->new_layout = sb->new_layout;
mddev->new_chunk_sectors = sb->new_chunk >> 9;
if (mddev->delta_disks < 0)
mddev->reshape_backwards = 1;
} else {
mddev->reshape_position = MaxSector;
mddev->delta_disks = 0;
mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout;
mddev->new_chunk_sectors = mddev->chunk_sectors;
}
if (sb->state & (1<<MD_SB_CLEAN))
mddev->recovery_cp = MaxSector;
else {
if (sb->events_hi == sb->cp_events_hi &&
sb->events_lo == sb->cp_events_lo) {
mddev->recovery_cp = sb->recovery_cp;
} else
mddev->recovery_cp = 0;
}
memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
mddev->max_disks = MD_SB_DISKS;
if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
mddev->bitmap_info.file == NULL) {
mddev->bitmap_info.offset =
mddev->bitmap_info.default_offset;
mddev->bitmap_info.space =
mddev->bitmap_info.default_space;
}
} else if (mddev->pers == NULL) {
/* Insist on good event counter while assembling, except
* for spares (which don't need an event count) */
++ev1;
if (sb->disks[rdev->desc_nr].state & (
(1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
if (ev1 < mddev->events)
return -EINVAL;
} else if (mddev->bitmap) {
/* if adding to array with a bitmap, then we can accept an
* older device ... but not too old.
*/
if (ev1 < mddev->bitmap->events_cleared)
return 0;
if (ev1 < mddev->events)
set_bit(Bitmap_sync, &rdev->flags);
} else {
if (ev1 < mddev->events)
/* just a hot-add of a new device, leave raid_disk at -1 */
return 0;
}
if (mddev->level != LEVEL_MULTIPATH) {
desc = sb->disks + rdev->desc_nr;
if (desc->state & (1<<MD_DISK_FAULTY))
set_bit(Faulty, &rdev->flags);
else if (desc->state & (1<<MD_DISK_SYNC) /* &&
desc->raid_disk < mddev->raid_disks */) {
set_bit(In_sync, &rdev->flags);
rdev->raid_disk = desc->raid_disk;
rdev->saved_raid_disk = desc->raid_disk;
} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
/* active but not in sync implies recovery up to
* reshape position. We don't know exactly where
* that is, so set to zero for now */
if (mddev->minor_version >= 91) {
rdev->recovery_offset = 0;
rdev->raid_disk = desc->raid_disk;
}
}
if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
set_bit(WriteMostly, &rdev->flags);
} else /* MULTIPATH are always insync */
set_bit(In_sync, &rdev->flags);
return 0;
}
/*
* sync_super for 0.90.0
*/
static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
{
mdp_super_t *sb;
struct md_rdev *rdev2;
int next_spare = mddev->raid_disks;
/* make rdev->sb match mddev data..
*
* 1/ zero out disks
* 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
* 3/ any empty disks < next_spare become removed
*
* disks[0] gets initialised to REMOVED because
* we cannot be sure from other fields if it has
* been initialised or not.
*/
int i;
int active=0, working=0,failed=0,spare=0,nr_disks=0;
rdev->sb_size = MD_SB_BYTES;
sb = page_address(rdev->sb_page);
memset(sb, 0, sizeof(*sb));
sb->md_magic = MD_SB_MAGIC;
sb->major_version = mddev->major_version;
sb->patch_version = mddev->patch_version;
sb->gvalid_words = 0; /* ignored */
memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
memcpy(&sb->set_uuid3, mddev->uuid+12,4);
sb->ctime = mddev->ctime;
sb->level = mddev->level;
sb->size = mddev->dev_sectors / 2;
sb->raid_disks = mddev->raid_disks;
sb->md_minor = mddev->md_minor;
sb->not_persistent = 0;
sb->utime = mddev->utime;
sb->state = 0;
sb->events_hi = (mddev->events>>32);
sb->events_lo = (u32)mddev->events;
if (mddev->reshape_position == MaxSector)
sb->minor_version = 90;
else {
sb->minor_version = 91;
sb->reshape_position = mddev->reshape_position;
sb->new_level = mddev->new_level;
sb->delta_disks = mddev->delta_disks;
sb->new_layout = mddev->new_layout;
sb->new_chunk = mddev->new_chunk_sectors << 9;
}
mddev->minor_version = sb->minor_version;
if (mddev->in_sync)
{
sb->recovery_cp = mddev->recovery_cp;
sb->cp_events_hi = (mddev->events>>32);
sb->cp_events_lo = (u32)mddev->events;
if (mddev->recovery_cp == MaxSector)
sb->state = (1<< MD_SB_CLEAN);
} else
sb->recovery_cp = 0;
sb->layout = mddev->layout;
sb->chunk_size = mddev->chunk_sectors << 9;
if (mddev->bitmap && mddev->bitmap_info.file == NULL)
sb->state |= (1<<MD_SB_BITMAP_PRESENT);
sb->disks[0].state = (1<<MD_DISK_REMOVED);
rdev_for_each(rdev2, mddev) {
mdp_disk_t *d;
int desc_nr;
int is_active = test_bit(In_sync, &rdev2->flags);
if (rdev2->raid_disk >= 0 &&
sb->minor_version >= 91)
/* we have nowhere to store the recovery_offset,
* but if it is not below the reshape_position,
* we can piggy-back on that.
*/
is_active = 1;
if (rdev2->raid_disk < 0 ||
test_bit(Faulty, &rdev2->flags))
is_active = 0;
if (is_active)
desc_nr = rdev2->raid_disk;
else
desc_nr = next_spare++;
rdev2->desc_nr = desc_nr;
d = &sb->disks[rdev2->desc_nr];
nr_disks++;
d->number = rdev2->desc_nr;
d->major = MAJOR(rdev2->bdev->bd_dev);
d->minor = MINOR(rdev2->bdev->bd_dev);
if (is_active)
d->raid_disk = rdev2->raid_disk;
else
d->raid_disk = rdev2->desc_nr; /* compatibility */
if (test_bit(Faulty, &rdev2->flags))
d->state = (1<<MD_DISK_FAULTY);
else if (is_active) {
d->state = (1<<MD_DISK_ACTIVE);
if (test_bit(In_sync, &rdev2->flags))
d->state |= (1<<MD_DISK_SYNC);
active++;
working++;
} else {
d->state = 0;
spare++;
working++;
}
if (test_bit(WriteMostly, &rdev2->flags))
d->state |= (1<<MD_DISK_WRITEMOSTLY);
}
/* now set the "removed" and "faulty" bits on any missing devices */
for (i=0 ; i < mddev->raid_disks ; i++) {
mdp_disk_t *d = &sb->disks[i];
if (d->state == 0 && d->number == 0) {
d->number = i;
d->raid_disk = i;
d->state = (1<<MD_DISK_REMOVED);
d->state |= (1<<MD_DISK_FAULTY);
failed++;
}
}
sb->nr_disks = nr_disks;
sb->active_disks = active;
sb->working_disks = working;
sb->failed_disks = failed;
sb->spare_disks = spare;
sb->this_disk = sb->disks[rdev->desc_nr];
sb->sb_csum = calc_sb_csum(sb);
}
/*
* rdev_size_change for 0.90.0
*/
static unsigned long long
super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
return 0; /* component must fit device */
if (rdev->mddev->bitmap_info.offset)
return 0; /* can't move bitmap */
rdev->sb_start = calc_dev_sboffset(rdev);
if (!num_sectors || num_sectors > rdev->sb_start)
num_sectors = rdev->sb_start;
/* Limit to 4TB as metadata cannot record more than that.
* 4TB == 2^32 KB, or 2*2^32 sectors.
*/
if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
num_sectors = (2ULL << 32) - 2;
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
rdev->sb_page);
md_super_wait(rdev->mddev);
return num_sectors;
}
static int
super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
{
/* non-zero offset changes not possible with v0.90 */
return new_offset == 0;
}
/*
* version 1 superblock
*/
static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
{
__le32 disk_csum;
u32 csum;
unsigned long long newcsum;
int size = 256 + le32_to_cpu(sb->max_dev)*2;
__le32 *isuper = (__le32*)sb;
disk_csum = sb->sb_csum;
sb->sb_csum = 0;
newcsum = 0;
for (; size >= 4; size -= 4)
newcsum += le32_to_cpu(*isuper++);
if (size == 2)
newcsum += le16_to_cpu(*(__le16*) isuper);
csum = (newcsum & 0xffffffff) + (newcsum >> 32);
sb->sb_csum = disk_csum;
return cpu_to_le32(csum);
}
static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
int acknowledged);
static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
struct mdp_superblock_1 *sb;
int ret;
sector_t sb_start;
sector_t sectors;
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
int bmask;
/*
* Calculate the position of the superblock in 512byte sectors.
* It is always aligned to a 4K boundary and
 * depending on minor_version, it can be:
* 0: At least 8K, but less than 12K, from end of device
* 1: At start of device
* 2: 4K from start of device.
*/
switch(minor_version) {
case 0:
sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
sb_start -= 8*2;
sb_start &= ~(sector_t)(4*2-1);
break;
case 1:
sb_start = 0;
break;
case 2:
sb_start = 8;
break;
default:
return -EINVAL;
}
rdev->sb_start = sb_start;
/* superblock is rarely larger than 1K, but it can be larger,
* and it is safe to read 4k, so we do that
*/
ret = read_disk_sb(rdev, 4096);
if (ret) return ret;
sb = page_address(rdev->sb_page);
if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
sb->major_version != cpu_to_le32(1) ||
le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
le64_to_cpu(sb->super_offset) != rdev->sb_start ||
(le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
return -EINVAL;
if (calc_sb_1_csum(sb) != sb->sb_csum) {
printk("md: invalid superblock checksum on %s\n",
bdevname(rdev->bdev,b));
return -EINVAL;
}
if (le64_to_cpu(sb->data_size) < 10) {
printk("md: data_size too small on %s\n",
bdevname(rdev->bdev,b));
return -EINVAL;
}
if (sb->pad0 ||
sb->pad3[0] ||
memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
/* Some padding is non-zero, might be a new feature */
return -EINVAL;
rdev->preferred_minor = 0xffff;
rdev->data_offset = le64_to_cpu(sb->data_offset);
rdev->new_data_offset = rdev->data_offset;
if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
(le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
if (rdev->sb_size & bmask)
rdev->sb_size = (rdev->sb_size | bmask) + 1;
if (minor_version
&& rdev->data_offset < sb_start + (rdev->sb_size/512))
return -EINVAL;
if (minor_version
&& rdev->new_data_offset < sb_start + (rdev->sb_size/512))
return -EINVAL;
if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
rdev->desc_nr = -1;
else
rdev->desc_nr = le32_to_cpu(sb->dev_number);
if (!rdev->bb_page) {
rdev->bb_page = alloc_page(GFP_KERNEL);
if (!rdev->bb_page)
return -ENOMEM;
}
if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
rdev->badblocks.count == 0) {
/* need to load the bad block list.
* Currently we limit it to one page.
*/
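/* On-disk layout of each little-endian 64-bit entry, as decoded in the
 * loop below: the low 10 bits hold the length and the remaining high
 * bits the start sector, both stored in units of 2^bblog_shift
 * sectors; an all-ones entry terminates the list.
 */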
s32 offset;
sector_t bb_sector;
u64 *bbp;
int i;
int sectors = le16_to_cpu(sb->bblog_size);
if (sectors > (PAGE_SIZE / 512))
return -EINVAL;
offset = le32_to_cpu(sb->bblog_offset);
if (offset == 0)
return -EINVAL;
bb_sector = (long long)offset;
if (!sync_page_io(rdev, bb_sector, sectors << 9,
rdev->bb_page, READ, true))
return -EIO;
bbp = (u64 *)page_address(rdev->bb_page);
rdev->badblocks.shift = sb->bblog_shift;
for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
u64 bb = le64_to_cpu(*bbp);
int count = bb & (0x3ff);
u64 sector = bb >> 10;
sector <<= sb->bblog_shift;
count <<= sb->bblog_shift;
if (bb + 1 == 0)
break;
if (md_set_badblocks(&rdev->badblocks,
sector, count, 1) == 0)
return -EINVAL;
}
} else if (sb->bblog_offset != 0)
rdev->badblocks.shift = 0;
if (!refdev) {
ret = 1;
} else {
__u64 ev1, ev2;
struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
sb->level != refsb->level ||
sb->layout != refsb->layout ||
sb->chunksize != refsb->chunksize) {
printk(KERN_WARNING "md: %s has strangely different"
" superblock to %s\n",
bdevname(rdev->bdev,b),
bdevname(refdev->bdev,b2));
return -EINVAL;
}
ev1 = le64_to_cpu(sb->events);
ev2 = le64_to_cpu(refsb->events);
if (ev1 > ev2)
ret = 1;
else
ret = 0;
}
if (minor_version) {
sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
sectors -= rdev->data_offset;
} else
sectors = rdev->sb_start;
if (sectors < le64_to_cpu(sb->data_size))
return -EINVAL;
rdev->sectors = le64_to_cpu(sb->data_size);
return ret;
}
static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
{
struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
__u64 ev1 = le64_to_cpu(sb->events);
rdev->raid_disk = -1;
clear_bit(Faulty, &rdev->flags);
clear_bit(In_sync, &rdev->flags);
clear_bit(Bitmap_sync, &rdev->flags);
clear_bit(WriteMostly, &rdev->flags);
if (mddev->raid_disks == 0) {
mddev->major_version = 1;
mddev->patch_version = 0;
mddev->external = 0;
mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
mddev->level = le32_to_cpu(sb->level);
mddev->clevel[0] = 0;
mddev->layout = le32_to_cpu(sb->layout);
mddev->raid_disks = le32_to_cpu(sb->raid_disks);
mddev->dev_sectors = le64_to_cpu(sb->size);
mddev->events = ev1;
mddev->bitmap_info.offset = 0;
mddev->bitmap_info.space = 0;
/* Default location for bitmap is 1K after superblock
* using 3K - total of 4K
*/
mddev->bitmap_info.default_offset = 1024 >> 9;
mddev->bitmap_info.default_space = (4096-1024) >> 9;
mddev->reshape_backwards = 0;
mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
memcpy(mddev->uuid, sb->set_uuid, 16);
mddev->max_disks = (4096-256)/2;
if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
mddev->bitmap_info.file == NULL) {
mddev->bitmap_info.offset =
(__s32)le32_to_cpu(sb->bitmap_offset);
/* Metadata doesn't record how much space is available.
* For 1.0, we assume we can use up to the superblock
* if before, else to 4K beyond superblock.
* For others, assume no change is possible.
*/
if (mddev->minor_version > 0)
mddev->bitmap_info.space = 0;
else if (mddev->bitmap_info.offset > 0)
mddev->bitmap_info.space =
8 - mddev->bitmap_info.offset;
else
mddev->bitmap_info.space =
-mddev->bitmap_info.offset;
}
if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
mddev->reshape_position = le64_to_cpu(sb->reshape_position);
mddev->delta_disks = le32_to_cpu(sb->delta_disks);
mddev->new_level = le32_to_cpu(sb->new_level);
mddev->new_layout = le32_to_cpu(sb->new_layout);
mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
if (mddev->delta_disks < 0 ||
(mddev->delta_disks == 0 &&
(le32_to_cpu(sb->feature_map)
& MD_FEATURE_RESHAPE_BACKWARDS)))
mddev->reshape_backwards = 1;
} else {
mddev->reshape_position = MaxSector;
mddev->delta_disks = 0;
mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout;
mddev->new_chunk_sectors = mddev->chunk_sectors;
}
} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling, except for
* spares (which don't need an event count) */
++ev1;
if (rdev->desc_nr >= 0 &&
rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
if (ev1 < mddev->events)
return -EINVAL;
} else if (mddev->bitmap) {
/* If adding to array with a bitmap, then we can accept an
* older device, but not too old.
*/
if (ev1 < mddev->bitmap->events_cleared)
return 0;
if (ev1 < mddev->events)
set_bit(Bitmap_sync, &rdev->flags);
} else {
if (ev1 < mddev->events)
/* just a hot-add of a new device, leave raid_disk at -1 */
return 0;
}
if (mddev->level != LEVEL_MULTIPATH) {
int role;
if (rdev->desc_nr < 0 ||
rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
role = 0xffff;
rdev->desc_nr = -1;
} else
role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
switch(role) {
case 0xffff: /* spare */
break;
case 0xfffe: /* faulty */
set_bit(Faulty, &rdev->flags);
break;
default:
rdev->saved_raid_disk = role;
if ((le32_to_cpu(sb->feature_map) &
MD_FEATURE_RECOVERY_OFFSET)) {
rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
if (!(le32_to_cpu(sb->feature_map) &
MD_FEATURE_RECOVERY_BITMAP))
rdev->saved_raid_disk = -1;
} else
set_bit(In_sync, &rdev->flags);
rdev->raid_disk = role;
break;
}
if (sb->devflags & WriteMostly1)
set_bit(WriteMostly, &rdev->flags);
if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
set_bit(Replacement, &rdev->flags);
} else /* MULTIPATH are always insync */
set_bit(In_sync, &rdev->flags);
return 0;
}
static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
{
struct mdp_superblock_1 *sb;
struct md_rdev *rdev2;
int max_dev, i;
/* make rdev->sb match mddev and rdev data. */
sb = page_address(rdev->sb_page);
sb->feature_map = 0;
sb->pad0 = 0;
sb->recovery_offset = cpu_to_le64(0);
memset(sb->pad3, 0, sizeof(sb->pad3));
sb->utime = cpu_to_le64((__u64)mddev->utime);
sb->events = cpu_to_le64(mddev->events);
if (mddev->in_sync)
sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
else
sb->resync_offset = cpu_to_le64(0);
sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
sb->raid_disks = cpu_to_le32(mddev->raid_disks);
sb->size = cpu_to_le64(mddev->dev_sectors);
sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
sb->level = cpu_to_le32(mddev->level);
sb->layout = cpu_to_le32(mddev->layout);
if (test_bit(WriteMostly, &rdev->flags))
sb->devflags |= WriteMostly1;
else
sb->devflags &= ~WriteMostly1;
sb->data_offset = cpu_to_le64(rdev->data_offset);
sb->data_size = cpu_to_le64(rdev->sectors);
if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
}
if (rdev->raid_disk >= 0 &&
!test_bit(In_sync, &rdev->flags)) {
sb->feature_map |=
cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
sb->recovery_offset =
cpu_to_le64(rdev->recovery_offset);
if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
sb->feature_map |=
cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
}
if (test_bit(Replacement, &rdev->flags))
sb->feature_map |=
cpu_to_le32(MD_FEATURE_REPLACEMENT);
if (mddev->reshape_position != MaxSector) {
sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
sb->reshape_position = cpu_to_le64(mddev->reshape_position);
sb->new_layout = cpu_to_le32(mddev->new_layout);
sb->delta_disks = cpu_to_le32(mddev->delta_disks);
sb->new_level = cpu_to_le32(mddev->new_level);
sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
if (mddev->delta_disks == 0 &&
mddev->reshape_backwards)
sb->feature_map
|= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
if (rdev->new_data_offset != rdev->data_offset) {
sb->feature_map
|= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
- rdev->data_offset));
}
}
if (rdev->badblocks.count == 0)
/* Nothing to do for bad blocks*/ ;
else if (sb->bblog_offset == 0)
/* Cannot record bad blocks on this device */
md_error(mddev, rdev);
else {
struct badblocks *bb = &rdev->badblocks;
u64 *bbp = (u64 *)page_address(rdev->bb_page);
u64 *p = bb->page;
sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
if (bb->changed) {
unsigned seq;
retry:
seq = read_seqbegin(&bb->lock);
memset(bbp, 0xff, PAGE_SIZE);
for (i = 0 ; i < bb->count ; i++) {
u64 internal_bb = p[i];
u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
| BB_LEN(internal_bb));
bbp[i] = cpu_to_le64(store_bb);
}
bb->changed = 0;
if (read_seqretry(&bb->lock, seq))
goto retry;
bb->sector = (rdev->sb_start +
(int)le32_to_cpu(sb->bblog_offset));
bb->size = le16_to_cpu(sb->bblog_size);
}
}
max_dev = 0;
rdev_for_each(rdev2, mddev)
if (rdev2->desc_nr+1 > max_dev)
max_dev = rdev2->desc_nr+1;
if (max_dev > le32_to_cpu(sb->max_dev)) {
int bmask;
sb->max_dev = cpu_to_le32(max_dev);
rdev->sb_size = max_dev * 2 + 256;
bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
if (rdev->sb_size & bmask)
rdev->sb_size = (rdev->sb_size | bmask) + 1;
} else
max_dev = le32_to_cpu(sb->max_dev);
for (i=0; i<max_dev;i++)
sb->dev_roles[i] = cpu_to_le16(0xfffe);
rdev_for_each(rdev2, mddev) {
i = rdev2->desc_nr;
if (test_bit(Faulty, &rdev2->flags))
sb->dev_roles[i] = cpu_to_le16(0xfffe);
else if (test_bit(In_sync, &rdev2->flags))
sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
else if (rdev2->raid_disk >= 0)
sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
else
sb->dev_roles[i] = cpu_to_le16(0xffff);
}
sb->sb_csum = calc_sb_1_csum(sb);
}
static unsigned long long
super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
struct mdp_superblock_1 *sb;
sector_t max_sectors;
if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
return 0; /* component must fit device */
if (rdev->data_offset != rdev->new_data_offset)
return 0; /* too confusing */
if (rdev->sb_start < rdev->data_offset) {
/* minor versions 1 and 2; superblock before data */
max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
max_sectors -= rdev->data_offset;
if (!num_sectors || num_sectors > max_sectors)
num_sectors = max_sectors;
} else if (rdev->mddev->bitmap_info.offset) {
/* minor version 0 with bitmap we can't move */
return 0;
} else {
/* minor version 0; superblock after data */
sector_t sb_start;
sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
sb_start &= ~(sector_t)(4*2 - 1);
max_sectors = rdev->sectors + sb_start - rdev->sb_start;
if (!num_sectors || num_sectors > max_sectors)
num_sectors = max_sectors;
rdev->sb_start = sb_start;
}
sb = page_address(rdev->sb_page);
sb->data_size = cpu_to_le64(num_sectors);
sb->super_offset = rdev->sb_start;
sb->sb_csum = calc_sb_1_csum(sb);
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
rdev->sb_page);
md_super_wait(rdev->mddev);
return num_sectors;
}
static int
super_1_allow_new_offset(struct md_rdev *rdev,
unsigned long long new_offset)
{
/* All necessary checks on new >= old have been done */
struct bitmap *bitmap;
if (new_offset >= rdev->data_offset)
return 1;
/* with 1.0 metadata, there is no metadata to tread on
* so we can always move back */
if (rdev->mddev->minor_version == 0)
return 1;
/* otherwise we must be sure not to step on
* any metadata, so stay:
* 36K beyond start of superblock
* beyond end of badblocks
* beyond write-intent bitmap
*/
if (rdev->sb_start + (32+4)*2 > new_offset)
return 0;
bitmap = rdev->mddev->bitmap;
if (bitmap && !rdev->mddev->bitmap_info.file &&
rdev->sb_start + rdev->mddev->bitmap_info.offset +
bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
return 0;
if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
return 0;
return 1;
}
static struct super_type super_types[] = {
[0] = {
.name = "0.90.0",
.owner = THIS_MODULE,
.load_super = super_90_load,
.validate_super = super_90_validate,
.sync_super = super_90_sync,
.rdev_size_change = super_90_rdev_size_change,
.allow_new_offset = super_90_allow_new_offset,
},
[1] = {
.name = "md-1",
.owner = THIS_MODULE,
.load_super = super_1_load,
.validate_super = super_1_validate,
.sync_super = super_1_sync,
.rdev_size_change = super_1_rdev_size_change,
.allow_new_offset = super_1_allow_new_offset,
},
};
static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
{
if (mddev->sync_super) {
mddev->sync_super(mddev, rdev);
return;
}
BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
super_types[mddev->major_version].sync_super(mddev, rdev);
}
static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
{
struct md_rdev *rdev, *rdev2;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev1)
rdev_for_each_rcu(rdev2, mddev2)
if (rdev->bdev->bd_contains ==
rdev2->bdev->bd_contains) {
rcu_read_unlock();
return 1;
}
rcu_read_unlock();
return 0;
}
static LIST_HEAD(pending_raid_disks);
/*
* Try to register data integrity profile for an mddev
*
* This is called when an array is started and after a disk has been kicked
* from the array. It only succeeds if all working and active component devices
* are integrity capable with matching profiles.
*/
int md_integrity_register(struct mddev *mddev)
{
struct md_rdev *rdev, *reference = NULL;
if (list_empty(&mddev->disks))
return 0; /* nothing to do */
if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
return 0; /* shouldn't register, or already is */
rdev_for_each(rdev, mddev) {
/* skip spares and non-functional disks */
if (test_bit(Faulty, &rdev->flags))
continue;
if (rdev->raid_disk < 0)
continue;
if (!reference) {
/* Use the first rdev as the reference */
reference = rdev;
continue;
}
/* does this rdev's profile match the reference profile? */
if (blk_integrity_compare(reference->bdev->bd_disk,
rdev->bdev->bd_disk) < 0)
return -EINVAL;
}
if (!reference || !bdev_get_integrity(reference->bdev))
return 0;
/*
* All component devices are integrity capable and have matching
* profiles, register the common profile for the md device.
*/
if (blk_integrity_register(mddev->gendisk,
bdev_get_integrity(reference->bdev)) != 0) {
printk(KERN_ERR "md: failed to register integrity for %s\n",
mdname(mddev));
return -EINVAL;
}
printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
printk(KERN_ERR "md: failed to create integrity pool for %s\n",
mdname(mddev));
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL(md_integrity_register);
/* Disable data integrity if non-capable/non-matching disk is being added */
void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
struct blk_integrity *bi_rdev;
struct blk_integrity *bi_mddev;
if (!mddev->gendisk)
return;
bi_rdev = bdev_get_integrity(rdev->bdev);
bi_mddev = blk_get_integrity(mddev->gendisk);
if (!bi_mddev) /* nothing to do */
return;
if (rdev->raid_disk < 0) /* skip spares */
return;
if (bi_rdev && blk_integrity_compare(mddev->gendisk,
rdev->bdev->bd_disk) >= 0)
return;
printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
blk_integrity_unregister(mddev->gendisk);
}
EXPORT_SYMBOL(md_integrity_add_rdev);
static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
{
char b[BDEVNAME_SIZE];
struct kobject *ko;
int err;
/* prevent duplicates */
if (find_rdev(mddev, rdev->bdev->bd_dev))
return -EEXIST;
/* make sure rdev->sectors exceeds mddev->dev_sectors */
if (rdev->sectors && (mddev->dev_sectors == 0 ||
rdev->sectors < mddev->dev_sectors)) {
if (mddev->pers) {
/* Cannot change size, so fail
* If mddev->level <= 0, then we don't care
* about aligning sizes (e.g. linear)
*/
if (mddev->level > 0)
return -ENOSPC;
} else
mddev->dev_sectors = rdev->sectors;
}
/* Verify rdev->desc_nr is unique.
* If it is -1, assign a free number, else
* check number is not in use
*/
rcu_read_lock();
if (rdev->desc_nr < 0) {
int choice = 0;
if (mddev->pers)
choice = mddev->raid_disks;
while (md_find_rdev_nr_rcu(mddev, choice))
choice++;
rdev->desc_nr = choice;
} else {
if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
rcu_read_unlock();
return -EBUSY;
}
}
rcu_read_unlock();
if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
mdname(mddev), mddev->max_disks);
return -EBUSY;
}
bdevname(rdev->bdev,b);
strreplace(b, '/', '!');
rdev->mddev = mddev;
printk(KERN_INFO "md: bind<%s>\n", b);
if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
goto fail;
ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
if (sysfs_create_link(&rdev->kobj, ko, "block"))
/* failure here is OK */;
rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
list_add_rcu(&rdev->same_set, &mddev->disks);
bd_link_disk_holder(rdev->bdev, mddev->gendisk);
/* May as well allow recovery to be retried once */
mddev->recovery_disabled++;
return 0;
fail:
printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
b, mdname(mddev));
return err;
}
static void md_delayed_delete(struct work_struct *ws)
{
struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
kobject_del(&rdev->kobj);
kobject_put(&rdev->kobj);
}
static void unbind_rdev_from_array(struct md_rdev *rdev)
{
char b[BDEVNAME_SIZE];
bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
list_del_rcu(&rdev->same_set);
printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
rdev->mddev = NULL;
sysfs_remove_link(&rdev->kobj, "block");
sysfs_put(rdev->sysfs_state);
rdev->sysfs_state = NULL;
rdev->badblocks.count = 0;
/* We need to delay this, otherwise we can deadlock when
* writing 'remove' to "dev/state". We also need
* to delay it due to rcu usage.
*/
synchronize_rcu();
INIT_WORK(&rdev->del_work, md_delayed_delete);
kobject_get(&rdev->kobj);
queue_work(md_misc_wq, &rdev->del_work);
}
/*
* prevent the device from being mounted, repartitioned or
* otherwise reused by a RAID array (or any other kernel
* subsystem), by bd_claiming the device.
*/
static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
{
int err = 0;
struct block_device *bdev;
char b[BDEVNAME_SIZE];
bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
shared ? (struct md_rdev *)lock_rdev : rdev);
if (IS_ERR(bdev)) {
printk(KERN_ERR "md: could not open %s.\n",
__bdevname(dev, b));
return PTR_ERR(bdev);
}
rdev->bdev = bdev;
return err;
}
static void unlock_rdev(struct md_rdev *rdev)
{
struct block_device *bdev = rdev->bdev;
rdev->bdev = NULL;
blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
void md_autodetect_dev(dev_t dev);
static void export_rdev(struct md_rdev *rdev)
{
char b[BDEVNAME_SIZE];
printk(KERN_INFO "md: export_rdev(%s)\n",
bdevname(rdev->bdev,b));
md_rdev_clear(rdev);
#ifndef MODULE
if (test_bit(AutoDetected, &rdev->flags))
md_autodetect_dev(rdev->bdev->bd_dev);
#endif
unlock_rdev(rdev);
kobject_put(&rdev->kobj);
}
void md_kick_rdev_from_array(struct md_rdev *rdev)
{
unbind_rdev_from_array(rdev);
export_rdev(rdev);
}
EXPORT_SYMBOL_GPL(md_kick_rdev_from_array);
static void export_array(struct mddev *mddev)
{
struct md_rdev *rdev;
while (!list_empty(&mddev->disks)) {
rdev = list_first_entry(&mddev->disks, struct md_rdev,
same_set);
md_kick_rdev_from_array(rdev);
}
mddev->raid_disks = 0;
mddev->major_version = 0;
}
static void sync_sbs(struct mddev *mddev, int nospares)
{
/* Update each superblock (in-memory image), but
* if we are allowed to, skip spares which already
* have the right event counter, or have one earlier
* (which would mean they aren't being marked as dirty
* with the rest of the array)
*/
struct md_rdev *rdev;
rdev_for_each(rdev, mddev) {
if (rdev->sb_events == mddev->events ||
(nospares &&
rdev->raid_disk < 0 &&
rdev->sb_events+1 == mddev->events)) {
/* Don't update this superblock */
rdev->sb_loaded = 2;
} else {
sync_super(mddev, rdev);
rdev->sb_loaded = 1;
}
}
}
void md_update_sb(struct mddev *mddev, int force_change)
{
struct md_rdev *rdev;
int sync_req;
int nospares = 0;
int any_badblocks_changed = 0;
if (mddev->ro) {
if (force_change)
set_bit(MD_CHANGE_DEVS, &mddev->flags);
return;
}
repeat:
/* First make sure individual recovery_offsets are correct */
rdev_for_each(rdev, mddev) {
if (rdev->raid_disk >= 0 &&
mddev->delta_disks >= 0 &&
!test_bit(In_sync, &rdev->flags) &&
mddev->curr_resync_completed > rdev->recovery_offset)
rdev->recovery_offset = mddev->curr_resync_completed;
}
if (!mddev->persistent) {
clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
clear_bit(MD_CHANGE_DEVS, &mddev->flags);
if (!mddev->external) {
clear_bit(MD_CHANGE_PENDING, &mddev->flags);
rdev_for_each(rdev, mddev) {
if (rdev->badblocks.changed) {
rdev->badblocks.changed = 0;
md_ack_all_badblocks(&rdev->badblocks);
md_error(mddev, rdev);
}
clear_bit(Blocked, &rdev->flags);
clear_bit(BlockedBadBlocks, &rdev->flags);
wake_up(&rdev->blocked_wait);
}
}
wake_up(&mddev->sb_wait);
return;
}
spin_lock(&mddev->lock);
mddev->utime = get_seconds();
if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
force_change = 1;
if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
/* just a clean <-> dirty transition, possibly leave spares alone,
* though if events isn't the right even/odd, we will have to do
* spares after all
*/
nospares = 1;
if (force_change)
nospares = 0;
if (mddev->degraded)
/* If the array is degraded, then skipping spares is both
* dangerous and fairly pointless.
* Dangerous because a device that was removed from the array
* might have an event_count that still looks up-to-date,
* so it can be re-added without a resync.
* Pointless because if there are any spares to skip,
* then a recovery will happen and soon that array won't
* be degraded any more and the spare can go back to sleep then.
*/
nospares = 0;
sync_req = mddev->in_sync;
/* If this is just a dirty<->clean transition, and the array is clean
* and 'events' is odd, we can roll back to the previous clean state */
if (nospares
&& (mddev->in_sync && mddev->recovery_cp == MaxSector)
&& mddev->can_decrease_events
&& mddev->events != 1) {
mddev->events--;
mddev->can_decrease_events = 0;
} else {
/* otherwise we have to go forward and ... */
mddev->events ++;
mddev->can_decrease_events = nospares;
}
/*
* This 64-bit counter should never wrap.
* Either we are in around ~1 trillion A.C., assuming
* 1 reboot per second, or we have a bug...
*/
WARN_ON(mddev->events == 0);
rdev_for_each(rdev, mddev) {
if (rdev->badblocks.changed)
any_badblocks_changed++;
if (test_bit(Faulty, &rdev->flags))
set_bit(FaultRecorded, &rdev->flags);
}
sync_sbs(mddev, nospares);
spin_unlock(&mddev->lock);
pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
mdname(mddev), mddev->in_sync);
bitmap_update_sb(mddev->bitmap);
rdev_for_each(rdev, mddev) {
char b[BDEVNAME_SIZE];
if (rdev->sb_loaded != 1)
continue; /* no noise on spare devices */
if (!test_bit(Faulty, &rdev->flags)) {
md_super_write(mddev,rdev,
rdev->sb_start, rdev->sb_size,
rdev->sb_page);
pr_debug("md: (write) %s's sb offset: %llu\n",
bdevname(rdev->bdev, b),
(unsigned long long)rdev->sb_start);
rdev->sb_events = mddev->events;
if (rdev->badblocks.size) {
md_super_write(mddev, rdev,
rdev->badblocks.sector,
rdev->badblocks.size << 9,
rdev->bb_page);
rdev->badblocks.size = 0;
}
} else
pr_debug("md: %s (skipping faulty)\n",
bdevname(rdev->bdev, b));
if (mddev->level == LEVEL_MULTIPATH)
/* only need to write one superblock... */
break;
}
md_super_wait(mddev);
/* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
spin_lock(&mddev->lock);
if (mddev->in_sync != sync_req ||
test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
/* have to write it out again */
spin_unlock(&mddev->lock);
goto repeat;
}
clear_bit(MD_CHANGE_PENDING, &mddev->flags);
spin_unlock(&mddev->lock);
wake_up(&mddev->sb_wait);
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
rdev_for_each(rdev, mddev) {
if (test_and_clear_bit(FaultRecorded, &rdev->flags))
clear_bit(Blocked, &rdev->flags);
if (any_badblocks_changed)
md_ack_all_badblocks(&rdev->badblocks);
clear_bit(BlockedBadBlocks, &rdev->flags);
wake_up(&rdev->blocked_wait);
}
}
EXPORT_SYMBOL(md_update_sb);
static int add_bound_rdev(struct md_rdev *rdev)
{
struct mddev *mddev = rdev->mddev;
int err = 0;
if (!mddev->pers->hot_remove_disk) {
/* If there is hot_add_disk but no hot_remove_disk
* then added disks are for geometry changes,
* and should be added immediately.
*/
super_types[mddev->major_version].
validate_super(mddev, rdev);
err = mddev->pers->hot_add_disk(mddev, rdev);
if (err) {
unbind_rdev_from_array(rdev);
export_rdev(rdev);
return err;
}
}
sysfs_notify_dirent_safe(rdev->sysfs_state);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
if (mddev->degraded)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_new_event(mddev);
md_wakeup_thread(mddev->thread);
return 0;
}
/* words written to sysfs files may, or may not, be \n terminated.
* We want to accept either form. For this we use cmd_match.
*/
static int cmd_match(const char *cmd, const char *str)
{
/* See if cmd, written into a sysfs file, matches
* str. They must either be the same, or cmd can
* have a trailing newline
*/
while (*cmd && *str && *cmd == *str) {
cmd++;
str++;
}
if (*cmd == '\n')
cmd++;
if (*str || *cmd)
return 0;
return 1;
}
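/*
 * Example (illustrative only): cmd_match() tolerates one trailing
 * newline on 'cmd', so a sysfs write of either form matches:
 *
 *	cmd_match("faulty\n", "faulty")  -> 1
 *	cmd_match("faulty",   "faulty")  -> 1
 *	cmd_match("faulty!",  "faulty")  -> 0	(trailing junk)
 *	cmd_match("fault",    "faulty")  -> 0	(prefix only)
 */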
struct rdev_sysfs_entry {
struct attribute attr;
ssize_t (*show)(struct md_rdev *, char *);
ssize_t (*store)(struct md_rdev *, const char *, size_t);
};
static ssize_t
state_show(struct md_rdev *rdev, char *page)
{
char *sep = "";
size_t len = 0;
unsigned long flags = ACCESS_ONCE(rdev->flags);
if (test_bit(Faulty, &flags) ||
rdev->badblocks.unacked_exist) {
len+= sprintf(page+len, "%sfaulty",sep);
sep = ",";
}
if (test_bit(In_sync, &flags)) {
len += sprintf(page+len, "%sin_sync",sep);
sep = ",";
}
if (test_bit(WriteMostly, &flags)) {
len += sprintf(page+len, "%swrite_mostly",sep);
sep = ",";
}
if (test_bit(Blocked, &flags) ||
(rdev->badblocks.unacked_exist
&& !test_bit(Faulty, &flags))) {
len += sprintf(page+len, "%sblocked", sep);
sep = ",";
}
if (!test_bit(Faulty, &flags) &&
!test_bit(In_sync, &flags)) {
len += sprintf(page+len, "%sspare", sep);
sep = ",";
}
if (test_bit(WriteErrorSeen, &flags)) {
len += sprintf(page+len, "%swrite_error", sep);
sep = ",";
}
if (test_bit(WantReplacement, &flags)) {
len += sprintf(page+len, "%swant_replacement", sep);
sep = ",";
}
if (test_bit(Replacement, &flags)) {
len += sprintf(page+len, "%sreplacement", sep);
sep = ",";
}
return len+sprintf(page+len, "\n");
}
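/*
 * Example output (illustrative): reading 'state' for a healthy member
 * yields "in_sync\n"; a failed device that is also administratively
 * blocked yields "faulty,blocked\n".
 */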
static ssize_t
state_store(struct md_rdev *rdev, const char *buf, size_t len)
{
/* can write
* faulty - simulates an error
* remove - disconnects the device
* writemostly - sets write_mostly
* -writemostly - clears write_mostly
* blocked - sets the Blocked flag
* -blocked - clears the Blocked flag and possibly simulates an error
* insync - sets In_sync provided the device isn't active
* -insync - clear Insync for a device with a slot assigned,
* so that it gets rebuilt based on bitmap
* write_error - sets WriteErrorSeen
* -write_error - clears WriteErrorSeen
*/
int err = -EINVAL;
if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
md_error(rdev->mddev, rdev);
if (test_bit(Faulty, &rdev->flags))
err = 0;
else
err = -EBUSY;
} else if (cmd_match(buf, "remove")) {
if (rdev->raid_disk >= 0)
err = -EBUSY;
else {
struct mddev *mddev = rdev->mddev;
if (mddev_is_clustered(mddev))
md_cluster_ops->remove_disk(mddev, rdev);
md_kick_rdev_from_array(rdev);
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_start(mddev);
if (mddev->pers)
md_update_sb(mddev, 1);
md_new_event(mddev);
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
err = 0;
}
} else if (cmd_match(buf, "writemostly")) {
set_bit(WriteMostly, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "-writemostly")) {
clear_bit(WriteMostly, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "blocked")) {
set_bit(Blocked, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "-blocked")) {
if (!test_bit(Faulty, &rdev->flags) &&
rdev->badblocks.unacked_exist) {
/* metadata handler doesn't understand badblocks,
* so we need to fail the device
*/
md_error(rdev->mddev, rdev);
}
clear_bit(Blocked, &rdev->flags);
clear_bit(BlockedBadBlocks, &rdev->flags);
wake_up(&rdev->blocked_wait);
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
md_wakeup_thread(rdev->mddev->thread);
err = 0;
} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
set_bit(In_sync, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0) {
if (rdev->mddev->pers == NULL) {
clear_bit(In_sync, &rdev->flags);
rdev->saved_raid_disk = rdev->raid_disk;
rdev->raid_disk = -1;
err = 0;
}
} else if (cmd_match(buf, "write_error")) {
set_bit(WriteErrorSeen, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "-write_error")) {
clear_bit(WriteErrorSeen, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "want_replacement")) {
/* Any non-spare device that is not a replacement can
* become want_replacement at any time, but we then need to
* check if recovery is needed.
*/
if (rdev->raid_disk >= 0 &&
!test_bit(Replacement, &rdev->flags))
set_bit(WantReplacement, &rdev->flags);
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
md_wakeup_thread(rdev->mddev->thread);
err = 0;
} else if (cmd_match(buf, "-want_replacement")) {
/* Clearing 'want_replacement' is always allowed.
* Once replacements starts it is too late though.
*/
err = 0;
clear_bit(WantReplacement, &rdev->flags);
} else if (cmd_match(buf, "replacement")) {
/* Can only set a device as a replacement when array has not
* yet been started. Once running, replacement is automatic
* from spares, or by assigning 'slot'.
*/
if (rdev->mddev->pers)
err = -EBUSY;
else {
set_bit(Replacement, &rdev->flags);
err = 0;
}
} else if (cmd_match(buf, "-replacement")) {
/* Similarly, can only clear Replacement before start */
if (rdev->mddev->pers)
err = -EBUSY;
else {
clear_bit(Replacement, &rdev->flags);
err = 0;
}
} else if (cmd_match(buf, "re-add")) {
if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1)) {
/* clear_bit is performed _after_ all the devices
* have their local Faulty bit cleared. If any writes
* happen in the meantime in the local node, they
* will land in the local bitmap, which will be synced
* by this node eventually
*/
if (!mddev_is_clustered(rdev->mddev) ||
(err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
clear_bit(Faulty, &rdev->flags);
err = add_bound_rdev(rdev);
}
} else
err = -EBUSY;
}
if (!err)
sysfs_notify_dirent_safe(rdev->sysfs_state);
return err ? err : len;
}
static struct rdev_sysfs_entry rdev_state =
__ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
static ssize_t
errors_show(struct md_rdev *rdev, char *page)
{
return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
}
static ssize_t
errors_store(struct md_rdev *rdev, const char *buf, size_t len)
{
unsigned int n;
int rv;
rv = kstrtouint(buf, 10, &n);
if (rv < 0)
return rv;
atomic_set(&rdev->corrected_errors, n);
return len;
}
static struct rdev_sysfs_entry rdev_errors =
__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
static ssize_t
slot_show(struct md_rdev *rdev, char *page)
{
if (rdev->raid_disk < 0)
return sprintf(page, "none\n");
else
return sprintf(page, "%d\n", rdev->raid_disk);
}
static ssize_t
slot_store(struct md_rdev *rdev, const char *buf, size_t len)
{
int slot;
int err;
if (strncmp(buf, "none", 4)==0)
slot = -1;
else {
err = kstrtouint(buf, 10, (unsigned int *)&slot);
if (err < 0)
return err;
}
if (rdev->mddev->pers && slot == -1) {
/* Setting 'slot' on an active array requires also
* updating the 'rd%d' link, and communicating
* with the personality with ->hot_*_disk.
* For now we only support removing
* failed/spare devices. This normally happens automatically,
* but not when the metadata is externally managed.
*/
if (rdev->raid_disk == -1)
return -EEXIST;
/* personality does all needed checks */
if (rdev->mddev->pers->hot_remove_disk == NULL)
return -EINVAL;
clear_bit(Blocked, &rdev->flags);
remove_and_add_spares(rdev->mddev, rdev);
if (rdev->raid_disk >= 0)
return -EBUSY;
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
md_wakeup_thread(rdev->mddev->thread);
} else if (rdev->mddev->pers) {
/* Activating a spare .. or possibly reactivating
* if we ever get bitmaps working here.
*/
if (rdev->raid_disk != -1)
return -EBUSY;
if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
return -EBUSY;
if (rdev->mddev->pers->hot_add_disk == NULL)
return -EINVAL;
if (slot >= rdev->mddev->raid_disks &&
slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
return -ENOSPC;
rdev->raid_disk = slot;
if (test_bit(In_sync, &rdev->flags))
rdev->saved_raid_disk = slot;
else
rdev->saved_raid_disk = -1;
clear_bit(In_sync, &rdev->flags);
clear_bit(Bitmap_sync, &rdev->flags);
err = rdev->mddev->pers->
hot_add_disk(rdev->mddev, rdev);
if (err) {
rdev->raid_disk = -1;
return err;
} else
sysfs_notify_dirent_safe(rdev->sysfs_state);
if (sysfs_link_rdev(rdev->mddev, rdev))
/* failure here is OK */;
/* don't wakeup anyone, leave that to userspace. */
} else {
if (slot >= rdev->mddev->raid_disks &&
slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
return -ENOSPC;
rdev->raid_disk = slot;
/* assume it is working */
clear_bit(Faulty, &rdev->flags);
clear_bit(WriteMostly, &rdev->flags);
set_bit(In_sync, &rdev->flags);
sysfs_notify_dirent_safe(rdev->sysfs_state);
}
return len;
}
static struct rdev_sysfs_entry rdev_slot =
__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
static ssize_t
offset_show(struct md_rdev *rdev, char *page)
{
return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
}
static ssize_t
offset_store(struct md_rdev *rdev, const char *buf, size_t len)
{
unsigned long long offset;
if (kstrtoull(buf, 10, &offset) < 0)
return -EINVAL;
if (rdev->mddev->pers && rdev->raid_disk >= 0)
return -EBUSY;
if (rdev->sectors && rdev->mddev->external)
/* Must set offset before size, so overlap checks
* can be sane */
return -EBUSY;
rdev->data_offset = offset;
rdev->new_data_offset = offset;
return len;
}
static struct rdev_sysfs_entry rdev_offset =
__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
{
return sprintf(page, "%llu\n",
(unsigned long long)rdev->new_data_offset);
}
static ssize_t new_offset_store(struct md_rdev *rdev,
const char *buf, size_t len)
{
unsigned long long new_offset;
struct mddev *mddev = rdev->mddev;
if (kstrtoull(buf, 10, &new_offset) < 0)
return -EINVAL;
if (mddev->sync_thread ||
test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
return -EBUSY;
if (new_offset == rdev->data_offset)
/* reset is always permitted */
;
else if (new_offset > rdev->data_offset) {
/* must not push array size beyond rdev_sectors */
if (new_offset - rdev->data_offset
+ mddev->dev_sectors > rdev->sectors)
return -E2BIG;
}
/* Metadata worries about other space details. */
/* decreasing the offset is inconsistent with a backwards
* reshape.
*/
if (new_offset < rdev->data_offset &&
mddev->reshape_backwards)
return -EINVAL;
/* Increasing offset is inconsistent with forwards
* reshape. reshape_direction should be set to
* 'backwards' first.
*/
if (new_offset > rdev->data_offset &&
!mddev->reshape_backwards)
return -EINVAL;
if (mddev->pers && mddev->persistent &&
!super_types[mddev->major_version]
.allow_new_offset(rdev, new_offset))
return -E2BIG;
rdev->new_data_offset = new_offset;
if (new_offset > rdev->data_offset)
mddev->reshape_backwards = 1;
else if (new_offset < rdev->data_offset)
mddev->reshape_backwards = 0;
return len;
}
static struct rdev_sysfs_entry rdev_new_offset =
__ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
static ssize_t
rdev_size_show(struct md_rdev *rdev, char *page)
{
return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
}
static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
{
/* check if two start/length pairs overlap */
if (s1+l1 <= s2)
return 0;
if (s2+l2 <= s1)
return 0;
return 1;
}
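/*
 * Worked example (illustrative): regions [100, 150) and [140, 190)
 * overlap, while [100, 150) and [150, 200) merely touch:
 *
 *	overlaps(100, 50, 140, 50) -> 1
 *	overlaps(100, 50, 150, 50) -> 0	(s1+l1 == s2, half-open ranges)
 */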
static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
{
unsigned long long blocks;
sector_t new;
if (kstrtoull(buf, 10, &blocks) < 0)
return -EINVAL;
if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
return -EINVAL; /* sector conversion overflow */
new = blocks * 2;
if (new != blocks * 2)
return -EINVAL; /* unsigned long long to sector_t overflow */
*sectors = new;
return 0;
}
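/*
 * Example (illustrative): 1K blocks become 512-byte sectors by
 * doubling, with both the top bit and the sector_t width checked:
 *
 *	strict_blocks_to_sectors("1024\n", &s) -> 0, s == 2048
 *	any value with bit 63 set (>= 2^63 blocks) -> -EINVAL
 */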
static ssize_t
rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
{
struct mddev *my_mddev = rdev->mddev;
sector_t oldsectors = rdev->sectors;
sector_t sectors;
if (strict_blocks_to_sectors(buf, &sectors) < 0)
return -EINVAL;
if (rdev->data_offset != rdev->new_data_offset)
return -EINVAL; /* too confusing */
if (my_mddev->pers && rdev->raid_disk >= 0) {
if (my_mddev->persistent) {
sectors = super_types[my_mddev->major_version].
rdev_size_change(rdev, sectors);
if (!sectors)
return -EBUSY;
} else if (!sectors)
sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
rdev->data_offset;
if (!my_mddev->pers->resize)
/* Cannot change size for RAID0 or Linear etc */
return -EINVAL;
}
if (sectors < my_mddev->dev_sectors)
return -EINVAL; /* component must fit device */
rdev->sectors = sectors;
if (sectors > oldsectors && my_mddev->external) {
/* Need to check that all other rdevs with the same
* ->bdev do not overlap. 'rcu' is sufficient to walk
* the rdev lists safely.
* This check does not provide a hard guarantee, it
* just helps avoid dangerous mistakes.
*/
struct mddev *mddev;
int overlap = 0;
struct list_head *tmp;
rcu_read_lock();
for_each_mddev(mddev, tmp) {
struct md_rdev *rdev2;
rdev_for_each(rdev2, mddev)
if (rdev->bdev == rdev2->bdev &&
rdev != rdev2 &&
overlaps(rdev->data_offset, rdev->sectors,
rdev2->data_offset,
rdev2->sectors)) {
overlap = 1;
break;
}
if (overlap) {
mddev_put(mddev);
break;
}
}
rcu_read_unlock();
if (overlap) {
/* Someone else could have slipped in a size
* change here, but doing so is just silly.
* We put oldsectors back because we *know* it is
* safe, and trust userspace not to race with
* itself
*/
rdev->sectors = oldsectors;
return -EBUSY;
}
}
return len;
}
static struct rdev_sysfs_entry rdev_size =
__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
{
unsigned long long recovery_start = rdev->recovery_offset;
if (test_bit(In_sync, &rdev->flags) ||
recovery_start == MaxSector)
return sprintf(page, "none\n");
return sprintf(page, "%llu\n", recovery_start);
}
static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
{
unsigned long long recovery_start;
if (cmd_match(buf, "none"))
recovery_start = MaxSector;
else if (kstrtoull(buf, 10, &recovery_start))
return -EINVAL;
if (rdev->mddev->pers &&
rdev->raid_disk >= 0)
return -EBUSY;
rdev->recovery_offset = recovery_start;
if (recovery_start == MaxSector)
set_bit(In_sync, &rdev->flags);
else
clear_bit(In_sync, &rdev->flags);
return len;
}
static struct rdev_sysfs_entry rdev_recovery_start =
__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
static ssize_t
badblocks_show(struct badblocks *bb, char *page, int unack);
static ssize_t
badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack);
static ssize_t bb_show(struct md_rdev *rdev, char *page)
{
return badblocks_show(&rdev->badblocks, page, 0);
}
static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
{
int rv = badblocks_store(&rdev->badblocks, page, len, 0);
/* Maybe that ack was all we needed */
if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
wake_up(&rdev->blocked_wait);
return rv;
}
static struct rdev_sysfs_entry rdev_bad_blocks =
__ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
static ssize_t ubb_show(struct md_rdev *rdev, char *page)
{
return badblocks_show(&rdev->badblocks, page, 1);
}
static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
{
return badblocks_store(&rdev->badblocks, page, len, 1);
}
static struct rdev_sysfs_entry rdev_unack_bad_blocks =
__ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
static struct attribute *rdev_default_attrs[] = {
&rdev_state.attr,
&rdev_errors.attr,
&rdev_slot.attr,
&rdev_offset.attr,
&rdev_new_offset.attr,
&rdev_size.attr,
&rdev_recovery_start.attr,
&rdev_bad_blocks.attr,
&rdev_unack_bad_blocks.attr,
NULL,
};
static ssize_t
rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
if (!entry->show)
return -EIO;
if (!rdev->mddev)
return -EBUSY;
return entry->show(rdev, page);
}
static ssize_t
rdev_attr_store(struct kobject *kobj, struct attribute *attr,
const char *page, size_t length)
{
struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
ssize_t rv;
struct mddev *mddev = rdev->mddev;
if (!entry->store)
return -EIO;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
rv = mddev ? mddev_lock(mddev): -EBUSY;
if (!rv) {
if (rdev->mddev == NULL)
rv = -EBUSY;
else
rv = entry->store(rdev, page, length);
mddev_unlock(mddev);
}
return rv;
}
static void rdev_free(struct kobject *ko)
{
struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
kfree(rdev);
}
static const struct sysfs_ops rdev_sysfs_ops = {
.show = rdev_attr_show,
.store = rdev_attr_store,
};
static struct kobj_type rdev_ktype = {
.release = rdev_free,
.sysfs_ops = &rdev_sysfs_ops,
.default_attrs = rdev_default_attrs,
};
int md_rdev_init(struct md_rdev *rdev)
{
rdev->desc_nr = -1;
rdev->saved_raid_disk = -1;
rdev->raid_disk = -1;
rdev->flags = 0;
rdev->data_offset = 0;
rdev->new_data_offset = 0;
rdev->sb_events = 0;
rdev->last_read_error.tv_sec = 0;
rdev->last_read_error.tv_nsec = 0;
rdev->sb_loaded = 0;
rdev->bb_page = NULL;
atomic_set(&rdev->nr_pending, 0);
atomic_set(&rdev->read_errors, 0);
atomic_set(&rdev->corrected_errors, 0);
INIT_LIST_HEAD(&rdev->same_set);
init_waitqueue_head(&rdev->blocked_wait);
/* Add space to store bad block list.
* This reserves the space even on arrays where it cannot
* be used - I wonder if that matters
*/
rdev->badblocks.count = 0;
rdev->badblocks.shift = -1; /* disabled until explicitly enabled */
rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL);
seqlock_init(&rdev->badblocks.lock);
if (rdev->badblocks.page == NULL)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL_GPL(md_rdev_init);
/*
* Import a device. If 'super_format' >= 0, then sanity check the superblock
*
* mark the device faulty if:
*
* - the device is nonexistent (zero size)
* - the device has no valid superblock
*
* a faulty rdev _never_ has rdev->sb set.
*/
static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
{
char b[BDEVNAME_SIZE];
int err;
struct md_rdev *rdev;
sector_t size;
rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
if (!rdev) {
printk(KERN_ERR "md: could not alloc mem for new device!\n");
return ERR_PTR(-ENOMEM);
}
err = md_rdev_init(rdev);
if (err)
goto abort_free;
err = alloc_disk_sb(rdev);
if (err)
goto abort_free;
err = lock_rdev(rdev, newdev, super_format == -2);
if (err)
goto abort_free;
kobject_init(&rdev->kobj, &rdev_ktype);
size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
if (!size) {
printk(KERN_WARNING
"md: %s has zero or unknown size, marking faulty!\n",
bdevname(rdev->bdev,b));
err = -EINVAL;
goto abort_free;
}
if (super_format >= 0) {
err = super_types[super_format].
load_super(rdev, NULL, super_minor);
if (err == -EINVAL) {
printk(KERN_WARNING
"md: %s does not have a valid v%d.%d "
"superblock, not importing!\n",
bdevname(rdev->bdev,b),
super_format, super_minor);
goto abort_free;
}
if (err < 0) {
printk(KERN_WARNING
"md: could not read %s's sb, not importing!\n",
bdevname(rdev->bdev,b));
goto abort_free;
}
}
return rdev;
abort_free:
if (rdev->bdev)
unlock_rdev(rdev);
md_rdev_clear(rdev);
kfree(rdev);
return ERR_PTR(err);
}
/*
* Check a full RAID array for plausibility
*/
static void analyze_sbs(struct mddev *mddev)
{
int i;
struct md_rdev *rdev, *freshest, *tmp;
char b[BDEVNAME_SIZE];
freshest = NULL;
rdev_for_each_safe(rdev, tmp, mddev)
switch (super_types[mddev->major_version].
load_super(rdev, freshest, mddev->minor_version)) {
case 1:
freshest = rdev;
break;
case 0:
break;
default:
printk(KERN_ERR
"md: fatal superblock inconsistency in %s"
" -- removing from array\n",
bdevname(rdev->bdev,b));
md_kick_rdev_from_array(rdev);
}
super_types[mddev->major_version].
validate_super(mddev, freshest);
i = 0;
rdev_for_each_safe(rdev, tmp, mddev) {
if (mddev->max_disks &&
(rdev->desc_nr >= mddev->max_disks ||
i > mddev->max_disks)) {
printk(KERN_WARNING
"md: %s: %s: only %d devices permitted\n",
mdname(mddev), bdevname(rdev->bdev, b),
mddev->max_disks);
md_kick_rdev_from_array(rdev);
continue;
}
if (rdev != freshest) {
if (super_types[mddev->major_version].
validate_super(mddev, rdev)) {
printk(KERN_WARNING "md: kicking non-fresh %s"
" from array!\n",
bdevname(rdev->bdev,b));
md_kick_rdev_from_array(rdev);
continue;
}
/* No device should have a Candidate flag
* when reading devices
*/
if (test_bit(Candidate, &rdev->flags)) {
pr_info("md: kicking Cluster Candidate %s from array!\n",
bdevname(rdev->bdev, b));
md_kick_rdev_from_array(rdev);
}
}
if (mddev->level == LEVEL_MULTIPATH) {
rdev->desc_nr = i++;
rdev->raid_disk = rdev->desc_nr;
set_bit(In_sync, &rdev->flags);
} else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
rdev->raid_disk = -1;
clear_bit(In_sync, &rdev->flags);
}
}
}
/* Read a fixed-point number.
* Numbers in sysfs attributes should be in "standard" units where
* possible, so time should be in seconds.
* However we internally use a much smaller unit such as
* milliseconds or jiffies.
* This function takes a decimal number with a possible fractional
* component, and produces an integer which is the result of
* multiplying that number by 10^'scale', all without any
* floating-point arithmetic.
*/
int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
{
unsigned long result = 0;
long decimals = -1;
while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
if (*cp == '.')
decimals = 0;
else if (decimals < scale) {
unsigned int value;
value = *cp - '0';
result = result * 10 + value;
if (decimals >= 0)
decimals++;
}
cp++;
}
if (*cp == '\n')
cp++;
if (*cp)
return -EINVAL;
if (decimals < 0)
decimals = 0;
while (decimals < scale) {
result *= 10;
decimals ++;
}
*res = result;
return 0;
}
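/*
 * Worked example (illustrative): with scale == 3 (milliseconds),
 * "1.275" parses as 1275 and "0.1" as 100; fractional digits beyond
 * the scale are simply dropped:
 *
 *	strict_strtoul_scaled("1.275",  &res, 3) -> res == 1275
 *	strict_strtoul_scaled("0.1",    &res, 3) -> res == 100
 *	strict_strtoul_scaled("1.2345", &res, 3) -> res == 1234
 */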
static void md_safemode_timeout(unsigned long data);
static ssize_t
safe_delay_show(struct mddev *mddev, char *page)
{
int msec = (mddev->safemode_delay*1000)/HZ;
return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
}
static ssize_t
safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
{
unsigned long msec;
if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
return -EINVAL;
if (msec == 0)
mddev->safemode_delay = 0;
else {
unsigned long old_delay = mddev->safemode_delay;
unsigned long new_delay = (msec*HZ)/1000;
if (new_delay == 0)
new_delay = 1;
mddev->safemode_delay = new_delay;
if (new_delay < old_delay || old_delay == 0)
mod_timer(&mddev->safemode_timer, jiffies+1);
}
return len;
}
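/*
 * Usage example (illustrative): from userspace, writing a decimal
 * number of seconds tunes the safemode delay, e.g.
 *
 *	echo 0.100 > /sys/block/md0/md/safe_mode_delay
 *
 * parses via strict_strtoul_scaled(..., 3) to 100 ms, which is then
 * converted to jiffies (with a minimum of one jiffy).
 */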
static struct md_sysfs_entry md_safe_delay =
__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
static ssize_t
level_show(struct mddev *mddev, char *page)
{
struct md_personality *p;
int ret;
spin_lock(&mddev->lock);
p = mddev->pers;
if (p)
ret = sprintf(page, "%s\n", p->name);
else if (mddev->clevel[0])
ret = sprintf(page, "%s\n", mddev->clevel);
else if (mddev->level != LEVEL_NONE)
ret = sprintf(page, "%d\n", mddev->level);
else
ret = 0;
spin_unlock(&mddev->lock);
return ret;
}
static ssize_t
level_store(struct mddev *mddev, const char *buf, size_t len)
{
char clevel[16];
ssize_t rv;
size_t slen = len;
struct md_personality *pers, *oldpers;
long level;
void *priv, *oldpriv;
struct md_rdev *rdev;
if (slen == 0 || slen >= sizeof(clevel))
return -EINVAL;
rv = mddev_lock(mddev);
if (rv)
return rv;
if (mddev->pers == NULL) {
strncpy(mddev->clevel, buf, slen);
if (mddev->clevel[slen-1] == '\n')
slen--;
mddev->clevel[slen] = 0;
mddev->level = LEVEL_NONE;
rv = len;
goto out_unlock;
}
rv = -EROFS;
if (mddev->ro)
goto out_unlock;
/* request to change the personality. Need to ensure:
* - array is not engaged in resync/recovery/reshape
* - old personality can be suspended
* - new personality can take over the array.
*/
rv = -EBUSY;
if (mddev->sync_thread ||
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
mddev->reshape_position != MaxSector ||
mddev->sysfs_active)
goto out_unlock;
rv = -EINVAL;
if (!mddev->pers->quiesce) {
printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
mdname(mddev), mddev->pers->name);
goto out_unlock;
}
/* Now find the new personality */
strncpy(clevel, buf, slen);
if (clevel[slen-1] == '\n')
slen--;
clevel[slen] = 0;
if (kstrtol(clevel, 10, &level))
level = LEVEL_NONE;
if (request_module("md-%s", clevel) != 0)
request_module("md-level-%s", clevel);
spin_lock(&pers_lock);
pers = find_pers(level, clevel);
if (!pers || !try_module_get(pers->owner)) {
spin_unlock(&pers_lock);
printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
rv = -EINVAL;
goto out_unlock;
}
spin_unlock(&pers_lock);
if (pers == mddev->pers) {
/* Nothing to do! */
module_put(pers->owner);
rv = len;
goto out_unlock;
}
if (!pers->takeover) {
module_put(pers->owner);
printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
mdname(mddev), clevel);
rv = -EINVAL;
goto out_unlock;
}
rdev_for_each(rdev, mddev)
rdev->new_raid_disk = rdev->raid_disk;
/* ->takeover must set new_* and/or delta_disks
* if it succeeds, and may set them when it fails.
*/
priv = pers->takeover(mddev);
if (IS_ERR(priv)) {
mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout;
mddev->new_chunk_sectors = mddev->chunk_sectors;
mddev->raid_disks -= mddev->delta_disks;
mddev->delta_disks = 0;
mddev->reshape_backwards = 0;
module_put(pers->owner);
printk(KERN_WARNING "md: %s: %s would not accept array\n",
mdname(mddev), clevel);
rv = PTR_ERR(priv);
goto out_unlock;
}
/* Looks like we have a winner */
mddev_suspend(mddev);
mddev_detach(mddev);
spin_lock(&mddev->lock);
oldpers = mddev->pers;
oldpriv = mddev->private;
mddev->pers = pers;
mddev->private = priv;
strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
mddev->level = mddev->new_level;
mddev->layout = mddev->new_layout;
mddev->chunk_sectors = mddev->new_chunk_sectors;
mddev->delta_disks = 0;
mddev->reshape_backwards = 0;
mddev->degraded = 0;
spin_unlock(&mddev->lock);
if (oldpers->sync_request == NULL &&
mddev->external) {
/* We are converting from a no-redundancy array
* to a redundancy array and metadata is managed
* externally so we need to be sure that writes
* won't block due to a need to transition
* clean->dirty
* until external management is started.
*/
mddev->in_sync = 0;
mddev->safemode_delay = 0;
mddev->safemode = 0;
}
oldpers->free(mddev, oldpriv);
if (oldpers->sync_request == NULL &&
pers->sync_request != NULL) {
/* need to add the md_redundancy_group */
if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
printk(KERN_WARNING
"md: cannot register extra attributes for %s\n",
mdname(mddev));
mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
}
if (oldpers->sync_request != NULL &&
pers->sync_request == NULL) {
/* need to remove the md_redundancy_group */
if (mddev->to_remove == NULL)
mddev->to_remove = &md_redundancy_group;
}
rdev_for_each(rdev, mddev) {
if (rdev->raid_disk < 0)
continue;
if (rdev->new_raid_disk >= mddev->raid_disks)
rdev->new_raid_disk = -1;
if (rdev->new_raid_disk == rdev->raid_disk)
continue;
sysfs_unlink_rdev(mddev, rdev);
}
rdev_for_each(rdev, mddev) {
if (rdev->raid_disk < 0)
continue;
if (rdev->new_raid_disk == rdev->raid_disk)
continue;
rdev->raid_disk = rdev->new_raid_disk;
if (rdev->raid_disk < 0)
clear_bit(In_sync, &rdev->flags);
else {
if (sysfs_link_rdev(mddev, rdev))
printk(KERN_WARNING "md: cannot register rd%d"
" for %s after level change\n",
rdev->raid_disk, mdname(mddev));
}
}
if (pers->sync_request == NULL) {
/* this is now an array without redundancy, so
* it must always be in_sync
*/
mddev->in_sync = 1;
del_timer_sync(&mddev->safemode_timer);
}
blk_set_stacking_limits(&mddev->queue->limits);
pers->run(mddev);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
mddev_resume(mddev);
if (!mddev->thread)
md_update_sb(mddev, 1);
sysfs_notify(&mddev->kobj, NULL, "level");
md_new_event(mddev);
rv = len;
out_unlock:
mddev_unlock(mddev);
return rv;
}
static struct md_sysfs_entry md_level =
__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
static ssize_t
layout_show(struct mddev *mddev, char *page)
{
/* just a number, not meaningful for all levels */
if (mddev->reshape_position != MaxSector &&
mddev->layout != mddev->new_layout)
return sprintf(page, "%d (%d)\n",
mddev->new_layout, mddev->layout);
return sprintf(page, "%d\n", mddev->layout);
}
static ssize_t
layout_store(struct mddev *mddev, const char *buf, size_t len)
{
unsigned int n;
int err;
err = kstrtouint(buf, 10, &n);
if (err < 0)
return err;
err = mddev_lock(mddev);
if (err)
return err;
if (mddev->pers) {
if (mddev->pers->check_reshape == NULL)
err = -EBUSY;
else if (mddev->ro)
err = -EROFS;
else {
mddev->new_layout = n;
err = mddev->pers->check_reshape(mddev);
if (err)
mddev->new_layout = mddev->layout;
}
} else {
mddev->new_layout = n;
if (mddev->reshape_position == MaxSector)
mddev->layout = n;
}
mddev_unlock(mddev);
return err ?: len;
}
static struct md_sysfs_entry md_layout =
__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
static ssize_t
raid_disks_show(struct mddev *mddev, char *page)
{
if (mddev->raid_disks == 0)
return 0;
if (mddev->reshape_position != MaxSector &&
mddev->delta_disks != 0)
return sprintf(page, "%d (%d)\n", mddev->raid_disks,
mddev->raid_disks - mddev->delta_disks);
return sprintf(page, "%d\n", mddev->raid_disks);
}
static int update_raid_disks(struct mddev *mddev, int raid_disks);
static ssize_t
raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
{
unsigned int n;
int err;
err = kstrtouint(buf, 10, &n);
if (err < 0)
return err;
err = mddev_lock(mddev);
if (err)
return err;
if (mddev->pers)
err = update_raid_disks(mddev, n);
else if (mddev->reshape_position != MaxSector) {
struct md_rdev *rdev;
int olddisks = mddev->raid_disks - mddev->delta_disks;
err = -EINVAL;
rdev_for_each(rdev, mddev) {
if (olddisks < n &&
rdev->data_offset < rdev->new_data_offset)
goto out_unlock;
if (olddisks > n &&
rdev->data_offset > rdev->new_data_offset)
goto out_unlock;
}
err = 0;
mddev->delta_disks = n - olddisks;
mddev->raid_disks = n;
mddev->reshape_backwards = (mddev->delta_disks < 0);
} else
mddev->raid_disks = n;
out_unlock:
mddev_unlock(mddev);
return err ? err : len;
}
static struct md_sysfs_entry md_raid_disks =
__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
static ssize_t
chunk_size_show(struct mddev *mddev, char *page)
{
if (mddev->reshape_position != MaxSector &&
mddev->chunk_sectors != mddev->new_chunk_sectors)
return sprintf(page, "%d (%d)\n",
mddev->new_chunk_sectors << 9,
mddev->chunk_sectors << 9);
return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
}
static ssize_t
chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
{
unsigned long n;
int err;
err = kstrtoul(buf, 10, &n);
if (err < 0)
return err;
err = mddev_lock(mddev);
if (err)
return err;
if (mddev->pers) {
if (mddev->pers->check_reshape == NULL)
err = -EBUSY;
else if (mddev->ro)
err = -EROFS;
else {
mddev->new_chunk_sectors = n >> 9;
err = mddev->pers->check_reshape(mddev);
if (err)
mddev->new_chunk_sectors = mddev->chunk_sectors;
}
} else {
mddev->new_chunk_sectors = n >> 9;
if (mddev->reshape_position == MaxSector)
mddev->chunk_sectors = n >> 9;
}
mddev_unlock(mddev);
return err ?: len;
}
static struct md_sysfs_entry md_chunk_size =
__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
static ssize_t
resync_start_show(struct mddev *mddev, char *page)
{
if (mddev->recovery_cp == MaxSector)
return sprintf(page, "none\n");
return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
}
static ssize_t
resync_start_store(struct mddev *mddev, const char *buf, size_t len)
{
unsigned long long n;
int err;
if (cmd_match(buf, "none"))
n = MaxSector;
else {
err = kstrtoull(buf, 10, &n);
if (err < 0)
return err;
if (n != (sector_t)n)
return -EINVAL;
}
err = mddev_lock(mddev);
if (err)
return err;
if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
err = -EBUSY;
if (!err) {
mddev->recovery_cp = n;
if (mddev->pers)
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
}
mddev_unlock(mddev);
return err ?: len;
}
static struct md_sysfs_entry md_resync_start =
__ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
resync_start_show, resync_start_store);
/*
* The array state can be:
*
* clear
* No devices, no size, no level
* Equivalent to STOP_ARRAY ioctl
* inactive
* May have some settings, but array is not active
* all IO results in error
* When written, doesn't tear down array, but just stops it
* suspended (not supported yet)
* All IO requests will block. The array can be reconfigured.
* Writing this, if accepted, will block until array is quiescent
* readonly
* no resync can happen. no superblocks get written.
* write requests fail
* read-auto
* like readonly, but behaves like 'clean' on a write request.
*
* clean - no pending writes, but otherwise active.
* When written to inactive array, starts without resync
* If a write request arrives then
* if metadata is known, mark 'dirty' and switch to 'active'.
* if not known, block and switch to write-pending
* If written to an active array that has pending writes, then fails.
* active
* fully active: IO and resync can be happening.
* When written to inactive array, starts with resync
*
* write-pending
* clean, but writes are blocked waiting for 'active' to be written.
*
* active-idle
* like active, but no writes have been seen for a while (100msec).
*
*/
enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
write_pending, active_idle, bad_word};
static char *array_states[] = {
"clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
"write-pending", "active-idle", NULL };
static int match_word(const char *word, char **list)
{
int n;
for (n=0; list[n]; n++)
if (cmd_match(word, list[n]))
break;
return n;
}
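/*
 * Example (illustrative): match_word() maps a written word to its
 * index in a NULL-terminated list, using cmd_match() semantics:
 *
 *	match_word("read-auto\n", array_states) -> read_auto
 *	match_word("nonsense", array_states)    -> bad_word (end of list)
 */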
static ssize_t
array_state_show(struct mddev *mddev, char *page)
{
enum array_state st = inactive;
if (mddev->pers)
switch(mddev->ro) {
case 1:
st = readonly;
break;
case 2:
st = read_auto;
break;
case 0:
if (mddev->in_sync)
st = clean;
else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
st = write_pending;
else if (mddev->safemode)
st = active_idle;
else
st = active;
}
else {
if (list_empty(&mddev->disks) &&
mddev->raid_disks == 0 &&
mddev->dev_sectors == 0)
st = clear;
else
st = inactive;
}
return sprintf(page, "%s\n", array_states[st]);
}
static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
static int do_md_run(struct mddev *mddev);
static int restart_array(struct mddev *mddev);
static ssize_t
array_state_store(struct mddev *mddev, const char *buf, size_t len)
{
int err;
enum array_state st = match_word(buf, array_states);
if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
/* don't take reconfig_mutex when toggling between
* clean and active
*/
spin_lock(&mddev->lock);
if (st == active) {
restart_array(mddev);
clear_bit(MD_CHANGE_PENDING, &mddev->flags);
wake_up(&mddev->sb_wait);
err = 0;
} else /* st == clean */ {
restart_array(mddev);
if (atomic_read(&mddev->writes_pending) == 0) {
if (mddev->in_sync == 0) {
mddev->in_sync = 1;
if (mddev->safemode == 1)
mddev->safemode = 0;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
}
err = 0;
} else
err = -EBUSY;
}
spin_unlock(&mddev->lock);
return err ?: len;
}
err = mddev_lock(mddev);
if (err)
return err;
err = -EINVAL;
switch(st) {
case bad_word:
break;
case clear:
/* stopping an active array */
err = do_md_stop(mddev, 0, NULL);
break;
case inactive:
/* stopping an active array */
if (mddev->pers)
err = do_md_stop(mddev, 2, NULL);
else
err = 0; /* already inactive */
break;
case suspended:
break; /* not supported yet */
case readonly:
if (mddev->pers)
err = md_set_readonly(mddev, NULL);
else {
mddev->ro = 1;
set_disk_ro(mddev->gendisk, 1);
err = do_md_run(mddev);
}
break;
case read_auto:
if (mddev->pers) {
if (mddev->ro == 0)
err = md_set_readonly(mddev, NULL);
else if (mddev->ro == 1)
err = restart_array(mddev);
if (err == 0) {
mddev->ro = 2;
set_disk_ro(mddev->gendisk, 0);
}
} else {
mddev->ro = 2;
err = do_md_run(mddev);
}
break;
case clean:
if (mddev->pers) {
restart_array(mddev);
spin_lock(&mddev->lock);
if (atomic_read(&mddev->writes_pending) == 0) {
if (mddev->in_sync == 0) {
mddev->in_sync = 1;
if (mddev->safemode == 1)
mddev->safemode = 0;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
}
err = 0;
} else
err = -EBUSY;
spin_unlock(&mddev->lock);
} else
err = -EINVAL;
break;
case active:
if (mddev->pers) {
restart_array(mddev);
clear_bit(MD_CHANGE_PENDING, &mddev->flags);
wake_up(&mddev->sb_wait);
err = 0;
} else {
mddev->ro = 0;
set_disk_ro(mddev->gendisk, 0);
err = do_md_run(mddev);
}
break;
case write_pending:
case active_idle:
/* these cannot be set */
break;
}
if (!err) {
if (mddev->hold_active == UNTIL_IOCTL)
mddev->hold_active = 0;
sysfs_notify_dirent_safe(mddev->sysfs_state);
}
mddev_unlock(mddev);
return err ?: len;
}
static struct md_sysfs_entry md_array_state =
__ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
static ssize_t
max_corrected_read_errors_show(struct mddev *mddev, char *page) {
return sprintf(page, "%d\n",
atomic_read(&mddev->max_corr_read_errors));
}
static ssize_t
max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
{
unsigned int n;
int rv;
rv = kstrtouint(buf, 10, &n);
if (rv < 0)
return rv;
atomic_set(&mddev->max_corr_read_errors, n);
return len;
}
static struct md_sysfs_entry max_corr_read_errors =
__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
max_corrected_read_errors_store);
static ssize_t
null_show(struct mddev *mddev, char *page)
{
return -EINVAL;
}
static ssize_t
new_dev_store(struct mddev *mddev, const char *buf, size_t len)
{
/* buf must be "%d:%d" (optionally \n terminated), giving major and
minor numbers */
/* The new device is added to the array.
* If the array has a persistent superblock, we read the
* superblock to initialise info and check validity.
* Otherwise, the only checking done is that in bind_rdev_to_array,
* which mainly checks size.
*/
char *e;
int major = simple_strtoul(buf, &e, 10);
int minor;
dev_t dev;
struct md_rdev *rdev;
int err;
if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
return -EINVAL;
minor = simple_strtoul(e+1, &e, 10);
if (*e && *e != '\n')
return -EINVAL;
dev = MKDEV(major, minor);
if (major != MAJOR(dev) ||
minor != MINOR(dev))
return -EOVERFLOW;
flush_workqueue(md_misc_wq);
err = mddev_lock(mddev);
if (err)
return err;
if (mddev->persistent) {
rdev = md_import_device(dev, mddev->major_version,
mddev->minor_version);
if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
struct md_rdev *rdev0
= list_entry(mddev->disks.next,
struct md_rdev, same_set);
err = super_types[mddev->major_version]
.load_super(rdev, rdev0, mddev->minor_version);
if (err < 0)
goto out;
}
} else if (mddev->external)
rdev = md_import_device(dev, -2, -1);
else
rdev = md_import_device(dev, -1, -1);
if (IS_ERR(rdev)) {
mddev_unlock(mddev);
return PTR_ERR(rdev);
}
err = bind_rdev_to_array(rdev, mddev);
out:
if (err)
export_rdev(rdev);
mddev_unlock(mddev);
return err ? err : len;
}
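/*
 * Usage example (illustrative): adding the device with major:minor
 * 8:16 (typically /dev/sdb) to an array via sysfs:
 *
 *	echo 8:16 > /sys/block/md0/md/new_dev
 */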
static struct md_sysfs_entry md_new_device =
__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
static ssize_t
bitmap_store(struct mddev *mddev, const char *buf, size_t len)
{
char *end;
unsigned long chunk, end_chunk;
int err;
err = mddev_lock(mddev);
if (err)
return err;
if (!mddev->bitmap)
goto out;
/* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
while (*buf) {
chunk = end_chunk = simple_strtoul(buf, &end, 0);
if (buf == end) break;
if (*end == '-') { /* range */
buf = end + 1;
end_chunk = simple_strtoul(buf, &end, 0);
if (buf == end) break;
}
if (*end && !isspace(*end)) break;
bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
buf = skip_spaces(end);
}
bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
out:
mddev_unlock(mddev);
return len;
}
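/*
 * Usage example (illustrative): marking bitmap chunks dirty so the
 * corresponding regions get resynced; single chunks and ranges may
 * be mixed:
 *
 *	echo "100-200 300" > /sys/block/md0/md/bitmap_set_bits
 *
 * dirties chunks 100 through 200 and chunk 300.
 */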
static struct md_sysfs_entry md_bitmap =
__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
static ssize_t
size_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%llu\n",
(unsigned long long)mddev->dev_sectors / 2);
}
static int update_size(struct mddev *mddev, sector_t num_sectors);
static ssize_t
size_store(struct mddev *mddev, const char *buf, size_t len)
{
/* If array is inactive, we can reduce the component size, but
* not increase it (except from 0).
* If array is active, we can try an on-line resize
*/
sector_t sectors;
int err = strict_blocks_to_sectors(buf, &sectors);
if (err < 0)
return err;
err = mddev_lock(mddev);
if (err)
return err;
if (mddev->pers) {
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_start(mddev);
err = update_size(mddev, sectors);
md_update_sb(mddev, 1);
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
} else {
if (mddev->dev_sectors == 0 ||
mddev->dev_sectors > sectors)
mddev->dev_sectors = sectors;
else
err = -ENOSPC;
}
mddev_unlock(mddev);
return err ? err : len;
}
static struct md_sysfs_entry md_size =
__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
/* Metadata version.
* This is one of
* 'none' for arrays with no metadata (good luck...)
* 'external' for arrays with externally managed metadata,
* or N.M for internally known formats
*/
static ssize_t
metadata_show(struct mddev *mddev, char *page)
{
if (mddev->persistent)
return sprintf(page, "%d.%d\n",
mddev->major_version, mddev->minor_version);
else if (mddev->external)
return sprintf(page, "external:%s\n", mddev->metadata_type);
else
return sprintf(page, "none\n");
}
static ssize_t
metadata_store(struct mddev *mddev, const char *buf, size_t len)
{
int major, minor;
char *e;
int err;
/* Changing the details of 'external' metadata is
* always permitted. Otherwise there must be
* no devices attached to the array.
*/
err = mddev_lock(mddev);
if (err)
return err;
err = -EBUSY;
if (mddev->external && strncmp(buf, "external:", 9) == 0)
;
else if (!list_empty(&mddev->disks))
goto out_unlock;
err = 0;
if (cmd_match(buf, "none")) {
mddev->persistent = 0;
mddev->external = 0;
mddev->major_version = 0;
mddev->minor_version = 90;
goto out_unlock;
}
if (strncmp(buf, "external:", 9) == 0) {
size_t namelen = len-9;
if (namelen >= sizeof(mddev->metadata_type))
namelen = sizeof(mddev->metadata_type)-1;
strncpy(mddev->metadata_type, buf+9, namelen);
mddev->metadata_type[namelen] = 0;
if (namelen && mddev->metadata_type[namelen-1] == '\n')
mddev->metadata_type[--namelen] = 0;
mddev->persistent = 0;
mddev->external = 1;
mddev->major_version = 0;
mddev->minor_version = 90;
goto out_unlock;
}
major = simple_strtoul(buf, &e, 10);
err = -EINVAL;
if (e==buf || *e != '.')
goto out_unlock;
buf = e+1;
minor = simple_strtoul(buf, &e, 10);
if (e==buf || (*e && *e != '\n') )
goto out_unlock;
err = -ENOENT;
if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
goto out_unlock;
mddev->major_version = major;
mddev->minor_version = minor;
mddev->persistent = 1;
mddev->external = 0;
err = 0;
out_unlock:
mddev_unlock(mddev);
return err ?: len;
}
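/*
 * Usage examples (illustrative) for metadata_version:
 *
 *	echo none > metadata_version		# no on-disk metadata
 *	echo external:imsm > metadata_version	# externally managed
 *	echo 1.2 > metadata_version		# native v1.2 superblocks
 */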
static struct md_sysfs_entry md_metadata =
__ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
static ssize_t
action_show(struct mddev *mddev, char *page)
{
char *type = "idle";
unsigned long recovery = mddev->recovery;
if (test_bit(MD_RECOVERY_FROZEN, &recovery))
type = "frozen";
else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
(!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
type = "reshape";
else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
type = "resync";
else if (test_bit(MD_RECOVERY_CHECK, &recovery))
type = "check";
else
type = "repair";
} else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
type = "recover";
}
return sprintf(page, "%s\n", type);
}
static ssize_t
action_store(struct mddev *mddev, const char *page, size_t len)
{
if (!mddev->pers || !mddev->pers->sync_request)
return -EINVAL;
if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
if (cmd_match(page, "frozen"))
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
else
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
mddev_lock(mddev) == 0) {
flush_workqueue(md_misc_wq);
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_reap_sync_thread(mddev);
}
mddev_unlock(mddev);
}
} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
return -EBUSY;
else if (cmd_match(page, "resync"))
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
else if (cmd_match(page, "recover")) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
} else if (cmd_match(page, "reshape")) {
int err;
if (mddev->pers->start_reshape == NULL)
return -EINVAL;
err = mddev_lock(mddev);
if (!err) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
err = mddev->pers->start_reshape(mddev);
mddev_unlock(mddev);
}
if (err)
return err;
sysfs_notify(&mddev->kobj, NULL, "degraded");
} else {
if (cmd_match(page, "check"))
set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
else if (!cmd_match(page, "repair"))
return -EINVAL;
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
}
if (mddev->ro == 2) {
/* A write to sync_action is enough to justify
* canceling read-auto mode
*/
mddev->ro = 0;
md_wakeup_thread(mddev->sync_thread);
}
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
sysfs_notify_dirent_safe(mddev->sysfs_action);
return len;
}
static struct md_sysfs_entry md_scan_mode =
__ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
static ssize_t
last_sync_action_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%s\n", mddev->last_sync_action);
}
static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
static ssize_t
mismatch_cnt_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%llu\n",
(unsigned long long)
atomic64_read(&mddev->resync_mismatches));
}
static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
static ssize_t
sync_min_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%d (%s)\n", speed_min(mddev),
mddev->sync_speed_min ? "local": "system");
}
static ssize_t
sync_min_store(struct mddev *mddev, const char *buf, size_t len)
{
unsigned int min;
int rv;
if (strncmp(buf, "system", 6)==0) {
min = 0;
} else {
rv = kstrtouint(buf, 10, &min);
if (rv < 0)
return rv;
if (min == 0)
return -EINVAL;
}
mddev->sync_speed_min = min;
return len;
}
static struct md_sysfs_entry md_sync_min =
__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
static ssize_t
sync_max_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%d (%s)\n", speed_max(mddev),
mddev->sync_speed_max ? "local": "system");
}
static ssize_t
sync_max_store(struct mddev *mddev, const char *buf, size_t len)
{
unsigned int max;
int rv;
if (strncmp(buf, "system", 6)==0) {
max = 0;
} else {
rv = kstrtouint(buf, 10, &max);
if (rv < 0)
return rv;
if (max == 0)
return -EINVAL;
}
mddev->sync_speed_max = max;
return len;
}
static struct md_sysfs_entry md_sync_max =
__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
static ssize_t
degraded_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%d\n", mddev->degraded);
}
static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
static ssize_t
sync_force_parallel_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%d\n", mddev->parallel_resync);
}
static ssize_t
sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
{
long n;
if (kstrtol(buf, 10, &n))
return -EINVAL;
if (n != 0 && n != 1)
return -EINVAL;
mddev->parallel_resync = n;
if (mddev->sync_thread)
wake_up(&resync_wait);
return len;
}
/* force parallel resync, even with shared block devices */
static struct md_sysfs_entry md_sync_force_parallel =
__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
sync_force_parallel_show, sync_force_parallel_store);
static ssize_t
sync_speed_show(struct mddev *mddev, char *page)
{
unsigned long resync, dt, db;
if (mddev->curr_resync == 0)
return sprintf(page, "none\n");
resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
dt = (jiffies - mddev->resync_mark) / HZ;
if (!dt) dt++;
db = resync - mddev->resync_mark_cnt;
return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
}
static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
static ssize_t
sync_completed_show(struct mddev *mddev, char *page)
{
unsigned long long max_sectors, resync;
if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return sprintf(page, "none\n");
if (mddev->curr_resync == 1 ||
mddev->curr_resync == 2)
return sprintf(page, "delayed\n");
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
max_sectors = mddev->resync_max_sectors;
else
max_sectors = mddev->dev_sectors;
resync = mddev->curr_resync_completed;
return sprintf(page, "%llu / %llu\n", resync, max_sectors);
}
static struct md_sysfs_entry md_sync_completed =
__ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
static ssize_t
min_sync_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%llu\n",
(unsigned long long)mddev->resync_min);
}
static ssize_t
min_sync_store(struct mddev *mddev, const char *buf, size_t len)
{
unsigned long long min;
int err;
if (kstrtoull(buf, 10, &min))
return -EINVAL;
spin_lock(&mddev->lock);
err = -EINVAL;
if (min > mddev->resync_max)
goto out_unlock;
err = -EBUSY;
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
goto out_unlock;
/* Round down to multiple of 4K for safety */
mddev->resync_min = round_down(min, 8);
err = 0;
out_unlock:
spin_unlock(&mddev->lock);
return err ?: len;
}
static struct md_sysfs_entry md_min_sync =
__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
static ssize_t
max_sync_show(struct mddev *mddev, char *page)
{
if (mddev->resync_max == MaxSector)
return sprintf(page, "max\n");
else
return sprintf(page, "%llu\n",
(unsigned long long)mddev->resync_max);
}
static ssize_t
max_sync_store(struct mddev *mddev, const char *buf, size_t len)
{
int err;
spin_lock(&mddev->lock);
if (strncmp(buf, "max", 3) == 0)
mddev->resync_max = MaxSector;
else {
unsigned long long max;
int chunk;
err = -EINVAL;
if (kstrtoull(buf, 10, &max))
goto out_unlock;
if (max < mddev->resync_min)
goto out_unlock;
err = -EBUSY;
if (max < mddev->resync_max &&
mddev->ro == 0 &&
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
goto out_unlock;
/* Must be a multiple of chunk_size */
chunk = mddev->chunk_sectors;
if (chunk) {
sector_t temp = max;
err = -EINVAL;
if (sector_div(temp, chunk))
goto out_unlock;
}
mddev->resync_max = max;
}
wake_up(&mddev->recovery_wait);
err = 0;
out_unlock:
spin_unlock(&mddev->lock);
return err ?: len;
}
static struct md_sysfs_entry md_max_sync =
__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
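/*
* Naming note: md_sync_min/md_sync_max above expose sync_speed_min and
* sync_speed_max (rate limits in KiB/s), while md_min_sync/md_max_sync
* expose sync_min and sync_max, the sector bounds of the region to be
* resynced.
*/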
static ssize_t
suspend_lo_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
}
static ssize_t
suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
{
unsigned long long old, new;
int err;
err = kstrtoull(buf, 10, &new);
if (err < 0)
return err;
if (new != (sector_t)new)
return -EINVAL;
err = mddev_lock(mddev);
if (err)
return err;
err = -EINVAL;
if (mddev->pers == NULL ||
mddev->pers->quiesce == NULL)
goto unlock;
old = mddev->suspend_lo;
mddev->suspend_lo = new;
if (new >= old)
/* Shrinking suspended region */
mddev->pers->quiesce(mddev, 2);
else {
/* Expanding suspended region - need to wait */
mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0);
}
err = 0;
unlock:
mddev_unlock(mddev);
return err ?: len;
}
static struct md_sysfs_entry md_suspend_lo =
__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
static ssize_t
suspend_hi_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
}
static ssize_t
suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
{
unsigned long long old, new;
int err;
err = kstrtoull(buf, 10, &new);
if (err < 0)
return err;
if (new != (sector_t)new)
return -EINVAL;
err = mddev_lock(mddev);
if (err)
return err;
err = -EINVAL;
if (mddev->pers == NULL ||
mddev->pers->quiesce == NULL)
goto unlock;
old = mddev->suspend_hi;
mddev->suspend_hi = new;
if (new <= old)
/* Shrinking suspended region */
mddev->pers->quiesce(mddev, 2);
else {
/* Expanding suspended region - need to wait */
mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0);
}
err = 0;
unlock:
mddev_unlock(mddev);
return err ?: len;
}
static struct md_sysfs_entry md_suspend_hi =
__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
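/*
* suspend_lo/suspend_hi delimit a sector range in which incoming writes
* are held off; the personalities consult these bounds in their request
* paths.  Shrinking the region only needs the quiesce(.., 2) notification
* above, while growing it requires a full quiesce/unquiesce cycle so that
* in-flight requests drain first.
*/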
static ssize_t
reshape_position_show(struct mddev *mddev, char *page)
{
if (mddev->reshape_position != MaxSector)
return sprintf(page, "%llu\n",
(unsigned long long)mddev->reshape_position);
strcpy(page, "none\n");
return 5;
}
static ssize_t
reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
{
struct md_rdev *rdev;
unsigned long long new;
int err;
err = kstrtoull(buf, 10, &new);
if (err < 0)
return err;
if (new != (sector_t)new)
return -EINVAL;
err = mddev_lock(mddev);
if (err)
return err;
err = -EBUSY;
if (mddev->pers)
goto unlock;
mddev->reshape_position = new;
mddev->delta_disks = 0;
mddev->reshape_backwards = 0;
mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout;
mddev->new_chunk_sectors = mddev->chunk_sectors;
rdev_for_each(rdev, mddev)
rdev->new_data_offset = rdev->data_offset;
err = 0;
unlock:
mddev_unlock(mddev);
return err ?: len;
}
static struct md_sysfs_entry md_reshape_position =
__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
reshape_position_store);
static ssize_t
reshape_direction_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%s\n",
mddev->reshape_backwards ? "backwards" : "forwards");
}
static ssize_t
reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
{
int backwards = 0;
int err;
if (cmd_match(buf, "forwards"))
backwards = 0;
else if (cmd_match(buf, "backwards"))
backwards = 1;
else
return -EINVAL;
if (mddev->reshape_backwards == backwards)
return len;
err = mddev_lock(mddev);
if (err)
return err;
/* check if we are allowed to change */
if (mddev->delta_disks)
err = -EBUSY;
else if (mddev->persistent &&
mddev->major_version == 0)
err = -EINVAL;
else
mddev->reshape_backwards = backwards;
mddev_unlock(mddev);
return err ?: len;
}
static struct md_sysfs_entry md_reshape_direction =
__ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
reshape_direction_store);
static ssize_t
array_size_show(struct mddev *mddev, char *page)
{
if (mddev->external_size)
return sprintf(page, "%llu\n",
(unsigned long long)mddev->array_sectors/2);
else
return sprintf(page, "default\n");
}
static ssize_t
array_size_store(struct mddev *mddev, const char *buf, size_t len)
{
sector_t sectors;
int err;
err = mddev_lock(mddev);
if (err)
return err;
if (strncmp(buf, "default", 7) == 0) {
if (mddev->pers)
sectors = mddev->pers->size(mddev, 0, 0);
else
sectors = mddev->array_sectors;
mddev->external_size = 0;
} else {
if (strict_blocks_to_sectors(buf, &sectors) < 0)
err = -EINVAL;
else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
err = -E2BIG;
else
mddev->external_size = 1;
}
if (!err) {
mddev->array_sectors = sectors;
if (mddev->pers) {
set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
}
}
mddev_unlock(mddev);
return err ?: len;
}
static struct md_sysfs_entry md_array_size =
__ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
array_size_store);
static struct attribute *md_default_attrs[] = {
&md_level.attr,
&md_layout.attr,
&md_raid_disks.attr,
&md_chunk_size.attr,
&md_size.attr,
&md_resync_start.attr,
&md_metadata.attr,
&md_new_device.attr,
&md_safe_delay.attr,
&md_array_state.attr,
&md_reshape_position.attr,
&md_reshape_direction.attr,
&md_array_size.attr,
&max_corr_read_errors.attr,
NULL,
};
static struct attribute *md_redundancy_attrs[] = {
&md_scan_mode.attr,
&md_last_scan_mode.attr,
&md_mismatches.attr,
&md_sync_min.attr,
&md_sync_max.attr,
&md_sync_speed.attr,
&md_sync_force_parallel.attr,
&md_sync_completed.attr,
&md_min_sync.attr,
&md_max_sync.attr,
&md_suspend_lo.attr,
&md_suspend_hi.attr,
&md_bitmap.attr,
&md_degraded.attr,
NULL,
};
static struct attribute_group md_redundancy_group = {
.name = NULL,
.attrs = md_redundancy_attrs,
};
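/*
* The show/store wrappers below must cope with an mddev that is being
* torn down concurrently: an empty ->all_mddevs list marks it as dead,
* and the temporary mddev_get()/mddev_put() pair keeps it alive across
* the attribute callback without holding the reconfig mutex.
*/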
static ssize_t
md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
struct mddev *mddev = container_of(kobj, struct mddev, kobj);
ssize_t rv;
if (!entry->show)
return -EIO;
spin_lock(&all_mddevs_lock);
if (list_empty(&mddev->all_mddevs)) {
spin_unlock(&all_mddevs_lock);
return -EBUSY;
}
mddev_get(mddev);
spin_unlock(&all_mddevs_lock);
rv = entry->show(mddev, page);
mddev_put(mddev);
return rv;
}
static ssize_t
md_attr_store(struct kobject *kobj, struct attribute *attr,
const char *page, size_t length)
{
struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
struct mddev *mddev = container_of(kobj, struct mddev, kobj);
ssize_t rv;
if (!entry->store)
return -EIO;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
spin_lock(&all_mddevs_lock);
if (list_empty(&mddev->all_mddevs)) {
spin_unlock(&all_mddevs_lock);
return -EBUSY;
}
mddev_get(mddev);
spin_unlock(&all_mddevs_lock);
rv = entry->store(mddev, page, length);
mddev_put(mddev);
return rv;
}
static void md_free(struct kobject *ko)
{
struct mddev *mddev = container_of(ko, struct mddev, kobj);
if (mddev->sysfs_state)
sysfs_put(mddev->sysfs_state);
if (mddev->queue)
blk_cleanup_queue(mddev->queue);
if (mddev->gendisk) {
del_gendisk(mddev->gendisk);
put_disk(mddev->gendisk);
}
kfree(mddev);
}
static const struct sysfs_ops md_sysfs_ops = {
.show = md_attr_show,
.store = md_attr_store,
};
static struct kobj_type md_ktype = {
.release = md_free,
.sysfs_ops = &md_sysfs_ops,
.default_attrs = md_default_attrs,
};
int mdp_major = 0;
static void mddev_delayed_delete(struct work_struct *ws)
{
struct mddev *mddev = container_of(ws, struct mddev, del_work);
sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
kobject_del(&mddev->kobj);
kobject_put(&mddev->kobj);
}
static int md_alloc(dev_t dev, char *name)
{
static DEFINE_MUTEX(disks_mutex);
struct mddev *mddev = mddev_find(dev);
struct gendisk *disk;
int partitioned;
int shift;
int unit;
int error;
if (!mddev)
return -ENODEV;
partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
shift = partitioned ? MdpMinorShift : 0;
unit = MINOR(mddev->unit) >> shift;
/* wait for any previous instance of this device to be
* completely removed (mddev_delayed_delete).
*/
flush_workqueue(md_misc_wq);
mutex_lock(&disks_mutex);
error = -EEXIST;
if (mddev->gendisk)
goto abort;
if (name) {
/* Need to ensure that 'name' is not a duplicate.
*/
struct mddev *mddev2;
spin_lock(&all_mddevs_lock);
list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
if (mddev2->gendisk &&
strcmp(mddev2->gendisk->disk_name, name) == 0) {
spin_unlock(&all_mddevs_lock);
goto abort;
}
spin_unlock(&all_mddevs_lock);
}
error = -ENOMEM;
mddev->queue = blk_alloc_queue(GFP_KERNEL);
if (!mddev->queue)
goto abort;
mddev->queue->queuedata = mddev;
blk_queue_make_request(mddev->queue, md_make_request);
blk_set_stacking_limits(&mddev->queue->limits);
disk = alloc_disk(1 << shift);
if (!disk) {
blk_cleanup_queue(mddev->queue);
mddev->queue = NULL;
goto abort;
}
disk->major = MAJOR(mddev->unit);
disk->first_minor = unit << shift;
if (name)
strcpy(disk->disk_name, name);
else if (partitioned)
sprintf(disk->disk_name, "md_d%d", unit);
else
sprintf(disk->disk_name, "md%d", unit);
disk->fops = &md_fops;
disk->private_data = mddev;
disk->queue = mddev->queue;
blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA);
/* Allow extended partitions. This makes the
* 'mdp' device redundant, but we can't really
* remove it now.
*/
disk->flags |= GENHD_FL_EXT_DEVT;
mddev->gendisk = disk;
/* As soon as we call add_disk(), another thread could get
* through to md_open, so make sure it doesn't get too far
*/
mutex_lock(&mddev->open_mutex);
add_disk(disk);
error = kobject_init_and_add(&mddev->kobj, &md_ktype,
&disk_to_dev(disk)->kobj, "%s", "md");
if (error) {
/* This isn't possible, but as kobject_init_and_add is marked
* __must_check, we must do something with the result
*/
printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
disk->disk_name);
error = 0;
}
if (mddev->kobj.sd &&
sysfs_create_group(&mddev->kobj, &md_bitmap_group))
printk(KERN_DEBUG "pointless warning\n");
mutex_unlock(&mddev->open_mutex);
abort:
mutex_unlock(&disks_mutex);
if (!error && mddev->kobj.sd) {
kobject_uevent(&mddev->kobj, KOBJ_ADD);
mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
}
mddev_put(mddev);
return error;
}
static struct kobject *md_probe(dev_t dev, int *part, void *data)
{
md_alloc(dev, NULL);
return NULL;
}
static int add_named_array(const char *val, struct kernel_param *kp)
{
/* val must be "md_*" where * is not all digits.
* We allocate an array with a large free minor number, and
* set the name to val. val must not already be an active name.
*/
int len = strlen(val);
char buf[DISK_NAME_LEN];
while (len && val[len-1] == '\n')
len--;
if (len >= DISK_NAME_LEN)
return -E2BIG;
strlcpy(buf, val, len+1);
if (strncmp(buf, "md_", 3) != 0)
return -EINVAL;
return md_alloc(0, buf);
}
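/*
* Exposed as the md_mod 'new_array' module parameter (wired up elsewhere
* in this file), so a named array can be created with e.g.:
*
*   echo md_home > /sys/module/md_mod/parameters/new_array
*/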
static void md_safemode_timeout(unsigned long data)
{
struct mddev *mddev = (struct mddev *) data;
if (!atomic_read(&mddev->writes_pending)) {
mddev->safemode = 1;
if (mddev->external)
sysfs_notify_dirent_safe(mddev->sysfs_state);
}
md_wakeup_thread(mddev->thread);
}
static int start_dirty_degraded;
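/*
* md_run() brings an assembled array on-line: sanity-check the component
* devices, bind a personality module, optionally create the write-intent
* bitmap, then publish ->pers under ->lock so that writers observe a
* fully initialised array.
*/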
int md_run(struct mddev *mddev)
{
int err;
struct md_rdev *rdev;
struct md_personality *pers;
if (list_empty(&mddev->disks))
/* cannot run an array with no devices.. */
return -EINVAL;
if (mddev->pers)
return -EBUSY;
/* Cannot run until previous stop completes properly */
if (mddev->sysfs_active)
return -EBUSY;
/*
* Analyze all RAID superblock(s)
*/
if (!mddev->raid_disks) {
if (!mddev->persistent)
return -EINVAL;
analyze_sbs(mddev);
}
if (mddev->level != LEVEL_NONE)
request_module("md-level-%d", mddev->level);
else if (mddev->clevel[0])
request_module("md-%s", mddev->clevel);
/*
* Drop all container device buffers, from now on
* the only valid external interface is through the md
* device.
*/
rdev_for_each(rdev, mddev) {
if (test_bit(Faulty, &rdev->flags))
continue;
sync_blockdev(rdev->bdev);
invalidate_bdev(rdev->bdev);
/* perform some consistency tests on the device.
* We don't want the data to overlap the metadata.
* Internal bitmap issues have been handled elsewhere.
*/
if (rdev->meta_bdev) {
/* Nothing to check */;
} else if (rdev->data_offset < rdev->sb_start) {
if (mddev->dev_sectors &&
rdev->data_offset + mddev->dev_sectors
> rdev->sb_start) {
printk("md: %s: data overlaps metadata\n",
mdname(mddev));
return -EINVAL;
}
} else {
if (rdev->sb_start + rdev->sb_size/512
> rdev->data_offset) {
printk("md: %s: metadata overlaps data\n",
mdname(mddev));
return -EINVAL;
}
}
sysfs_notify_dirent_safe(rdev->sysfs_state);
}
if (mddev->bio_set == NULL)
mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0);
spin_lock(&pers_lock);
pers = find_pers(mddev->level, mddev->clevel);
if (!pers || !try_module_get(pers->owner)) {
spin_unlock(&pers_lock);
if (mddev->level != LEVEL_NONE)
printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
mddev->level);
else
printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
mddev->clevel);
return -EINVAL;
}
spin_unlock(&pers_lock);
if (mddev->level != pers->level) {
mddev->level = pers->level;
mddev->new_level = pers->level;
}
strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
if (mddev->reshape_position != MaxSector &&
pers->start_reshape == NULL) {
/* This personality cannot handle reshaping... */
module_put(pers->owner);
return -EINVAL;
}
if (pers->sync_request) {
/* Warn if this is a potentially silly
* configuration.
*/
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
struct md_rdev *rdev2;
int warned = 0;
rdev_for_each(rdev, mddev)
rdev_for_each(rdev2, mddev) {
if (rdev < rdev2 &&
rdev->bdev->bd_contains ==
rdev2->bdev->bd_contains) {
printk(KERN_WARNING
"%s: WARNING: %s appears to be"
" on the same physical disk as"
" %s.\n",
mdname(mddev),
bdevname(rdev->bdev,b),
bdevname(rdev2->bdev,b2));
warned = 1;
}
}
if (warned)
printk(KERN_WARNING
"True protection against single-disk"
" failure might be compromised.\n");
}
mddev->recovery = 0;
/* may be overridden by personality */
mddev->resync_max_sectors = mddev->dev_sectors;
mddev->ok_start_degraded = start_dirty_degraded;
if (start_readonly && mddev->ro == 0)
mddev->ro = 2; /* read-only, but switch on first write */
err = pers->run(mddev);
if (err)
printk(KERN_ERR "md: pers->run() failed ...\n");
else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
WARN_ONCE(!mddev->external_size, "%s: default size too small,"
" but 'external_size' not in effect?\n", __func__);
printk(KERN_ERR
"md: invalid array_size %llu > default size %llu\n",
(unsigned long long)mddev->array_sectors / 2,
(unsigned long long)pers->size(mddev, 0, 0) / 2);
err = -EINVAL;
}
if (err == 0 && pers->sync_request &&
(mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
struct bitmap *bitmap;
bitmap = bitmap_create(mddev, -1);
if (IS_ERR(bitmap)) {
err = PTR_ERR(bitmap);
printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
mdname(mddev), err);
} else
mddev->bitmap = bitmap;
}
if (err) {
mddev_detach(mddev);
if (mddev->private)
pers->free(mddev, mddev->private);
mddev->private = NULL;
module_put(pers->owner);
bitmap_destroy(mddev);
return err;
}
if (mddev->queue) {
mddev->queue->backing_dev_info.congested_data = mddev;
mddev->queue->backing_dev_info.congested_fn = md_congested;
blk_queue_merge_bvec(mddev->queue, md_mergeable_bvec);
}
if (pers->sync_request) {
if (mddev->kobj.sd &&
sysfs_create_group(&mddev->kobj, &md_redundancy_group))
printk(KERN_WARNING
"md: cannot register extra attributes for %s\n",
mdname(mddev));
mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
} else if (mddev->ro == 2) /* auto-readonly not meaningful */
mddev->ro = 0;
atomic_set(&mddev->writes_pending,0);
atomic_set(&mddev->max_corr_read_errors,
MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
mddev->safemode = 0;
mddev->safemode_timer.function = md_safemode_timeout;
mddev->safemode_timer.data = (unsigned long) mddev;
mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
mddev->in_sync = 1;
smp_wmb();
spin_lock(&mddev->lock);
mddev->pers = pers;
mddev->ready = 1;
spin_unlock(&mddev->lock);
rdev_for_each(rdev, mddev)
if (rdev->raid_disk >= 0)
if (sysfs_link_rdev(mddev, rdev))
/* failure here is OK */;
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
if (mddev->flags & MD_UPDATE_SB_FLAGS)
md_update_sb(mddev, 0);
md_new_event(mddev);
sysfs_notify_dirent_safe(mddev->sysfs_state);
sysfs_notify_dirent_safe(mddev->sysfs_action);
sysfs_notify(&mddev->kobj, NULL, "degraded");
return 0;
}
EXPORT_SYMBOL_GPL(md_run);
static int do_md_run(struct mddev *mddev)
{
int err;
err = md_run(mddev);
if (err)
goto out;
err = bitmap_load(mddev);
if (err) {
bitmap_destroy(mddev);
goto out;
}
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
mddev->changed = 1;
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
out:
return err;
}
static int restart_array(struct mddev *mddev)
{
struct gendisk *disk = mddev->gendisk;
/* Complain if it has no devices */
if (list_empty(&mddev->disks))
return -ENXIO;
if (!mddev->pers)
return -EINVAL;
if (!mddev->ro)
return -EBUSY;
mddev->safemode = 0;
mddev->ro = 0;
set_disk_ro(disk, 0);
printk(KERN_INFO "md: %s switched to read-write mode.\n",
mdname(mddev));
/* Kick recovery or resync if necessary */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread);
sysfs_notify_dirent_safe(mddev->sysfs_state);
return 0;
}
static void md_clean(struct mddev *mddev)
{
mddev->array_sectors = 0;
mddev->external_size = 0;
mddev->dev_sectors = 0;
mddev->raid_disks = 0;
mddev->recovery_cp = 0;
mddev->resync_min = 0;
mddev->resync_max = MaxSector;
mddev->reshape_position = MaxSector;
mddev->external = 0;
mddev->persistent = 0;
mddev->level = LEVEL_NONE;
mddev->clevel[0] = 0;
mddev->flags = 0;
mddev->ro = 0;
mddev->metadata_type[0] = 0;
mddev->chunk_sectors = 0;
mddev->ctime = mddev->utime = 0;
mddev->layout = 0;
mddev->max_disks = 0;
mddev->events = 0;
mddev->can_decrease_events = 0;
mddev->delta_disks = 0;
mddev->reshape_backwards = 0;
mddev->new_level = LEVEL_NONE;
mddev->new_layout = 0;
mddev->new_chunk_sectors = 0;
mddev->curr_resync = 0;
atomic64_set(&mddev->resync_mismatches, 0);
mddev->suspend_lo = mddev->suspend_hi = 0;
mddev->sync_speed_min = mddev->sync_speed_max = 0;
mddev->recovery = 0;
mddev->in_sync = 0;
mddev->changed = 0;
mddev->degraded = 0;
mddev->safemode = 0;
mddev->private = NULL;
mddev->merge_check_needed = 0;
mddev->bitmap_info.offset = 0;
mddev->bitmap_info.default_offset = 0;
mddev->bitmap_info.default_space = 0;
mddev->bitmap_info.chunksize = 0;
mddev->bitmap_info.daemon_sleep = 0;
mddev->bitmap_info.max_write_behind = 0;
}
static void __md_stop_writes(struct mddev *mddev)
{
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_start(mddev);
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
flush_workqueue(md_misc_wq);
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_reap_sync_thread(mddev);
}
del_timer_sync(&mddev->safemode_timer);
bitmap_flush(mddev);
md_super_wait(mddev);
if (mddev->ro == 0 &&
(!mddev->in_sync || (mddev->flags & MD_UPDATE_SB_FLAGS))) {
/* mark array as shutdown cleanly */
mddev->in_sync = 1;
md_update_sb(mddev, 1);
}
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
}
void md_stop_writes(struct mddev *mddev)
{
mddev_lock_nointr(mddev);
__md_stop_writes(mddev);
mddev_unlock(mddev);
}
EXPORT_SYMBOL_GPL(md_stop_writes);
static void mddev_detach(struct mddev *mddev)
{
struct bitmap *bitmap = mddev->bitmap;
/* wait for behind writes to complete */
if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
printk(KERN_INFO "md:%s: behind writes in progress - waiting to stop.\n",
mdname(mddev));
/* need to kick something here to make sure I/O goes? */
wait_event(bitmap->behind_wait,
atomic_read(&bitmap->behind_writes) == 0);
}
if (mddev->pers && mddev->pers->quiesce) {
mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0);
}
md_unregister_thread(&mddev->thread);
if (mddev->queue)
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
}
static void __md_stop(struct mddev *mddev)
{
struct md_personality *pers = mddev->pers;
mddev_detach(mddev);
/* Ensure ->event_work is done */
flush_workqueue(md_misc_wq);
spin_lock(&mddev->lock);
mddev->ready = 0;
mddev->pers = NULL;
spin_unlock(&mddev->lock);
pers->free(mddev, mddev->private);
mddev->private = NULL;
if (pers->sync_request && mddev->to_remove == NULL)
mddev->to_remove = &md_redundancy_group;
module_put(pers->owner);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
}
void md_stop(struct mddev *mddev)
{
/* stop the array and free any attached data structures.
* This is called from dm-raid
*/
__md_stop(mddev);
bitmap_destroy(mddev);
if (mddev->bio_set)
bioset_free(mddev->bio_set);
}
EXPORT_SYMBOL_GPL(md_stop);
static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
{
int err = 0;
int did_freeze = 0;
if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
did_freeze = 1;
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
md_wakeup_thread(mddev->thread);
}
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
if (mddev->sync_thread)
/* Thread might be blocked waiting for metadata update
* which will now never happen */
wake_up_process(mddev->sync_thread->tsk);
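/*
* Drop the reconfig mutex while waiting: the sync thread may need it
* (e.g. for a metadata update) before it can exit, so holding it across
* the wait could deadlock.
*/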
mddev_unlock(mddev);
wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
&mddev->recovery));
mddev_lock_nointr(mddev);
mutex_lock(&mddev->open_mutex);
if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
mddev->sync_thread ||
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
(bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
printk("md: %s still in use.\n",mdname(mddev));
if (did_freeze) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
}
err = -EBUSY;
goto out;
}
if (mddev->pers) {
__md_stop_writes(mddev);
err = -ENXIO;
if (mddev->ro==1)
goto out;
mddev->ro = 1;
set_disk_ro(mddev->gendisk, 1);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
sysfs_notify_dirent_safe(mddev->sysfs_state);
err = 0;
}
out:
mutex_unlock(&mddev->open_mutex);
return err;
}
/* mode:
* 0 - completely stop and disassemble array
* 2 - stop but do not disassemble array
*/
static int do_md_stop(struct mddev *mddev, int mode,
struct block_device *bdev)
{
struct gendisk *disk = mddev->gendisk;
struct md_rdev *rdev;
int did_freeze = 0;
if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
did_freeze = 1;
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
md_wakeup_thread(mddev->thread);
}
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
if (mddev->sync_thread)
/* Thread might be blocked waiting for metadata update
* which will now never happen */
wake_up_process(mddev->sync_thread->tsk);
mddev_unlock(mddev);
wait_event(resync_wait, (mddev->sync_thread == NULL &&
!test_bit(MD_RECOVERY_RUNNING,
&mddev->recovery)));
mddev_lock_nointr(mddev);
mutex_lock(&mddev->open_mutex);
if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
mddev->sysfs_active ||
mddev->sync_thread ||
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
(bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
printk("md: %s still in use.\n",mdname(mddev));
mutex_unlock(&mddev->open_mutex);
if (did_freeze) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
}
return -EBUSY;
}
if (mddev->pers) {
if (mddev->ro)
set_disk_ro(disk, 0);
__md_stop_writes(mddev);
__md_stop(mddev);
mddev->queue->merge_bvec_fn = NULL;
mddev->queue->backing_dev_info.congested_fn = NULL;
/* tell userspace to handle 'inactive' */
sysfs_notify_dirent_safe(mddev->sysfs_state);
rdev_for_each(rdev, mddev)
if (rdev->raid_disk >= 0)
sysfs_unlink_rdev(mddev, rdev);
set_capacity(disk, 0);
mutex_unlock(&mddev->open_mutex);
mddev->changed = 1;
revalidate_disk(disk);
if (mddev->ro)
mddev->ro = 0;
} else
mutex_unlock(&mddev->open_mutex);
/*
* Free resources if final stop
*/
if (mode == 0) {
printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
bitmap_destroy(mddev);
if (mddev->bitmap_info.file) {
struct file *f = mddev->bitmap_info.file;
spin_lock(&mddev->lock);
mddev->bitmap_info.file = NULL;
spin_unlock(&mddev->lock);
fput(f);
}
mddev->bitmap_info.offset = 0;
export_array(mddev);
md_clean(mddev);
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
if (mddev->hold_active == UNTIL_STOP)
mddev->hold_active = 0;
}
blk_integrity_unregister(disk);
md_new_event(mddev);
sysfs_notify_dirent_safe(mddev->sysfs_state);
return 0;
}
#ifndef MODULE
static void autorun_array(struct mddev *mddev)
{
struct md_rdev *rdev;
int err;
if (list_empty(&mddev->disks))
return;
printk(KERN_INFO "md: running: ");
rdev_for_each(rdev, mddev) {
char b[BDEVNAME_SIZE];
printk("<%s>", bdevname(rdev->bdev,b));
}
printk("\n");
err = do_md_run(mddev);
if (err) {
printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
do_md_stop(mddev, 0, NULL);
}
}
/*
* let's try to run arrays based on all disks that have arrived
* until now. (those are in pending_raid_disks)
*
* the method: pick the first pending disk, collect all disks with
* the same UUID, remove all from the pending list and put them into
* the 'same_array' list. Then order this list based on superblock
* update time (freshest comes first), kick out 'old' disks and
* compare superblocks. If everything's fine then run it.
*
* If "unit" is allocated, then bump its reference count
*/
static void autorun_devices(int part)
{
struct md_rdev *rdev0, *rdev, *tmp;
struct mddev *mddev;
char b[BDEVNAME_SIZE];
printk(KERN_INFO "md: autorun ...\n");
while (!list_empty(&pending_raid_disks)) {
int unit;
dev_t dev;
LIST_HEAD(candidates);
rdev0 = list_entry(pending_raid_disks.next,
struct md_rdev, same_set);
printk(KERN_INFO "md: considering %s ...\n",
bdevname(rdev0->bdev,b));
INIT_LIST_HEAD(&candidates);
rdev_for_each_list(rdev, tmp, &pending_raid_disks)
if (super_90_load(rdev, rdev0, 0) >= 0) {
printk(KERN_INFO "md: adding %s ...\n",
bdevname(rdev->bdev,b));
list_move(&rdev->same_set, &candidates);
}
/*
* now we have a set of devices, with all of them having
* mostly sane superblocks. It's time to allocate the
* mddev.
*/
if (part) {
dev = MKDEV(mdp_major,
rdev0->preferred_minor << MdpMinorShift);
unit = MINOR(dev) >> MdpMinorShift;
} else {
dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
unit = MINOR(dev);
}
if (rdev0->preferred_minor != unit) {
printk(KERN_INFO "md: unit number in %s is bad: %d\n",
bdevname(rdev0->bdev, b), rdev0->preferred_minor);
break;
}
md_probe(dev, NULL, NULL);
mddev = mddev_find(dev);
if (!mddev || !mddev->gendisk) {
if (mddev)
mddev_put(mddev);
printk(KERN_ERR
"md: cannot allocate memory for md drive.\n");
break;
}
if (mddev_lock(mddev))
printk(KERN_WARNING "md: %s locked, cannot run\n",
mdname(mddev));
else if (mddev->raid_disks || mddev->major_version
|| !list_empty(&mddev->disks)) {
printk(KERN_WARNING
"md: %s already running, cannot run %s\n",
mdname(mddev), bdevname(rdev0->bdev,b));
mddev_unlock(mddev);
} else {
printk(KERN_INFO "md: created %s\n", mdname(mddev));
mddev->persistent = 1;
rdev_for_each_list(rdev, tmp, &candidates) {
list_del_init(&rdev->same_set);
if (bind_rdev_to_array(rdev, mddev))
export_rdev(rdev);
}
autorun_array(mddev);
mddev_unlock(mddev);
}
/* on success, candidates will be empty, on error
* they won't be...
*/
rdev_for_each_list(rdev, tmp, &candidates) {
list_del_init(&rdev->same_set);
export_rdev(rdev);
}
mddev_put(mddev);
}
printk(KERN_INFO "md: ... autorun DONE.\n");
}
#endif /* !MODULE */
static int get_version(void __user *arg)
{
mdu_version_t ver;
ver.major = MD_MAJOR_VERSION;
ver.minor = MD_MINOR_VERSION;
ver.patchlevel = MD_PATCHLEVEL_VERSION;
if (copy_to_user(arg, &ver, sizeof(ver)))
return -EFAULT;
return 0;
}
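/*
* Illustrative userspace sketch (not part of the driver): the version is
* read with the RAID_VERSION ioctl from <linux/raid/md_u.h>, assuming an
* md node such as /dev/md0 exists:
*
*   int fd = open("/dev/md0", O_RDONLY);
*   mdu_version_t ver;
*   if (fd >= 0 && ioctl(fd, RAID_VERSION, &ver) == 0)
*           printf("md driver %d.%d.%d\n",
*                  ver.major, ver.minor, ver.patchlevel);
*/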
static int get_array_info(struct mddev *mddev, void __user *arg)
{
mdu_array_info_t info;
int nr,working,insync,failed,spare;
struct md_rdev *rdev;
nr = working = insync = failed = spare = 0;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev) {
nr++;
if (test_bit(Faulty, &rdev->flags))
failed++;
else {
working++;
if (test_bit(In_sync, &rdev->flags))
insync++;
else
spare++;
}
}
rcu_read_unlock();
info.major_version = mddev->major_version;
info.minor_version = mddev->minor_version;
info.patch_version = MD_PATCHLEVEL_VERSION;
info.ctime = mddev->ctime;
info.level = mddev->level;
info.size = mddev->dev_sectors / 2;
if (info.size != mddev->dev_sectors / 2) /* overflow */
info.size = -1;
info.nr_disks = nr;
info.raid_disks = mddev->raid_disks;
info.md_minor = mddev->md_minor;
info.not_persistent= !mddev->persistent;
info.utime = mddev->utime;
info.state = 0;
if (mddev->in_sync)
info.state = (1<<MD_SB_CLEAN);
if (mddev->bitmap && mddev->bitmap_info.offset)
info.state |= (1<<MD_SB_BITMAP_PRESENT);
if (mddev_is_clustered(mddev))
info.state |= (1<<MD_SB_CLUSTERED);
info.active_disks = insync;
info.working_disks = working;
info.failed_disks = failed;
info.spare_disks = spare;
info.layout = mddev->layout;
info.chunk_size = mddev->chunk_sectors << 9;
if (copy_to_user(arg, &info, sizeof(info)))
return -EFAULT;
return 0;
}
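/*
* Matching userspace sketch (hypothetical descriptor 'fd' open on an md
* device):
*
*   mdu_array_info_t info;
*   if (ioctl(fd, GET_ARRAY_INFO, &info) == 0)
*           printf("level %d, %d raid disks, %d failed\n",
*                  info.level, info.raid_disks, info.failed_disks);
*/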
static int get_bitmap_file(struct mddev *mddev, void __user * arg)
{
mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
char *ptr;
int err;
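/*
* The allocation must be zeroed: when no bitmap file is attached only
* pathname[0] is written below, yet the whole struct is copied out to
* userspace, so stale heap bytes must not leak.
*/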
file = kzalloc(sizeof(*file), GFP_NOIO);
if (!file)
return -ENOMEM;
err = 0;
spin_lock(&mddev->lock);
/* bitmap disabled, zero the first byte and copy out */
if (!mddev->bitmap_info.file)
file->pathname[0] = '\0';
else if ((ptr = file_path(mddev->bitmap_info.file,
file->pathname, sizeof(file->pathname))),
IS_ERR(ptr))
err = PTR_ERR(ptr);
else
memmove(file->pathname, ptr,
sizeof(file->pathname)-(ptr-file->pathname));
spin_unlock(&mddev->lock);
if (err == 0 &&
copy_to_user(arg, file, sizeof(*file)))
err = -EFAULT;
kfree(file);
return err;
}
static int get_disk_info(struct mddev *mddev, void __user * arg)
{
mdu_disk_info_t info;
struct md_rdev *rdev;
if (copy_from_user(&info, arg, sizeof(info)))
return -EFAULT;
rcu_read_lock();
rdev = md_find_rdev_nr_rcu(mddev, info.number);
if (rdev) {
info.major = MAJOR(rdev->bdev->bd_dev);
info.minor = MINOR(rdev->bdev->bd_dev);
info.raid_disk = rdev->raid_disk;
info.state = 0;
if (test_bit(Faulty, &rdev->flags))
info.state |= (1<<MD_DISK_FAULTY);
else if (test_bit(In_sync, &rdev->flags)) {
info.state |= (1<<MD_DISK_ACTIVE);
info.state |= (1<<MD_DISK_SYNC);
}
if (test_bit(WriteMostly, &rdev->flags))
info.state |= (1<<MD_DISK_WRITEMOSTLY);
} else {
info.major = info.minor = 0;
info.raid_disk = -1;
info.state = (1<<MD_DISK_REMOVED);
}
rcu_read_unlock();
if (copy_to_user(arg, &info, sizeof(info)))
return -EFAULT;
return 0;
}
static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
{
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
struct md_rdev *rdev;
dev_t dev = MKDEV(info->major,info->minor);
if (mddev_is_clustered(mddev) &&
!(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
pr_err("%s: Cannot add to clustered mddev.\n",
mdname(mddev));
return -EINVAL;
}
if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
return -EOVERFLOW;
if (!mddev->raid_disks) {
int err;
/* expecting a device which has a superblock */
rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
"md: md_import_device returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
}
if (!list_empty(&mddev->disks)) {
struct md_rdev *rdev0
= list_entry(mddev->disks.next,
struct md_rdev, same_set);
err = super_types[mddev->major_version]
.load_super(rdev, rdev0, mddev->minor_version);
if (err < 0) {
printk(KERN_WARNING
"md: %s has different UUID to %s\n",
bdevname(rdev->bdev,b),
bdevname(rdev0->bdev,b2));
export_rdev(rdev);
return -EINVAL;
}
}
err = bind_rdev_to_array(rdev, mddev);
if (err)
export_rdev(rdev);
return err;
}
/*
* add_new_disk can be used once the array is assembled
* to add "hot spares".  They must already have a superblock
* written.
*/
if (mddev->pers) {
int err;
if (!mddev->pers->hot_add_disk) {
printk(KERN_WARNING
"%s: personality does not support diskops!\n",
mdname(mddev));
return -EINVAL;
}
if (mddev->persistent)
rdev = md_import_device(dev, mddev->major_version,
mddev->minor_version);
else
rdev = md_import_device(dev, -1, -1);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
"md: md_import_device returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
}
/* set saved_raid_disk if appropriate */
if (!mddev->persistent) {
if (info->state & (1<<MD_DISK_SYNC) &&
info->raid_disk < mddev->raid_disks) {
rdev->raid_disk = info->raid_disk;
set_bit(In_sync, &rdev->flags);
clear_bit(Bitmap_sync, &rdev->flags);
} else
rdev->raid_disk = -1;
rdev->saved_raid_disk = rdev->raid_disk;
} else
super_types[mddev->major_version].
validate_super(mddev, rdev);
if ((info->state & (1<<MD_DISK_SYNC)) &&
rdev->raid_disk != info->raid_disk) {
/* This was a hot-add request, but the events don't
* match, so reject it.
*/
export_rdev(rdev);
return -EINVAL;
}
clear_bit(In_sync, &rdev->flags); /* just to be sure */
if (info->state & (1<<MD_DISK_WRITEMOSTLY))
set_bit(WriteMostly, &rdev->flags);
else
clear_bit(WriteMostly, &rdev->flags);
/*
* check whether the device shows up in other nodes
*/
if (mddev_is_clustered(mddev)) {
if (info->state & (1 << MD_DISK_CANDIDATE)) {
/* Through --cluster-confirm */
set_bit(Candidate, &rdev->flags);
err = md_cluster_ops->new_disk_ack(mddev, true);
if (err) {
export_rdev(rdev);
return err;
}
} else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
/* --add initiated by this node */
err = md_cluster_ops->add_new_disk_start(mddev, rdev);
if (err) {
md_cluster_ops->add_new_disk_finish(mddev);
export_rdev(rdev);
return err;
}
}
}
rdev->raid_disk = -1;
err = bind_rdev_to_array(rdev, mddev);
if (err)
export_rdev(rdev);
else
err = add_bound_rdev(rdev);
if (mddev_is_clustered(mddev) &&
(info->state & (1 << MD_DISK_CLUSTER_ADD)))
md_cluster_ops->add_new_disk_finish(mddev);
return err;
}
/* otherwise, add_new_disk is only allowed
* for major_version==0 superblocks
*/
if (mddev->major_version != 0) {
printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
mdname(mddev));
return -EINVAL;
}
if (!(info->state & (1<<MD_DISK_FAULTY))) {
int err;
rdev = md_import_device(dev, -1, 0);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
"md: error, md_import_device() returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
}
rdev->desc_nr = info->number;
if (info->raid_disk < mddev->raid_disks)
rdev->raid_disk = info->raid_disk;
else
rdev->raid_disk = -1;
if (rdev->raid_disk < mddev->raid_disks)
if (info->state & (1<<MD_DISK_SYNC))
set_bit(In_sync, &rdev->flags);
if (info->state & (1<<MD_DISK_WRITEMOSTLY))
set_bit(WriteMostly, &rdev->flags);
if (!mddev->persistent) {
printk(KERN_INFO "md: nonpersistent superblock ...\n");
rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
} else
rdev->sb_start = calc_dev_sboffset(rdev);
rdev->sectors = rdev->sb_start;
err = bind_rdev_to_array(rdev, mddev);
if (err) {
export_rdev(rdev);
return err;
}
}
return 0;
}
static int hot_remove_disk(struct mddev *mddev, dev_t dev)
{
char b[BDEVNAME_SIZE];
struct md_rdev *rdev;
rdev = find_rdev(mddev, dev);
if (!rdev)
return -ENXIO;
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_start(mddev);
clear_bit(Blocked, &rdev->flags);
remove_and_add_spares(mddev, rdev);
if (rdev->raid_disk >= 0)
goto busy;
if (mddev_is_clustered(mddev))
md_cluster_ops->remove_disk(mddev, rdev);
md_kick_rdev_from_array(rdev);
md_update_sb(mddev, 1);
md_new_event(mddev);
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
return 0;
busy:
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_cancel(mddev);
printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
bdevname(rdev->bdev,b), mdname(mddev));
return -EBUSY;
}
static int hot_add_disk(struct mddev *mddev, dev_t dev)
{
char b[BDEVNAME_SIZE];
int err;
struct md_rdev *rdev;
if (!mddev->pers)
return -ENODEV;
if (mddev->major_version != 0) {
printk(KERN_WARNING "%s: HOT_ADD may only be used with"
" version-0 superblocks.\n",
mdname(mddev));
return -EINVAL;
}
if (!mddev->pers->hot_add_disk) {
printk(KERN_WARNING
"%s: personality does not support diskops!\n",
mdname(mddev));
return -EINVAL;
}
rdev = md_import_device(dev, -1, 0);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
"md: error, md_import_device() returned %ld\n",
PTR_ERR(rdev));
return -EINVAL;
}
if (mddev->persistent)
rdev->sb_start = calc_dev_sboffset(rdev);
else
rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
rdev->sectors = rdev->sb_start;
if (test_bit(Faulty, &rdev->flags)) {
printk(KERN_WARNING
"md: can not hot-add faulty %s disk to %s!\n",
bdevname(rdev->bdev,b), mdname(mddev));
err = -EINVAL;
goto abort_export;
}
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_start(mddev);
clear_bit(In_sync, &rdev->flags);
rdev->desc_nr = -1;
rdev->saved_raid_disk = -1;
err = bind_rdev_to_array(rdev, mddev);
if (err)
goto abort_clustered;
/*
* The rest had better be atomic: we can have disk failures
* noticed in interrupt contexts ...
*/
rdev->raid_disk = -1;
md_update_sb(mddev, 1);
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
/*
* Kick recovery, maybe this spare has to be added to the
* array immediately.
*/
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
md_new_event(mddev);
return 0;
abort_clustered:
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_cancel(mddev);
abort_export:
export_rdev(rdev);
return err;
}
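/*
* set_bitmap_file(): fd >= 0 attaches a file-backed write-intent bitmap
* (the file must be a writable regular file not already in use);
* fd < 0 detaches and destroys any existing bitmap.
*/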
static int set_bitmap_file(struct mddev *mddev, int fd)
{
int err = 0;
if (mddev->pers) {
if (!mddev->pers->quiesce || !mddev->thread)
return -EBUSY;
if (mddev->recovery || mddev->sync_thread)
return -EBUSY;
/* we should be able to change the bitmap.. */
}
if (fd >= 0) {
struct inode *inode;
struct file *f;
if (mddev->bitmap || mddev->bitmap_info.file)
return -EEXIST; /* cannot add when bitmap is present */
f = fget(fd);
if (f == NULL) {
printk(KERN_ERR "%s: error: failed to get bitmap file\n",
mdname(mddev));
return -EBADF;
}
inode = f->f_mapping->host;
if (!S_ISREG(inode->i_mode)) {
printk(KERN_ERR "%s: error: bitmap file must be a regular file\n",
mdname(mddev));
err = -EBADF;
} else if (!(f->f_mode & FMODE_WRITE)) {
printk(KERN_ERR "%s: error: bitmap file must open for write\n",
mdname(mddev));
err = -EBADF;
} else if (atomic_read(&inode->i_writecount) != 1) {
printk(KERN_ERR "%s: error: bitmap file is already in use\n",
mdname(mddev));
err = -EBUSY;
}
if (err) {
fput(f);
return err;
}
mddev->bitmap_info.file = f;
mddev->bitmap_info.offset = 0; /* file overrides offset */
} else if (mddev->bitmap == NULL)
return -ENOENT; /* cannot remove what isn't there */
err = 0;
if (mddev->pers) {
mddev->pers->quiesce(mddev, 1);
if (fd >= 0) {
struct bitmap *bitmap;
bitmap = bitmap_create(mddev, -1);
if (!IS_ERR(bitmap)) {
mddev->bitmap = bitmap;
err = bitmap_load(mddev);
} else
err = PTR_ERR(bitmap);
}
if (fd < 0 || err) {
bitmap_destroy(mddev);
fd = -1; /* make sure to put the file */
}
mddev->pers->quiesce(mddev, 0);
}
if (fd < 0) {
struct file *f = mddev->bitmap_info.file;
if (f) {
spin_lock(&mddev->lock);
mddev->bitmap_info.file = NULL;
spin_unlock(&mddev->lock);
fput(f);
}
}
return err;
}
/*
* set_array_info is used in two different ways.
* The original usage is when creating a new array.
* In this usage, raid_disks is > 0 and it together with
*  level, size, not_persistent, layout, chunksize determine the
*  shape of the array.
*  This will always create an array with a type-0.90.0 superblock.
* The newer usage is when assembling an array.
* In this case raid_disks will be 0, and the major_version field is
* used to determine which style superblocks are to be found on the devices.
* The minor and patch _version numbers are also kept in case the
* super_block handler wishes to interpret them.
*/
static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
{
if (info->raid_disks == 0) {
/* just setting version number for superblock loading */
if (info->major_version < 0 ||
info->major_version >= ARRAY_SIZE(super_types) ||
super_types[info->major_version].name == NULL) {
/* maybe try to auto-load a module? */
printk(KERN_INFO
"md: superblock version %d not known\n",
info->major_version);
return -EINVAL;
}
mddev->major_version = info->major_version;
mddev->minor_version = info->minor_version;
mddev->patch_version = info->patch_version;
mddev->persistent = !info->not_persistent;
/* ensure mddev_put doesn't delete this now that there
* is some minimal configuration.
*/
mddev->ctime = get_seconds();
return 0;
}
mddev->major_version = MD_MAJOR_VERSION;
mddev->minor_version = MD_MINOR_VERSION;
mddev->patch_version = MD_PATCHLEVEL_VERSION;
mddev->ctime = get_seconds();
mddev->level = info->level;
mddev->clevel[0] = 0;
mddev->dev_sectors = 2 * (sector_t)info->size;
mddev->raid_disks = info->raid_disks;
/* don't set md_minor, it is determined by which /dev/md* was
* opened
*/
if (info->state & (1<<MD_SB_CLEAN))
mddev->recovery_cp = MaxSector;
else
mddev->recovery_cp = 0;
mddev->persistent = ! info->not_persistent;
mddev->external = 0;
mddev->layout = info->layout;
mddev->chunk_sectors = info->chunk_size >> 9;
mddev->max_disks = MD_SB_DISKS;
if (mddev->persistent)
mddev->flags = 0;
set_bit(MD_CHANGE_DEVS, &mddev->flags);
mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
mddev->bitmap_info.offset = 0;
mddev->reshape_position = MaxSector;
/*
* Generate a 128 bit UUID
*/
get_random_bytes(mddev->uuid, 16);
mddev->new_level = mddev->level;
mddev->new_chunk_sectors = mddev->chunk_sectors;
mddev->new_layout = mddev->layout;
mddev->delta_disks = 0;
mddev->reshape_backwards = 0;
return 0;
}
void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
{
WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
if (mddev->external_size)
return;
mddev->array_sectors = array_sectors;
}
EXPORT_SYMBOL(md_set_array_sectors);
static int update_size(struct mddev *mddev, sector_t num_sectors)
{
struct md_rdev *rdev;
int rv;
int fit = (num_sectors == 0);
if (mddev->pers->resize == NULL)
return -EINVAL;
/* The "num_sectors" is the number of sectors of each device that
* is used. This can only make sense for arrays with redundancy.
* linear and raid0 always use whatever space is available. We can only
* consider changing this number if no resync or reconstruction is
* happening, and if the new size is acceptable. It must fit before the
* sb_start or, if that is <data_offset, it must fit before the size
* of each device. If num_sectors is zero, we find the largest size
* that fits.
*/
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
mddev->sync_thread)
return -EBUSY;
if (mddev->ro)
return -EROFS;
rdev_for_each(rdev, mddev) {
sector_t avail = rdev->sectors;
if (fit && (num_sectors == 0 || num_sectors > avail))
num_sectors = avail;
if (avail < num_sectors)
return -ENOSPC;
}
rv = mddev->pers->resize(mddev, num_sectors);
if (!rv)
revalidate_disk(mddev->gendisk);
return rv;
}
static int update_raid_disks(struct mddev *mddev, int raid_disks)
{
int rv;
struct md_rdev *rdev;
/* change the number of raid disks */
if (mddev->pers->check_reshape == NULL)
return -EINVAL;
if (mddev->ro)
return -EROFS;
if (raid_disks <= 0 ||
(mddev->max_disks && raid_disks >= mddev->max_disks))
return -EINVAL;
if (mddev->sync_thread ||
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
mddev->reshape_position != MaxSector)
return -EBUSY;
rdev_for_each(rdev, mddev) {
if (mddev->raid_disks < raid_disks &&
rdev->data_offset < rdev->new_data_offset)
return -EINVAL;
if (mddev->raid_disks > raid_disks &&
rdev->data_offset > rdev->new_data_offset)
return -EINVAL;
}
mddev->delta_disks = raid_disks - mddev->raid_disks;
if (mddev->delta_disks < 0)
mddev->reshape_backwards = 1;
else if (mddev->delta_disks > 0)
mddev->reshape_backwards = 0;
rv = mddev->pers->check_reshape(mddev);
if (rv < 0) {
mddev->delta_disks = 0;
mddev->reshape_backwards = 0;
}
return rv;
}
/*
* update_array_info is used to change the configuration of an
* on-line array.
* The version, ctime, level, size, raid_disks, not_persistent, layout
* and chunk_size fields in the info are checked against the array.
* Any differences that cannot be handled will cause an error.
* Normally, only one change can be managed at a time.
*/
static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
{
int rv = 0;
int cnt = 0;
int state = 0;
/* calculate expected state, ignoring low bits */
if (mddev->bitmap && mddev->bitmap_info.offset)
state |= (1 << MD_SB_BITMAP_PRESENT);
if (mddev->major_version != info->major_version ||
mddev->minor_version != info->minor_version ||
/* mddev->patch_version != info->patch_version || */
mddev->ctime != info->ctime ||
mddev->level != info->level ||
/* mddev->layout != info->layout || */
mddev->persistent != !info->not_persistent ||
mddev->chunk_sectors != info->chunk_size >> 9 ||
/* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
((state^info->state) & 0xfffffe00)
)
return -EINVAL;
/* Check there is only one change */
if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
cnt++;
if (mddev->raid_disks != info->raid_disks)
cnt++;
if (mddev->layout != info->layout)
cnt++;
if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
cnt++;
if (cnt == 0)
return 0;
if (cnt > 1)
return -EINVAL;
if (mddev->layout != info->layout) {
/* Change layout
* we don't need to do anything at the md level, the
* personality will take care of it all.
*/
if (mddev->pers->check_reshape == NULL)
return -EINVAL;
else {
mddev->new_layout = info->layout;
rv = mddev->pers->check_reshape(mddev);
if (rv)
mddev->new_layout = mddev->layout;
return rv;
}
}
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_start(mddev);
if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
rv = update_size(mddev, (sector_t)info->size * 2);
if (mddev->raid_disks != info->raid_disks)
rv = update_raid_disks(mddev, info->raid_disks);
if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
rv = -EINVAL;
goto err;
}
if (mddev->recovery || mddev->sync_thread) {
rv = -EBUSY;
goto err;
}
if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
struct bitmap *bitmap;
/* add the bitmap */
if (mddev->bitmap) {
rv = -EEXIST;
goto err;
}
if (mddev->bitmap_info.default_offset == 0) {
rv = -EINVAL;
goto err;
}
mddev->bitmap_info.offset =
mddev->bitmap_info.default_offset;
mddev->bitmap_info.space =
mddev->bitmap_info.default_space;
mddev->pers->quiesce(mddev, 1);
bitmap = bitmap_create(mddev, -1);
if (!IS_ERR(bitmap)) {
mddev->bitmap = bitmap;
rv = bitmap_load(mddev);
} else
rv = PTR_ERR(bitmap);
if (rv)
bitmap_destroy(mddev);
mddev->pers->quiesce(mddev, 0);
} else {
/* remove the bitmap */
if (!mddev->bitmap) {
rv = -ENOENT;
goto err;
}
if (mddev->bitmap->storage.file) {
rv = -EINVAL;
goto err;
}
mddev->pers->quiesce(mddev, 1);
bitmap_destroy(mddev);
mddev->pers->quiesce(mddev, 0);
mddev->bitmap_info.offset = 0;
}
}
md_update_sb(mddev, 1);
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
return rv;
err:
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_cancel(mddev);
return rv;
}
static int set_disk_faulty(struct mddev *mddev, dev_t dev)
{
struct md_rdev *rdev;
int err = 0;
if (mddev->pers == NULL)
return -ENODEV;
rcu_read_lock();
rdev = find_rdev_rcu(mddev, dev);
if (!rdev)
err = -ENODEV;
else {
md_error(mddev, rdev);
if (!test_bit(Faulty, &rdev->flags))
err = -EBUSY;
}
rcu_read_unlock();
return err;
}
/*
* We have a problem here: there is no easy way to give a CHS
* virtual geometry. We currently pretend that we have 2 heads and
* 4 sectors (with a BIG number of cylinders...). This drives
* dosfs just mad... ;-)
*/
static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
struct mddev *mddev = bdev->bd_disk->private_data;
geo->heads = 2;
geo->sectors = 4;
geo->cylinders = mddev->array_sectors / 8;
return 0;
}
static inline bool md_ioctl_valid(unsigned int cmd)
{
switch (cmd) {
case ADD_NEW_DISK:
case BLKROSET:
case GET_ARRAY_INFO:
case GET_BITMAP_FILE:
case GET_DISK_INFO:
case HOT_ADD_DISK:
case HOT_REMOVE_DISK:
case RAID_AUTORUN:
case RAID_VERSION:
case RESTART_ARRAY_RW:
case RUN_ARRAY:
case SET_ARRAY_INFO:
case SET_BITMAP_FILE:
case SET_DISK_FAULTY:
case STOP_ARRAY:
case STOP_ARRAY_RO:
case CLUSTERED_DISK_NACK:
return true;
default:
return false;
}
}
static int md_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
int err = 0;
void __user *argp = (void __user *)arg;
struct mddev *mddev = NULL;
int ro;
if (!md_ioctl_valid(cmd))
return -ENOTTY;
switch (cmd) {
case RAID_VERSION:
case GET_ARRAY_INFO:
case GET_DISK_INFO:
break;
default:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
}
/*
* Commands dealing with the RAID driver but not any
* particular array:
*/
switch (cmd) {
case RAID_VERSION:
err = get_version(argp);
goto out;
#ifndef MODULE
case RAID_AUTORUN:
err = 0;
autostart_arrays(arg);
goto out;
#endif
default:;
}
/*
* Commands creating/starting a new array:
*/
mddev = bdev->bd_disk->private_data;
if (!mddev) {
BUG();
goto out;
}
/* Some actions do not require the mutex */
switch (cmd) {
case GET_ARRAY_INFO:
if (!mddev->raid_disks && !mddev->external)
err = -ENODEV;
else
err = get_array_info(mddev, argp);
goto out;
case GET_DISK_INFO:
if (!mddev->raid_disks && !mddev->external)
err = -ENODEV;
else
err = get_disk_info(mddev, argp);
goto out;
case SET_DISK_FAULTY:
err = set_disk_faulty(mddev, new_decode_dev(arg));
goto out;
case GET_BITMAP_FILE:
err = get_bitmap_file(mddev, argp);
goto out;
}
if (cmd == ADD_NEW_DISK)
/* need to ensure md_delayed_delete() has completed */
flush_workqueue(md_misc_wq);
if (cmd == HOT_REMOVE_DISK)
/* need to ensure recovery thread has run */
wait_event_interruptible_timeout(mddev->sb_wait,
!test_bit(MD_RECOVERY_NEEDED,
&mddev->recovery),
msecs_to_jiffies(5000));
if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
/* Need to flush page cache, and ensure no-one else opens
* and writes
*/
mutex_lock(&mddev->open_mutex);
if (mddev->pers && atomic_read(&mddev->openers) > 1) {
mutex_unlock(&mddev->open_mutex);
err = -EBUSY;
goto out;
}
set_bit(MD_STILL_CLOSED, &mddev->flags);
mutex_unlock(&mddev->open_mutex);
sync_blockdev(bdev);
}
err = mddev_lock(mddev);
if (err) {
printk(KERN_INFO
"md: ioctl lock interrupted, reason %d, cmd %d\n",
err, cmd);
goto out;
}
if (cmd == SET_ARRAY_INFO) {
mdu_array_info_t info;
if (!arg)
memset(&info, 0, sizeof(info));
else if (copy_from_user(&info, argp, sizeof(info))) {
err = -EFAULT;
goto unlock;
}
if (mddev->pers) {
err = update_array_info(mddev, &info);
if (err) {
printk(KERN_WARNING "md: couldn't update"
" array info. %d\n", err);
goto unlock;
}
goto unlock;
}
if (!list_empty(&mddev->disks)) {
printk(KERN_WARNING
"md: array %s already has disks!\n",
mdname(mddev));
err = -EBUSY;
goto unlock;
}
if (mddev->raid_disks) {
printk(KERN_WARNING
"md: array %s already initialised!\n",
mdname(mddev));
err = -EBUSY;
goto unlock;
}
err = set_array_info(mddev, &info);
if (err) {
printk(KERN_WARNING "md: couldn't set"
" array info. %d\n", err);
goto unlock;
}
goto unlock;
}
/*
* Commands querying/configuring an existing array:
*/
/* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
* RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
if ((!mddev->raid_disks && !mddev->external)
&& cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
&& cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
&& cmd != GET_BITMAP_FILE) {
err = -ENODEV;
goto unlock;
}
/*
* Commands even a read-only array can execute:
*/
switch (cmd) {
case RESTART_ARRAY_RW:
err = restart_array(mddev);
goto unlock;
case STOP_ARRAY:
err = do_md_stop(mddev, 0, bdev);
goto unlock;
case STOP_ARRAY_RO:
err = md_set_readonly(mddev, bdev);
goto unlock;
case HOT_REMOVE_DISK:
err = hot_remove_disk(mddev, new_decode_dev(arg));
goto unlock;
case ADD_NEW_DISK:
/* We can support ADD_NEW_DISK on read-only arrays
* only if we are re-adding a preexisting device.
* So require mddev->pers and MD_DISK_SYNC.
*/
if (mddev->pers) {
mdu_disk_info_t info;
if (copy_from_user(&info, argp, sizeof(info)))
err = -EFAULT;
else if (!(info.state & (1<<MD_DISK_SYNC)))
/* Need to clear read-only for this */
break;
else
err = add_new_disk(mddev, &info);
goto unlock;
}
break;
case BLKROSET:
if (get_user(ro, (int __user *)(arg))) {
err = -EFAULT;
goto unlock;
}
err = -EINVAL;
/* if the bdev is going readonly the value of mddev->ro
* does not matter, no writes are coming
*/
if (ro)
goto unlock;
/* are we already prepared for writes? */
if (mddev->ro != 1)
goto unlock;
/* transitioning to read-auto need only happen for
* arrays that call md_write_start
*/
if (mddev->pers) {
err = restart_array(mddev);
if (err == 0) {
mddev->ro = 2;
set_disk_ro(mddev->gendisk, 0);
}
}
goto unlock;
}
/*
* The remaining ioctls are changing the state of the
* superblock, so we do not allow them on read-only arrays.
*/
if (mddev->ro && mddev->pers) {
if (mddev->ro == 2) {
mddev->ro = 0;
sysfs_notify_dirent_safe(mddev->sysfs_state);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
/* mddev_unlock will wake thread */
/* If a device failed while we were read-only, we
* need to make sure the metadata is updated now.
*/
if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
mddev_unlock(mddev);
wait_event(mddev->sb_wait,
!test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
!test_bit(MD_CHANGE_PENDING, &mddev->flags));
mddev_lock_nointr(mddev);
}
} else {
err = -EROFS;
goto unlock;
}
}
switch (cmd) {
case ADD_NEW_DISK:
{
mdu_disk_info_t info;
if (copy_from_user(&info, argp, sizeof(info)))
err = -EFAULT;
else
err = add_new_disk(mddev, &info);
goto unlock;
}
case CLUSTERED_DISK_NACK:
if (mddev_is_clustered(mddev))
md_cluster_ops->new_disk_ack(mddev, false);
else
err = -EINVAL;
goto unlock;
case HOT_ADD_DISK:
err = hot_add_disk(mddev, new_decode_dev(arg));
goto unlock;
case RUN_ARRAY:
err = do_md_run(mddev);
goto unlock;
case SET_BITMAP_FILE:
err = set_bitmap_file(mddev, (int)arg);
goto unlock;
default:
err = -EINVAL;
goto unlock;
}
unlock:
if (mddev->hold_active == UNTIL_IOCTL &&
err != -EINVAL)
mddev->hold_active = 0;
mddev_unlock(mddev);
out:
return err;
}
#ifdef CONFIG_COMPAT
static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case HOT_REMOVE_DISK:
case HOT_ADD_DISK:
case SET_DISK_FAULTY:
case SET_BITMAP_FILE:
		/* These take an integer arg; do not convert it to a pointer */
break;
default:
arg = (unsigned long)compat_ptr(arg);
break;
}
return md_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */
static int md_open(struct block_device *bdev, fmode_t mode)
{
/*
* Succeed if we can lock the mddev, which confirms that
* it isn't being stopped right now.
*/
struct mddev *mddev = mddev_find(bdev->bd_dev);
int err;
if (!mddev)
return -ENODEV;
if (mddev->gendisk != bdev->bd_disk) {
/* we are racing with mddev_put which is discarding this
* bd_disk.
*/
mddev_put(mddev);
/* Wait until bdev->bd_disk is definitely gone */
flush_workqueue(md_misc_wq);
/* Then retry the open from the top */
return -ERESTARTSYS;
}
BUG_ON(mddev != bdev->bd_disk->private_data);
if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
goto out;
err = 0;
atomic_inc(&mddev->openers);
clear_bit(MD_STILL_CLOSED, &mddev->flags);
mutex_unlock(&mddev->open_mutex);
check_disk_change(bdev);
out:
return err;
}
static void md_release(struct gendisk *disk, fmode_t mode)
{
struct mddev *mddev = disk->private_data;
BUG_ON(!mddev);
atomic_dec(&mddev->openers);
mddev_put(mddev);
}
static int md_media_changed(struct gendisk *disk)
{
struct mddev *mddev = disk->private_data;
return mddev->changed;
}
static int md_revalidate(struct gendisk *disk)
{
struct mddev *mddev = disk->private_data;
mddev->changed = 0;
return 0;
}
static const struct block_device_operations md_fops =
{
.owner = THIS_MODULE,
.open = md_open,
.release = md_release,
.ioctl = md_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = md_compat_ioctl,
#endif
.getgeo = md_getgeo,
.media_changed = md_media_changed,
.revalidate_disk= md_revalidate,
};
static int md_thread(void *arg)
{
struct md_thread *thread = arg;
/*
	 * md_thread is a 'system-thread', its priority should be very
* high. We avoid resource deadlocks individually in each
* raid personality. (RAID5 does preallocation) We also use RR and
* the very same RT priority as kswapd, thus we will never get
* into a priority inversion deadlock.
*
* we definitely have to have equal or higher priority than
* bdflush, otherwise bdflush will deadlock if there are too
* many dirty RAID5 blocks.
*/
allow_signal(SIGKILL);
while (!kthread_should_stop()) {
/* We need to wait INTERRUPTIBLE so that
* we don't add to the load-average.
* That means we need to be sure no signals are
* pending
*/
if (signal_pending(current))
flush_signals(current);
wait_event_interruptible_timeout
(thread->wqueue,
test_bit(THREAD_WAKEUP, &thread->flags)
|| kthread_should_stop(),
thread->timeout);
clear_bit(THREAD_WAKEUP, &thread->flags);
if (!kthread_should_stop())
thread->run(thread);
}
return 0;
}
void md_wakeup_thread(struct md_thread *thread)
{
if (thread) {
pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
set_bit(THREAD_WAKEUP, &thread->flags);
wake_up(&thread->wqueue);
}
}
EXPORT_SYMBOL(md_wakeup_thread);
struct md_thread *md_register_thread(void (*run) (struct md_thread *),
struct mddev *mddev, const char *name)
{
struct md_thread *thread;
thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
if (!thread)
return NULL;
init_waitqueue_head(&thread->wqueue);
thread->run = run;
thread->mddev = mddev;
thread->timeout = MAX_SCHEDULE_TIMEOUT;
thread->tsk = kthread_run(md_thread, thread,
"%s_%s",
mdname(thread->mddev),
name);
if (IS_ERR(thread->tsk)) {
kfree(thread);
return NULL;
}
return thread;
}
EXPORT_SYMBOL(md_register_thread);
void md_unregister_thread(struct md_thread **threadp)
{
struct md_thread *thread = *threadp;
if (!thread)
return;
pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
/* Locking ensures that mddev_unlock does not wake_up a
* non-existent thread
*/
spin_lock(&pers_lock);
*threadp = NULL;
spin_unlock(&pers_lock);
kthread_stop(thread->tsk);
kfree(thread);
}
EXPORT_SYMBOL(md_unregister_thread);
void md_error(struct mddev *mddev, struct md_rdev *rdev)
{
if (!rdev || test_bit(Faulty, &rdev->flags))
return;
if (!mddev->pers || !mddev->pers->error_handler)
return;
mddev->pers->error_handler(mddev,rdev);
if (mddev->degraded)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
sysfs_notify_dirent_safe(rdev->sysfs_state);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
if (mddev->event_work.func)
queue_work(md_misc_wq, &mddev->event_work);
md_new_event_inintr(mddev);
}
EXPORT_SYMBOL(md_error);
/* seq_file implementation /proc/mdstat */
static void status_unused(struct seq_file *seq)
{
int i = 0;
struct md_rdev *rdev;
seq_printf(seq, "unused devices: ");
list_for_each_entry(rdev, &pending_raid_disks, same_set) {
char b[BDEVNAME_SIZE];
i++;
seq_printf(seq, "%s ",
bdevname(rdev->bdev,b));
}
if (!i)
seq_printf(seq, "<none>");
seq_printf(seq, "\n");
}
static void status_resync(struct seq_file *seq, struct mddev *mddev)
{
sector_t max_sectors, resync, res;
unsigned long dt, db;
sector_t rt;
int scale;
unsigned int per_milli;
if (mddev->curr_resync <= 3)
resync = 0;
else
resync = mddev->curr_resync
- atomic_read(&mddev->recovery_active);
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
max_sectors = mddev->resync_max_sectors;
else
max_sectors = mddev->dev_sectors;
WARN_ON(max_sectors == 0);
/* Pick 'scale' such that (resync>>scale)*1000 will fit
* in a sector_t, and (max_sectors>>scale) will fit in a
* u32, as those are the requirements for sector_div.
* Thus 'scale' must be at least 10
*/
scale = 10;
if (sizeof(sector_t) > sizeof(unsigned long)) {
while ( max_sectors/2 > (1ULL<<(scale+32)))
scale++;
}
res = (resync>>scale)*1000;
sector_div(res, (u32)((max_sectors>>scale)+1));
per_milli = res;
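	/* Worked example: with scale == 10, resync == 1<<21 and
	 * max_sectors == 1<<23, res = 2048*1000/8193 ~= 249, so the
	 * line below prints "24.9%".  The bar printed next is 20
	 * characters wide: one '=' per 50 per-mille (5%) completed.
	 */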
{
int i, x = per_milli/50, y = 20-x;
seq_printf(seq, "[");
for (i = 0; i < x; i++)
seq_printf(seq, "=");
seq_printf(seq, ">");
for (i = 0; i < y; i++)
seq_printf(seq, ".");
seq_printf(seq, "] ");
}
seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
(test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
"reshape" :
(test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
"check" :
(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
"resync" : "recovery"))),
per_milli/10, per_milli % 10,
(unsigned long long) resync/2,
(unsigned long long) max_sectors/2);
/*
* dt: time from mark until now
* db: blocks written from mark until now
* rt: remaining time
*
* rt is a sector_t, so could be 32bit or 64bit.
* So we divide before multiply in case it is 32bit and close
* to the limit.
* We scale the divisor (db) by 32 to avoid losing precision
* near the end of resync when the number of remaining sectors
* is close to 'db'.
* We then divide rt by 32 after multiplying by db to compensate.
* The '+1' avoids division by zero if db is very small.
*/
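	/* Worked example: with 1,000,000 sectors remaining, db == 2048
	 * sectors and dt == 3 seconds, rt = (1000000/(2048/32+1)) * 3 / 32
	 * ~= 1442 seconds, which prints below as "24.0min".
	 */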
dt = ((jiffies - mddev->resync_mark) / HZ);
if (!dt) dt++;
db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
- mddev->resync_mark_cnt;
rt = max_sectors - resync; /* number of remaining sectors */
sector_div(rt, db/32+1);
rt *= dt;
rt >>= 5;
seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
((unsigned long)rt % 60)/6);
seq_printf(seq, " speed=%ldK/sec", db/2/dt);
}
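/* The md_seq_* iterator below follows the usual seq_file protocol, with
 * two magic cookies: (void*)1 stands for the "Personalities :" header
 * line and (void*)2 for the trailing "unused devices:" line; any other
 * value is a struct mddev pointer with a reference held via mddev_get().
 * The 0x10000 position is an arbitrary sentinel meaning "past the tail".
 */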
static void *md_seq_start(struct seq_file *seq, loff_t *pos)
{
struct list_head *tmp;
loff_t l = *pos;
struct mddev *mddev;
if (l >= 0x10000)
return NULL;
if (!l--)
/* header */
return (void*)1;
spin_lock(&all_mddevs_lock);
list_for_each(tmp,&all_mddevs)
if (!l--) {
mddev = list_entry(tmp, struct mddev, all_mddevs);
mddev_get(mddev);
spin_unlock(&all_mddevs_lock);
return mddev;
}
spin_unlock(&all_mddevs_lock);
if (!l--)
return (void*)2;/* tail */
return NULL;
}
static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct list_head *tmp;
struct mddev *next_mddev, *mddev = v;
++*pos;
if (v == (void*)2)
return NULL;
spin_lock(&all_mddevs_lock);
if (v == (void*)1)
tmp = all_mddevs.next;
else
tmp = mddev->all_mddevs.next;
if (tmp != &all_mddevs)
next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
else {
next_mddev = (void*)2;
*pos = 0x10000;
}
spin_unlock(&all_mddevs_lock);
if (v != (void*)1)
mddev_put(mddev);
return next_mddev;
}
static void md_seq_stop(struct seq_file *seq, void *v)
{
struct mddev *mddev = v;
if (mddev && v != (void*)1 && v != (void*)2)
mddev_put(mddev);
}
static int md_seq_show(struct seq_file *seq, void *v)
{
struct mddev *mddev = v;
sector_t sectors;
struct md_rdev *rdev;
if (v == (void*)1) {
struct md_personality *pers;
seq_printf(seq, "Personalities : ");
spin_lock(&pers_lock);
list_for_each_entry(pers, &pers_list, list)
seq_printf(seq, "[%s] ", pers->name);
spin_unlock(&pers_lock);
seq_printf(seq, "\n");
seq->poll_event = atomic_read(&md_event_count);
return 0;
}
if (v == (void*)2) {
status_unused(seq);
return 0;
}
spin_lock(&mddev->lock);
if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
seq_printf(seq, "%s : %sactive", mdname(mddev),
mddev->pers ? "" : "in");
if (mddev->pers) {
if (mddev->ro==1)
seq_printf(seq, " (read-only)");
if (mddev->ro==2)
seq_printf(seq, " (auto-read-only)");
seq_printf(seq, " %s", mddev->pers->name);
}
sectors = 0;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev) {
char b[BDEVNAME_SIZE];
seq_printf(seq, " %s[%d]",
bdevname(rdev->bdev,b), rdev->desc_nr);
if (test_bit(WriteMostly, &rdev->flags))
seq_printf(seq, "(W)");
if (test_bit(Faulty, &rdev->flags)) {
seq_printf(seq, "(F)");
continue;
}
if (rdev->raid_disk < 0)
seq_printf(seq, "(S)"); /* spare */
if (test_bit(Replacement, &rdev->flags))
seq_printf(seq, "(R)");
sectors += rdev->sectors;
}
rcu_read_unlock();
if (!list_empty(&mddev->disks)) {
if (mddev->pers)
seq_printf(seq, "\n %llu blocks",
(unsigned long long)
mddev->array_sectors / 2);
else
seq_printf(seq, "\n %llu blocks",
(unsigned long long)sectors / 2);
}
if (mddev->persistent) {
if (mddev->major_version != 0 ||
mddev->minor_version != 90) {
seq_printf(seq," super %d.%d",
mddev->major_version,
mddev->minor_version);
}
} else if (mddev->external)
seq_printf(seq, " super external:%s",
mddev->metadata_type);
else
seq_printf(seq, " super non-persistent");
if (mddev->pers) {
mddev->pers->status(seq, mddev);
seq_printf(seq, "\n ");
if (mddev->pers->sync_request) {
if (mddev->curr_resync > 2) {
status_resync(seq, mddev);
seq_printf(seq, "\n ");
} else if (mddev->curr_resync >= 1)
seq_printf(seq, "\tresync=DELAYED\n ");
else if (mddev->recovery_cp < MaxSector)
seq_printf(seq, "\tresync=PENDING\n ");
}
} else
seq_printf(seq, "\n ");
bitmap_status(seq, mddev->bitmap);
seq_printf(seq, "\n");
}
spin_unlock(&mddev->lock);
return 0;
}
static const struct seq_operations md_seq_ops = {
.start = md_seq_start,
.next = md_seq_next,
.stop = md_seq_stop,
.show = md_seq_show,
};
static int md_seq_open(struct inode *inode, struct file *file)
{
struct seq_file *seq;
int error;
error = seq_open(file, &md_seq_ops);
if (error)
return error;
seq = file->private_data;
seq->poll_event = atomic_read(&md_event_count);
return error;
}
static int md_unloading;
static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
{
struct seq_file *seq = filp->private_data;
int mask;
if (md_unloading)
return POLLIN|POLLRDNORM|POLLERR|POLLPRI;
poll_wait(filp, &md_event_waiters, wait);
/* always allow read */
mask = POLLIN | POLLRDNORM;
if (seq->poll_event != atomic_read(&md_event_count))
mask |= POLLERR | POLLPRI;
return mask;
}
static const struct file_operations md_seq_fops = {
.owner = THIS_MODULE,
.open = md_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_private,
.poll = mdstat_poll,
};
int register_md_personality(struct md_personality *p)
{
printk(KERN_INFO "md: %s personality registered for level %d\n",
p->name, p->level);
spin_lock(&pers_lock);
list_add_tail(&p->list, &pers_list);
spin_unlock(&pers_lock);
return 0;
}
EXPORT_SYMBOL(register_md_personality);
int unregister_md_personality(struct md_personality *p)
{
printk(KERN_INFO "md: %s personality unregistered\n", p->name);
spin_lock(&pers_lock);
list_del_init(&p->list);
spin_unlock(&pers_lock);
return 0;
}
EXPORT_SYMBOL(unregister_md_personality);
int register_md_cluster_operations(struct md_cluster_operations *ops, struct module *module)
{
if (md_cluster_ops != NULL)
return -EALREADY;
spin_lock(&pers_lock);
md_cluster_ops = ops;
md_cluster_mod = module;
spin_unlock(&pers_lock);
return 0;
}
EXPORT_SYMBOL(register_md_cluster_operations);
int unregister_md_cluster_operations(void)
{
spin_lock(&pers_lock);
md_cluster_ops = NULL;
spin_unlock(&pers_lock);
return 0;
}
EXPORT_SYMBOL(unregister_md_cluster_operations);
int md_setup_cluster(struct mddev *mddev, int nodes)
{
int err;
err = request_module("md-cluster");
if (err) {
pr_err("md-cluster module not found.\n");
return -ENOENT;
}
spin_lock(&pers_lock);
if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
spin_unlock(&pers_lock);
return -ENOENT;
}
spin_unlock(&pers_lock);
return md_cluster_ops->join(mddev, nodes);
}
void md_cluster_stop(struct mddev *mddev)
{
if (!md_cluster_ops)
return;
md_cluster_ops->leave(mddev);
module_put(md_cluster_mod);
}
static int is_mddev_idle(struct mddev *mddev, int init)
{
struct md_rdev *rdev;
int idle;
int curr_events;
idle = 1;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev) {
struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
(int)part_stat_read(&disk->part0, sectors[1]) -
atomic_read(&disk->sync_io);
/* sync IO will cause sync_io to increase before the disk_stats
* as sync_io is counted when a request starts, and
* disk_stats is counted when it completes.
* So resync activity will cause curr_events to be smaller than
* when there was no such activity.
* non-sync IO will cause disk_stat to increase without
* increasing sync_io so curr_events will (eventually)
* be larger than it was before. Once it becomes
* substantially larger, the test below will cause
* the array to appear non-idle, and resync will slow
* down.
* If there is a lot of outstanding resync activity when
* we set last_event to curr_events, then all that activity
* completing might cause the array to appear non-idle
* and resync will be slowed down even though there might
* not have been non-resync activity. This will only
* happen once though. 'last_events' will soon reflect
* the state where there is little or no outstanding
* resync requests, and further resync activity will
* always make curr_events less than last_events.
*
*/
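		/* The '64' below is a fuzz factor: small differences are
		 * expected from the accounting race described above, so
		 * only a substantially larger event count is treated as
		 * genuine non-sync activity.
		 */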
if (init || curr_events - rdev->last_events > 64) {
rdev->last_events = curr_events;
idle = 0;
}
}
rcu_read_unlock();
return idle;
}
void md_done_sync(struct mddev *mddev, int blocks, int ok)
{
/* another "blocks" (512byte) blocks have been synced */
atomic_sub(blocks, &mddev->recovery_active);
wake_up(&mddev->recovery_wait);
if (!ok) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
md_wakeup_thread(mddev->thread);
		/* stop recovery, signal do_sync */
}
}
EXPORT_SYMBOL(md_done_sync);
/* md_write_start(mddev, bi)
* If we need to update some array metadata (e.g. 'active' flag
* in superblock) before writing, schedule a superblock update
* and wait for it to complete.
*/
void md_write_start(struct mddev *mddev, struct bio *bi)
{
int did_change = 0;
if (bio_data_dir(bi) != WRITE)
return;
BUG_ON(mddev->ro == 1);
if (mddev->ro == 2) {
/* need to switch to read/write */
mddev->ro = 0;
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread);
did_change = 1;
}
atomic_inc(&mddev->writes_pending);
if (mddev->safemode == 1)
mddev->safemode = 0;
if (mddev->in_sync) {
spin_lock(&mddev->lock);
if (mddev->in_sync) {
mddev->in_sync = 0;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
set_bit(MD_CHANGE_PENDING, &mddev->flags);
md_wakeup_thread(mddev->thread);
did_change = 1;
}
spin_unlock(&mddev->lock);
}
if (did_change)
sysfs_notify_dirent_safe(mddev->sysfs_state);
wait_event(mddev->sb_wait,
!test_bit(MD_CHANGE_PENDING, &mddev->flags));
}
EXPORT_SYMBOL(md_write_start);
void md_write_end(struct mddev *mddev)
{
if (atomic_dec_and_test(&mddev->writes_pending)) {
if (mddev->safemode == 2)
md_wakeup_thread(mddev->thread);
else if (mddev->safemode_delay)
mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
}
}
EXPORT_SYMBOL(md_write_end);
/* md_allow_write(mddev)
* Calling this ensures that the array is marked 'active' so that writes
* may proceed without blocking. It is important to call this before
* attempting a GFP_KERNEL allocation while holding the mddev lock.
* Must be called with mddev_lock held.
*
* In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock
* is dropped, so return -EAGAIN after notifying userspace.
*/
int md_allow_write(struct mddev *mddev)
{
if (!mddev->pers)
return 0;
if (mddev->ro)
return 0;
if (!mddev->pers->sync_request)
return 0;
spin_lock(&mddev->lock);
if (mddev->in_sync) {
mddev->in_sync = 0;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
set_bit(MD_CHANGE_PENDING, &mddev->flags);
if (mddev->safemode_delay &&
mddev->safemode == 0)
mddev->safemode = 1;
spin_unlock(&mddev->lock);
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_start(mddev);
md_update_sb(mddev, 0);
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
sysfs_notify_dirent_safe(mddev->sysfs_state);
} else
spin_unlock(&mddev->lock);
if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
return -EAGAIN;
else
return 0;
}
EXPORT_SYMBOL_GPL(md_allow_write);
#define SYNC_MARKS 10
#define SYNC_MARK_STEP (3*HZ)
#define UPDATE_FREQUENCY (5*60*HZ)
void md_do_sync(struct md_thread *thread)
{
struct mddev *mddev = thread->mddev;
struct mddev *mddev2;
unsigned int currspeed = 0,
window;
sector_t max_sectors,j, io_sectors, recovery_done;
unsigned long mark[SYNC_MARKS];
unsigned long update_time;
sector_t mark_cnt[SYNC_MARKS];
int last_mark,m;
struct list_head *tmp;
sector_t last_check;
int skipped = 0;
struct md_rdev *rdev;
char *desc, *action = NULL;
struct blk_plug plug;
	/* just in case thread restarts... */
if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
return;
if (mddev->ro) {/* never try to sync a read-only array */
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
return;
}
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
desc = "data-check";
action = "check";
} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
desc = "requested-resync";
action = "repair";
} else
desc = "resync";
} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
desc = "reshape";
else
desc = "recovery";
mddev->last_sync_action = action ?: desc;
/* we overload curr_resync somewhat here.
* 0 == not engaged in resync at all
* 2 == checking that there is no conflict with another sync
* 1 == like 2, but have yielded to allow conflicting resync to
	 * commence
* other == active in resync - this many blocks
*
* Before starting a resync we must have set curr_resync to
* 2, and then checked that every "conflicting" array has curr_resync
* less than ours. When we find one that is the same or higher
* we wait on resync_wait. To avoid deadlock, we reduce curr_resync
* to 1 if we choose to yield (based arbitrarily on address of mddev structure).
* This will mean we have to start checking from the beginning again.
*
*/
do {
mddev->curr_resync = 2;
try_again:
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
goto skip;
for_each_mddev(mddev2, tmp) {
if (mddev2 == mddev)
continue;
if (!mddev->parallel_resync
&& mddev2->curr_resync
&& match_mddev_units(mddev, mddev2)) {
DEFINE_WAIT(wq);
if (mddev < mddev2 && mddev->curr_resync == 2) {
/* arbitrarily yield */
mddev->curr_resync = 1;
wake_up(&resync_wait);
}
if (mddev > mddev2 && mddev->curr_resync == 1)
/* no need to wait here, we can wait the next
* time 'round when curr_resync == 2
*/
continue;
/* We need to wait 'interruptible' so as not to
* contribute to the load average, and not to
* be caught by 'softlockup'
*/
prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
mddev2->curr_resync >= mddev->curr_resync) {
printk(KERN_INFO "md: delaying %s of %s"
" until %s has finished (they"
" share one or more physical units)\n",
desc, mdname(mddev), mdname(mddev2));
mddev_put(mddev2);
if (signal_pending(current))
flush_signals(current);
schedule();
finish_wait(&resync_wait, &wq);
goto try_again;
}
finish_wait(&resync_wait, &wq);
}
}
} while (mddev->curr_resync < 2);
j = 0;
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
/* resync follows the size requested by the personality,
* which defaults to physical size, but can be virtual size
*/
max_sectors = mddev->resync_max_sectors;
atomic64_set(&mddev->resync_mismatches, 0);
/* we don't use the checkpoint if there's a bitmap */
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
j = mddev->resync_min;
else if (!mddev->bitmap)
j = mddev->recovery_cp;
} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
max_sectors = mddev->resync_max_sectors;
else {
/* recovery follows the physical size of devices */
max_sectors = mddev->dev_sectors;
j = MaxSector;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev)
if (rdev->raid_disk >= 0 &&
!test_bit(Faulty, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags) &&
rdev->recovery_offset < j)
j = rdev->recovery_offset;
rcu_read_unlock();
/* If there is a bitmap, we need to make sure all
* writes that started before we added a spare
* complete before we start doing a recovery.
* Otherwise the write might complete and (via
* bitmap_endwrite) set a bit in the bitmap after the
* recovery has checked that bit and skipped that
* region.
*/
if (mddev->bitmap) {
mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0);
}
}
printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
printk(KERN_INFO "md: minimum _guaranteed_ speed:"
" %d KB/sec/disk.\n", speed_min(mddev));
printk(KERN_INFO "md: using maximum available idle IO bandwidth "
"(but not more than %d KB/sec) for %s.\n",
speed_max(mddev), desc);
is_mddev_idle(mddev, 1); /* this initializes IO event counters */
io_sectors = 0;
for (m = 0; m < SYNC_MARKS; m++) {
mark[m] = jiffies;
mark_cnt[m] = io_sectors;
}
last_mark = 0;
mddev->resync_mark = mark[last_mark];
mddev->resync_mark_cnt = mark_cnt[last_mark];
/*
* Tune reconstruction:
*/
window = 32*(PAGE_SIZE/512);
printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n",
window/2, (unsigned long long)max_sectors/2);
atomic_set(&mddev->recovery_active, 0);
last_check = 0;
if (j>2) {
printk(KERN_INFO
"md: resuming %s of %s from checkpoint.\n",
desc, mdname(mddev));
mddev->curr_resync = j;
} else
mddev->curr_resync = 3; /* no longer delayed */
mddev->curr_resync_completed = j;
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
md_new_event(mddev);
update_time = jiffies;
if (mddev_is_clustered(mddev))
md_cluster_ops->resync_start(mddev, j, max_sectors);
blk_start_plug(&plug);
while (j < max_sectors) {
sector_t sectors;
skipped = 0;
if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
((mddev->curr_resync > mddev->curr_resync_completed &&
(mddev->curr_resync - mddev->curr_resync_completed)
> (max_sectors >> 4)) ||
time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
(j - mddev->curr_resync_completed)*2
>= mddev->resync_max - mddev->curr_resync_completed
)) {
/* time to update curr_resync_completed */
wait_event(mddev->recovery_wait,
atomic_read(&mddev->recovery_active) == 0);
mddev->curr_resync_completed = j;
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
j > mddev->recovery_cp)
mddev->recovery_cp = j;
update_time = jiffies;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
while (j >= mddev->resync_max &&
!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
/* As this condition is controlled by user-space,
* we can block indefinitely, so use '_interruptible'
* to avoid triggering warnings.
*/
flush_signals(current); /* just in case */
wait_event_interruptible(mddev->recovery_wait,
mddev->resync_max > j
|| test_bit(MD_RECOVERY_INTR,
&mddev->recovery));
}
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
break;
sectors = mddev->pers->sync_request(mddev, j, &skipped);
if (sectors == 0) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
break;
}
if (!skipped) { /* actual IO requested */
io_sectors += sectors;
atomic_add(sectors, &mddev->recovery_active);
}
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
break;
j += sectors;
if (j > 2)
mddev->curr_resync = j;
if (mddev_is_clustered(mddev))
md_cluster_ops->resync_info_update(mddev, j, max_sectors);
mddev->curr_mark_cnt = io_sectors;
if (last_check == 0)
/* this is the earliest that rebuild will be
* visible in /proc/mdstat
*/
md_new_event(mddev);
if (last_check + window > io_sectors || j == max_sectors)
continue;
last_check = io_sectors;
repeat:
if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
/* step marks */
int next = (last_mark+1) % SYNC_MARKS;
mddev->resync_mark = mark[next];
mddev->resync_mark_cnt = mark_cnt[next];
mark[next] = jiffies;
mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
last_mark = next;
}
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
break;
/*
		 * this loop exits only when either we are slower than
		 * the 'hard' speed limit or the system was IO-idle for
* a jiffy.
* the system might be non-idle CPU-wise, but we only care
* about not overloading the IO subsystem. (things like an
* e2fsck being done on the RAID array should execute fast)
*/
cond_resched();
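		/* currspeed is in KiB/sec: sectors/2 converts 512-byte
		 * sectors to KiB; the two '+1's guard against dividing by
		 * zero in the first second and against a zero rate.
		 */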
recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
/((jiffies-mddev->resync_mark)/HZ +1) +1;
if (currspeed > speed_min(mddev)) {
if (currspeed > speed_max(mddev)) {
msleep(500);
goto repeat;
}
if (!is_mddev_idle(mddev, 0)) {
/*
* Give other IO more of a chance.
* The faster the devices, the less we wait.
*/
wait_event(mddev->recovery_wait,
!atomic_read(&mddev->recovery_active));
}
}
}
printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc,
test_bit(MD_RECOVERY_INTR, &mddev->recovery)
? "interrupted" : "done");
/*
* this also signals 'finished resyncing' to md_stop
*/
blk_finish_plug(&plug);
wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
/* tell personality that we are finished */
mddev->pers->sync_request(mddev, max_sectors, &skipped);
if (mddev_is_clustered(mddev))
md_cluster_ops->resync_finish(mddev);
if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
mddev->curr_resync > 2) {
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
if (mddev->curr_resync >= mddev->recovery_cp) {
printk(KERN_INFO
"md: checkpointing %s of %s.\n",
desc, mdname(mddev));
if (test_bit(MD_RECOVERY_ERROR,
&mddev->recovery))
mddev->recovery_cp =
mddev->curr_resync_completed;
else
mddev->recovery_cp =
mddev->curr_resync;
}
} else
mddev->recovery_cp = MaxSector;
} else {
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
mddev->curr_resync = MaxSector;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev)
if (rdev->raid_disk >= 0 &&
mddev->delta_disks >= 0 &&
!test_bit(Faulty, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags) &&
rdev->recovery_offset < mddev->curr_resync)
rdev->recovery_offset = mddev->curr_resync;
rcu_read_unlock();
}
}
skip:
set_bit(MD_CHANGE_DEVS, &mddev->flags);
spin_lock(&mddev->lock);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
/* We completed so min/max setting can be forgotten if used. */
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
mddev->resync_min = 0;
mddev->resync_max = MaxSector;
} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
mddev->resync_min = mddev->curr_resync_completed;
mddev->curr_resync = 0;
spin_unlock(&mddev->lock);
wake_up(&resync_wait);
set_bit(MD_RECOVERY_DONE, &mddev->recovery);
md_wakeup_thread(mddev->thread);
return;
}
EXPORT_SYMBOL_GPL(md_do_sync);
static int remove_and_add_spares(struct mddev *mddev,
struct md_rdev *this)
{
struct md_rdev *rdev;
int spares = 0;
int removed = 0;
rdev_for_each(rdev, mddev)
if ((this == NULL || rdev == this) &&
rdev->raid_disk >= 0 &&
!test_bit(Blocked, &rdev->flags) &&
(test_bit(Faulty, &rdev->flags) ||
! test_bit(In_sync, &rdev->flags)) &&
atomic_read(&rdev->nr_pending)==0) {
if (mddev->pers->hot_remove_disk(
mddev, rdev) == 0) {
sysfs_unlink_rdev(mddev, rdev);
rdev->raid_disk = -1;
removed++;
}
}
if (removed && mddev->kobj.sd)
sysfs_notify(&mddev->kobj, NULL, "degraded");
if (this)
goto no_add;
rdev_for_each(rdev, mddev) {
if (rdev->raid_disk >= 0 &&
!test_bit(In_sync, &rdev->flags) &&
!test_bit(Faulty, &rdev->flags))
spares++;
if (rdev->raid_disk >= 0)
continue;
if (test_bit(Faulty, &rdev->flags))
continue;
if (mddev->ro &&
! (rdev->saved_raid_disk >= 0 &&
!test_bit(Bitmap_sync, &rdev->flags)))
continue;
if (rdev->saved_raid_disk < 0)
rdev->recovery_offset = 0;
if (mddev->pers->
hot_add_disk(mddev, rdev) == 0) {
if (sysfs_link_rdev(mddev, rdev))
/* failure here is OK */;
spares++;
md_new_event(mddev);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
}
}
no_add:
if (removed)
set_bit(MD_CHANGE_DEVS, &mddev->flags);
return spares;
}
static void md_start_sync(struct work_struct *ws)
{
struct mddev *mddev = container_of(ws, struct mddev, del_work);
mddev->sync_thread = md_register_thread(md_do_sync,
mddev,
"resync");
if (!mddev->sync_thread) {
printk(KERN_ERR "%s: could not start resync"
" thread...\n",
mdname(mddev));
/* leave the spares where they are, it shouldn't hurt */
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
wake_up(&resync_wait);
if (test_and_clear_bit(MD_RECOVERY_RECOVER,
&mddev->recovery))
if (mddev->sysfs_action)
sysfs_notify_dirent_safe(mddev->sysfs_action);
} else
md_wakeup_thread(mddev->sync_thread);
sysfs_notify_dirent_safe(mddev->sysfs_action);
md_new_event(mddev);
}
/*
* This routine is regularly called by all per-raid-array threads to
* deal with generic issues like resync and super-block update.
* Raid personalities that don't have a thread (linear/raid0) do not
* need this as they never do any recovery or update the superblock.
*
* It does not do any resync itself, but rather "forks" off other threads
* to do that as needed.
* When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
* "->recovery" and create a thread at ->sync_thread.
* When the thread finishes it sets MD_RECOVERY_DONE
 * and wakes up this thread, which will reap the thread and finish up.
* This thread also removes any faulty devices (with nr_pending == 0).
*
* The overall approach is:
* 1/ if the superblock needs updating, update it.
* 2/ If a recovery thread is running, don't do anything else.
* 3/ If recovery has finished, clean up, possibly marking spares active.
* 4/ If there are any faulty devices, remove them.
 * 5/ If array is degraded, try to add spare devices
* 6/ If array has spares or is not in-sync, start a resync thread.
*/
void md_check_recovery(struct mddev *mddev)
{
if (mddev->suspended)
return;
if (mddev->bitmap)
bitmap_daemon_work(mddev);
if (signal_pending(current)) {
if (mddev->pers->sync_request && !mddev->external) {
printk(KERN_INFO "md: %s in immediate safe mode\n",
mdname(mddev));
mddev->safemode = 2;
}
flush_signals(current);
}
if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
return;
if ( ! (
(mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
(mddev->external == 0 && mddev->safemode == 1) ||
(mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
&& !mddev->in_sync && mddev->recovery_cp == MaxSector)
))
return;
if (mddev_trylock(mddev)) {
int spares = 0;
if (mddev->ro) {
struct md_rdev *rdev;
if (!mddev->external && mddev->in_sync)
/* 'Blocked' flag not needed as failed devices
* will be recorded if array switched to read/write.
* Leaving it set will prevent the device
* from being removed.
*/
rdev_for_each(rdev, mddev)
clear_bit(Blocked, &rdev->flags);
/* On a read-only array we can:
* - remove failed devices
* - add already-in_sync devices if the array itself
* is in-sync.
* As we only add devices that are already in-sync,
* we can activate the spares immediately.
*/
remove_and_add_spares(mddev, NULL);
/* There is no thread, but we need to call
* ->spare_active and clear saved_raid_disk
*/
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_reap_sync_thread(mddev);
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
goto unlock;
}
if (!mddev->external) {
int did_change = 0;
spin_lock(&mddev->lock);
if (mddev->safemode &&
!atomic_read(&mddev->writes_pending) &&
!mddev->in_sync &&
mddev->recovery_cp == MaxSector) {
mddev->in_sync = 1;
did_change = 1;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
}
if (mddev->safemode == 1)
mddev->safemode = 0;
spin_unlock(&mddev->lock);
if (did_change)
sysfs_notify_dirent_safe(mddev->sysfs_state);
}
if (mddev->flags & MD_UPDATE_SB_FLAGS) {
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_start(mddev);
md_update_sb(mddev, 0);
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
}
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
/* resync/recovery still happening */
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
goto unlock;
}
if (mddev->sync_thread) {
md_reap_sync_thread(mddev);
goto unlock;
}
/* Set RUNNING before clearing NEEDED to avoid
* any transients in the value of "sync_action".
*/
mddev->curr_resync_completed = 0;
spin_lock(&mddev->lock);
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
spin_unlock(&mddev->lock);
/* Clear some bits that don't mean anything, but
* might be left set
*/
clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
goto not_running;
/* no recovery is running.
* remove any failed drives, then
* add spares if possible.
* Spares are also removed and re-added, to allow
* the personality to fail the re-add.
*/
if (mddev->reshape_position != MaxSector) {
if (mddev->pers->check_reshape == NULL ||
mddev->pers->check_reshape(mddev) != 0)
/* Cannot proceed */
goto not_running;
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
} else if ((spares = remove_and_add_spares(mddev, NULL))) {
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
} else if (mddev->recovery_cp < MaxSector) {
set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
/* nothing to be done ... */
goto not_running;
if (mddev->pers->sync_request) {
if (spares) {
/* We are adding a device or devices to an array
* which has the bitmap stored on all devices.
* So make sure all bitmap pages get written
*/
bitmap_write_all(mddev->bitmap);
}
INIT_WORK(&mddev->del_work, md_start_sync);
queue_work(md_misc_wq, &mddev->del_work);
goto unlock;
}
not_running:
if (!mddev->sync_thread) {
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
wake_up(&resync_wait);
if (test_and_clear_bit(MD_RECOVERY_RECOVER,
&mddev->recovery))
if (mddev->sysfs_action)
sysfs_notify_dirent_safe(mddev->sysfs_action);
}
unlock:
wake_up(&mddev->sb_wait);
mddev_unlock(mddev);
}
}
EXPORT_SYMBOL(md_check_recovery);
void md_reap_sync_thread(struct mddev *mddev)
{
struct md_rdev *rdev;
/* resync has finished, collect result */
md_unregister_thread(&mddev->sync_thread);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* success...*/
/* activate any spares */
if (mddev->pers->spare_active(mddev)) {
sysfs_notify(&mddev->kobj, NULL,
"degraded");
set_bit(MD_CHANGE_DEVS, &mddev->flags);
}
}
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_start(mddev);
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
mddev->pers->finish_reshape)
mddev->pers->finish_reshape(mddev);
	/* If array is no longer degraded, then any saved_raid_disk
* information must be scrapped.
*/
if (!mddev->degraded)
rdev_for_each(rdev, mddev)
rdev->saved_raid_disk = -1;
md_update_sb(mddev, 1);
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
wake_up(&resync_wait);
/* flag recovery needed just to double check */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
sysfs_notify_dirent_safe(mddev->sysfs_action);
md_new_event(mddev);
if (mddev->event_work.func)
queue_work(md_misc_wq, &mddev->event_work);
}
EXPORT_SYMBOL(md_reap_sync_thread);
void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
sysfs_notify_dirent_safe(rdev->sysfs_state);
wait_event_timeout(rdev->blocked_wait,
!test_bit(Blocked, &rdev->flags) &&
!test_bit(BlockedBadBlocks, &rdev->flags),
msecs_to_jiffies(5000));
rdev_dec_pending(rdev, mddev);
}
EXPORT_SYMBOL(md_wait_for_blocked_rdev);
void md_finish_reshape(struct mddev *mddev)
{
	/* called by the personality module when reshape completes. */
struct md_rdev *rdev;
rdev_for_each(rdev, mddev) {
if (rdev->data_offset > rdev->new_data_offset)
rdev->sectors += rdev->data_offset - rdev->new_data_offset;
else
rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
rdev->data_offset = rdev->new_data_offset;
}
}
EXPORT_SYMBOL(md_finish_reshape);
/* Bad block management.
* We can record which blocks on each device are 'bad' and so just
* fail those blocks, or that stripe, rather than the whole device.
* Entries in the bad-block table are 64bits wide. This comprises:
* Length of bad-range, in sectors: 0-511 for lengths 1-512
* Start of bad-range, sector offset, 54 bits (allows 8 exbibytes)
* A 'shift' can be set so that larger blocks are tracked and
* consequently larger devices can be covered.
 * 'Acknowledged' flag - 1 bit - the most significant bit.
*
* Locking of the bad-block table uses a seqlock so md_is_badblock
* might need to retry if it is very unlucky.
* We will sometimes want to check for bad blocks in a bi_end_io function,
* so we use the write_seqlock_irq variant.
*
* When looking for a bad block we specify a range and want to
* know if any block in the range is bad. So we binary-search
* to the last range that starts at-or-before the given endpoint,
* (or "before the sector after the target range")
* then see if it ends after the given start.
* We return
* 0 if there are no known bad blocks in the range
 * 1 if there are known bad blocks which are all acknowledged
* -1 if there are bad blocks which have not yet been acknowledged in metadata.
* plus the start/length of the first bad section we overlap.
*/
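/* Illustrative sketch of the entry layout described above.  This is a
 * hypothetical, standalone mirror of the real BB_* macros (which live in
 * md.h); the EX_ names exist only to make the 9-bit length / 54-bit
 * offset / 1-bit ack packing concrete and are not used by the driver.
 */
#if 0
#include <stdint.h>
#include <assert.h>

#define EX_BB_LEN_MASK    0x00000000000001FFULL	/* bits 8..0: length-1 */
#define EX_BB_OFFSET_MASK 0x7FFFFFFFFFFFFE00ULL	/* bits 62..9: start sector */
#define EX_BB_ACK_MASK    0x8000000000000000ULL	/* bit 63: acknowledged */

#define EX_BB_OFFSET(x) (((x) & EX_BB_OFFSET_MASK) >> 9)
#define EX_BB_LEN(x)    (((x) & EX_BB_LEN_MASK) + 1)
#define EX_BB_ACK(x)    (!!((x) & EX_BB_ACK_MASK))
#define EX_BB_MAKE(a, l, ack) \
	(((uint64_t)(a) << 9) | ((l) - 1) | ((uint64_t)(!!(ack)) << 63))

static void ex_badblocks_demo(void)
{
	/* 8 bad sectors starting at sector 123456, acknowledged */
	uint64_t e = EX_BB_MAKE(123456ULL, 8, 1);

	assert(EX_BB_OFFSET(e) == 123456);
	assert(EX_BB_LEN(e) == 8);
	assert(EX_BB_ACK(e) == 1);
}
#endif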
int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
sector_t *first_bad, int *bad_sectors)
{
int hi;
int lo;
u64 *p = bb->page;
int rv;
sector_t target = s + sectors;
unsigned seq;
if (bb->shift > 0) {
/* round the start down, and the end up */
s >>= bb->shift;
target += (1<<bb->shift) - 1;
target >>= bb->shift;
sectors = target - s;
}
/* 'target' is now the first block after the bad range */
retry:
seq = read_seqbegin(&bb->lock);
lo = 0;
rv = 0;
hi = bb->count;
/* Binary search between lo and hi for 'target'
* i.e. for the last range that starts before 'target'
*/
/* INVARIANT: ranges before 'lo' and at-or-after 'hi'
* are known not to be the last range before target.
* VARIANT: hi-lo is the number of possible
* ranges, and decreases until it reaches 1
*/
while (hi - lo > 1) {
int mid = (lo + hi) / 2;
sector_t a = BB_OFFSET(p[mid]);
if (a < target)
/* This could still be the one, earlier ranges
* could not. */
lo = mid;
else
/* This and later ranges are definitely out. */
hi = mid;
}
/* 'lo' might be the last that started before target, but 'hi' isn't */
if (hi > lo) {
		/* need to check all ranges that end after 's' to see if
* any are unacknowledged.
*/
while (lo >= 0 &&
BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
if (BB_OFFSET(p[lo]) < target) {
/* starts before the end, and finishes after
* the start, so they must overlap
*/
if (rv != -1 && BB_ACK(p[lo]))
rv = 1;
else
rv = -1;
*first_bad = BB_OFFSET(p[lo]);
*bad_sectors = BB_LEN(p[lo]);
}
lo--;
}
}
if (read_seqretry(&bb->lock, seq))
goto retry;
return rv;
}
EXPORT_SYMBOL_GPL(md_is_badblock);
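/* Worked example (hypothetical table contents): with two entries,
 * {start 100, len 8, acked} and {start 200, len 4, unacked},
 * md_is_badblock(bb, 102, 4, ...) returns 1 with *first_bad == 100 and
 * *bad_sectors == 8, while md_is_badblock(bb, 198, 8, ...) overlaps the
 * unacknowledged entry and returns -1 with *first_bad == 200.
 */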
/*
* Add a range of bad blocks to the table.
* This might extend the table, or might contract it
* if two adjacent ranges can be merged.
* We binary-search to find the 'insertion' point, then
* decide how best to handle it.
*/
static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
int acknowledged)
{
u64 *p;
int lo, hi;
int rv = 1;
unsigned long flags;
if (bb->shift < 0)
/* badblocks are disabled */
return 0;
if (bb->shift) {
/* round the start down, and the end up */
sector_t next = s + sectors;
s >>= bb->shift;
next += (1<<bb->shift) - 1;
next >>= bb->shift;
sectors = next - s;
}
write_seqlock_irqsave(&bb->lock, flags);
p = bb->page;
lo = 0;
hi = bb->count;
/* Find the last range that starts at-or-before 's' */
while (hi - lo > 1) {
int mid = (lo + hi) / 2;
sector_t a = BB_OFFSET(p[mid]);
if (a <= s)
lo = mid;
else
hi = mid;
}
if (hi > lo && BB_OFFSET(p[lo]) > s)
hi = lo;
if (hi > lo) {
/* we found a range that might merge with the start
* of our new range
*/
sector_t a = BB_OFFSET(p[lo]);
sector_t e = a + BB_LEN(p[lo]);
int ack = BB_ACK(p[lo]);
if (e >= s) {
/* Yes, we can merge with a previous range */
if (s == a && s + sectors >= e)
/* new range covers old */
ack = acknowledged;
else
ack = ack && acknowledged;
if (e < s + sectors)
e = s + sectors;
if (e - a <= BB_MAX_LEN) {
p[lo] = BB_MAKE(a, e-a, ack);
s = e;
} else {
/* does not all fit in one range,
* make p[lo] maximal
*/
if (BB_LEN(p[lo]) != BB_MAX_LEN)
p[lo] = BB_MAKE(a, BB_MAX_LEN, ack);
s = a + BB_MAX_LEN;
}
sectors = e - s;
}
}
if (sectors && hi < bb->count) {
/* 'hi' points to the first range that starts after 's'.
* Maybe we can merge with the start of that range */
sector_t a = BB_OFFSET(p[hi]);
sector_t e = a + BB_LEN(p[hi]);
int ack = BB_ACK(p[hi]);
if (a <= s + sectors) {
/* merging is possible */
if (e <= s + sectors) {
/* full overlap */
e = s + sectors;
ack = acknowledged;
} else
ack = ack && acknowledged;
a = s;
if (e - a <= BB_MAX_LEN) {
p[hi] = BB_MAKE(a, e-a, ack);
s = e;
} else {
p[hi] = BB_MAKE(a, BB_MAX_LEN, ack);
s = a + BB_MAX_LEN;
}
sectors = e - s;
lo = hi;
hi++;
}
}
if (sectors == 0 && hi < bb->count) {
/* we might be able to combine lo and hi */
/* Note: 's' is at the end of 'lo' */
sector_t a = BB_OFFSET(p[hi]);
int lolen = BB_LEN(p[lo]);
int hilen = BB_LEN(p[hi]);
int newlen = lolen + hilen - (s - a);
if (s >= a && newlen < BB_MAX_LEN) {
/* yes, we can combine them */
int ack = BB_ACK(p[lo]) && BB_ACK(p[hi]);
p[lo] = BB_MAKE(BB_OFFSET(p[lo]), newlen, ack);
memmove(p + hi, p + hi + 1,
(bb->count - hi - 1) * 8);
bb->count--;
}
}
while (sectors) {
		/* didn't merge all of it;
* Need to add a range just before 'hi' */
if (bb->count >= MD_MAX_BADBLOCKS) {
/* No room for more */
rv = 0;
break;
} else {
int this_sectors = sectors;
memmove(p + hi + 1, p + hi,
(bb->count - hi) * 8);
bb->count++;
if (this_sectors > BB_MAX_LEN)
this_sectors = BB_MAX_LEN;
p[hi] = BB_MAKE(s, this_sectors, acknowledged);
sectors -= this_sectors;
s += this_sectors;
}
}
bb->changed = 1;
if (!acknowledged)
bb->unacked_exist = 1;
write_sequnlock_irqrestore(&bb->lock, flags);
return rv;
}
int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
int is_new)
{
int rv;
if (is_new)
s += rdev->new_data_offset;
else
s += rdev->data_offset;
rv = md_set_badblocks(&rdev->badblocks,
s, sectors, 0);
if (rv) {
/* Make sure they get written out promptly */
sysfs_notify_dirent_safe(rdev->sysfs_state);
set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
md_wakeup_thread(rdev->mddev->thread);
}
return rv;
}
EXPORT_SYMBOL_GPL(rdev_set_badblocks);
/*
* Remove a range of bad blocks from the table.
 * This may involve extending the table if we split a region,
* but it must not fail. So if the table becomes full, we just
* drop the remove request.
*/
static int md_clear_badblocks(struct badblocks *bb, sector_t s, int sectors)
{
u64 *p;
int lo, hi;
sector_t target = s + sectors;
int rv = 0;
if (bb->shift > 0) {
/* When clearing we round the start up and the end down.
* This should not matter as the shift should align with
* the block size and no rounding should ever be needed.
		 * However it is better to think a block is bad when it
* isn't than to think a block is not bad when it is.
*/
s += (1<<bb->shift) - 1;
s >>= bb->shift;
target >>= bb->shift;
sectors = target - s;
}
write_seqlock_irq(&bb->lock);
p = bb->page;
lo = 0;
hi = bb->count;
/* Find the last range that starts before 'target' */
while (hi - lo > 1) {
int mid = (lo + hi) / 2;
sector_t a = BB_OFFSET(p[mid]);
if (a < target)
lo = mid;
else
hi = mid;
}
if (hi > lo) {
/* p[lo] is the last range that could overlap the
* current range. Earlier ranges could also overlap,
* but only this one can overlap the end of the range.
*/
if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) {
/* Partial overlap, leave the tail of this range */
int ack = BB_ACK(p[lo]);
sector_t a = BB_OFFSET(p[lo]);
sector_t end = a + BB_LEN(p[lo]);
if (a < s) {
/* we need to split this range */
if (bb->count >= MD_MAX_BADBLOCKS) {
rv = -ENOSPC;
goto out;
}
memmove(p+lo+1, p+lo, (bb->count - lo) * 8);
bb->count++;
p[lo] = BB_MAKE(a, s-a, ack);
lo++;
}
p[lo] = BB_MAKE(target, end - target, ack);
/* there is no longer an overlap */
hi = lo;
lo--;
}
while (lo >= 0 &&
BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
/* This range does overlap */
if (BB_OFFSET(p[lo]) < s) {
/* Keep the early parts of this range. */
int ack = BB_ACK(p[lo]);
sector_t start = BB_OFFSET(p[lo]);
p[lo] = BB_MAKE(start, s - start, ack);
				/* now lo doesn't overlap, so we can stop */
break;
}
lo--;
}
/* 'lo' is strictly before, 'hi' is strictly after,
* anything between needs to be discarded
*/
if (hi - lo > 1) {
memmove(p+lo+1, p+hi, (bb->count - hi) * 8);
bb->count -= (hi - lo - 1);
}
}
bb->changed = 1;
out:
write_sequnlock_irq(&bb->lock);
return rv;
}
int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
int is_new)
{
if (is_new)
s += rdev->new_data_offset;
else
s += rdev->data_offset;
return md_clear_badblocks(&rdev->badblocks,
s, sectors);
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
/*
* Acknowledge all bad blocks in a list.
* This only succeeds if ->changed is clear. It is used by
* in-kernel metadata updates
*/
void md_ack_all_badblocks(struct badblocks *bb)
{
if (bb->page == NULL || bb->changed)
/* no point even trying */
return;
write_seqlock_irq(&bb->lock);
if (bb->changed == 0 && bb->unacked_exist) {
u64 *p = bb->page;
int i;
for (i = 0; i < bb->count ; i++) {
if (!BB_ACK(p[i])) {
sector_t start = BB_OFFSET(p[i]);
int len = BB_LEN(p[i]);
p[i] = BB_MAKE(start, len, 1);
}
}
bb->unacked_exist = 0;
}
write_sequnlock_irq(&bb->lock);
}
EXPORT_SYMBOL_GPL(md_ack_all_badblocks);
/* sysfs access to bad-blocks list.
* We present two files.
* 'bad-blocks' lists sector numbers and lengths of ranges that
* are recorded as bad. The list is truncated to fit within
* the one-page limit of sysfs.
* Writing "sector length" to this file adds an acknowledged
* bad block list.
* 'unacknowledged-bad-blocks' lists bad blocks that have not yet
* been acknowledged. Writing to this file adds bad blocks
* without acknowledging them. This is largely for testing.
*/
static ssize_t
badblocks_show(struct badblocks *bb, char *page, int unack)
{
size_t len;
int i;
u64 *p = bb->page;
unsigned seq;
if (bb->shift < 0)
return 0;
retry:
seq = read_seqbegin(&bb->lock);
len = 0;
i = 0;
while (len < PAGE_SIZE && i < bb->count) {
sector_t s = BB_OFFSET(p[i]);
unsigned int length = BB_LEN(p[i]);
int ack = BB_ACK(p[i]);
i++;
if (unack && ack)
continue;
len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n",
(unsigned long long)s << bb->shift,
length << bb->shift);
}
if (unack && len == 0)
bb->unacked_exist = 0;
if (read_seqretry(&bb->lock, seq))
goto retry;
return len;
}
#define DO_DEBUG 1
static ssize_t
badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack)
{
unsigned long long sector;
int length;
char newline;
#ifdef DO_DEBUG
/* Allow clearing via sysfs *only* for testing/debugging.
* Normally only a successful write may clear a badblock
*/
int clear = 0;
if (page[0] == '-') {
clear = 1;
page++;
}
#endif /* DO_DEBUG */
	switch (sscanf(page, "%llu %d%c", &sector, &length, &newline)) {
case 3:
if (newline != '\n')
return -EINVAL;
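		/* fall through */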
case 2:
if (length <= 0)
return -EINVAL;
break;
default:
return -EINVAL;
}
#ifdef DO_DEBUG
if (clear) {
md_clear_badblocks(bb, sector, length);
return len;
}
#endif /* DO_DEBUG */
if (md_set_badblocks(bb, sector, length, !unack))
return len;
else
return -ENOSPC;
}
static int md_notify_reboot(struct notifier_block *this,
unsigned long code, void *x)
{
struct list_head *tmp;
struct mddev *mddev;
int need_delay = 0;
for_each_mddev(mddev, tmp) {
if (mddev_trylock(mddev)) {
if (mddev->pers)
__md_stop_writes(mddev);
if (mddev->persistent)
mddev->safemode = 2;
mddev_unlock(mddev);
}
need_delay = 1;
}
/*
* certain more exotic SCSI devices are known to be
* volatile wrt too early system reboots. While the
* right place to handle this issue is the given
* driver, we do want to have a safe RAID driver ...
*/
if (need_delay)
mdelay(1000*1);
return NOTIFY_DONE;
}
static struct notifier_block md_notifier = {
.notifier_call = md_notify_reboot,
.next = NULL,
.priority = INT_MAX, /* before any real devices */
};
static void md_geninit(void)
{
pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}
static int __init md_init(void)
{
int ret = -ENOMEM;
md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
if (!md_wq)
goto err_wq;
md_misc_wq = alloc_workqueue("md_misc", 0, 0);
if (!md_misc_wq)
goto err_misc_wq;
if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
goto err_md;
if ((ret = register_blkdev(0, "mdp")) < 0)
goto err_mdp;
mdp_major = ret;
blk_register_region(MKDEV(MD_MAJOR, 0), 512, THIS_MODULE,
md_probe, NULL, NULL);
blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
md_probe, NULL, NULL);
register_reboot_notifier(&md_notifier);
raid_table_header = register_sysctl_table(raid_root_table);
md_geninit();
return 0;
err_mdp:
unregister_blkdev(MD_MAJOR, "md");
err_md:
destroy_workqueue(md_misc_wq);
err_misc_wq:
destroy_workqueue(md_wq);
err_wq:
return ret;
}
void md_reload_sb(struct mddev *mddev)
{
struct md_rdev *rdev, *tmp;
rdev_for_each_safe(rdev, tmp, mddev) {
rdev->sb_loaded = 0;
ClearPageUptodate(rdev->sb_page);
}
mddev->raid_disks = 0;
analyze_sbs(mddev);
rdev_for_each_safe(rdev, tmp, mddev) {
struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
/* since we don't write to faulty devices, we figure out if the
* disk is faulty by comparing events
*/
if (mddev->events > sb->events)
set_bit(Faulty, &rdev->flags);
}
}
EXPORT_SYMBOL(md_reload_sb);
#ifndef MODULE
/*
* Searches all registered partitions for autorun RAID arrays
* at boot time.
*/
static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
struct list_head list;
dev_t dev;
};
void md_autodetect_dev(dev_t dev)
{
struct detected_devices_node *node_detected_dev;
node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
if (node_detected_dev) {
node_detected_dev->dev = dev;
list_add_tail(&node_detected_dev->list, &all_detected_devices);
} else {
printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
}
}
static void autostart_arrays(int part)
{
struct md_rdev *rdev;
struct detected_devices_node *node_detected_dev;
dev_t dev;
int i_scanned, i_passed;
i_scanned = 0;
i_passed = 0;
printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
i_scanned++;
node_detected_dev = list_entry(all_detected_devices.next,
struct detected_devices_node, list);
list_del(&node_detected_dev->list);
dev = node_detected_dev->dev;
kfree(node_detected_dev);
		rdev = md_import_device(dev, 0, 90);
if (IS_ERR(rdev))
continue;
if (test_bit(Faulty, &rdev->flags))
continue;
set_bit(AutoDetected, &rdev->flags);
list_add(&rdev->same_set, &pending_raid_disks);
i_passed++;
}
printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
i_scanned, i_passed);
autorun_devices(part);
}
#endif /* !MODULE */
static __exit void md_exit(void)
{
struct mddev *mddev;
struct list_head *tmp;
int delay = 1;
blk_unregister_region(MKDEV(MD_MAJOR,0), 512);
blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
unregister_blkdev(MD_MAJOR,"md");
unregister_blkdev(mdp_major, "mdp");
unregister_reboot_notifier(&md_notifier);
unregister_sysctl_table(raid_table_header);
/* We cannot unload the modules while some process is
* waiting for us in select() or poll() - wake them up
*/
md_unloading = 1;
while (waitqueue_active(&md_event_waiters)) {
/* not safe to leave yet */
wake_up(&md_event_waiters);
msleep(delay);
delay += delay;
}
remove_proc_entry("mdstat", NULL);
for_each_mddev(mddev, tmp) {
export_array(mddev);
mddev->hold_active = 0;
}
destroy_workqueue(md_misc_wq);
destroy_workqueue(md_wq);
}
subsys_initcall(md_init);
module_exit(md_exit)
static int get_ro(char *buffer, struct kernel_param *kp)
{
return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
return kstrtouint(val, 10, (unsigned int *)&start_readonly);
}
module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD RAID framework");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_1694_0 |
crossvul-cpp_data_bad_1829_3 | /***********************************************************************/
/* */
/* OCaml */
/* */
/* Xavier Leroy, projet Cristal, INRIA Rocquencourt */
/* */
/* Copyright 1996 Institut National de Recherche en Informatique et */
/* en Automatique. All rights reserved. This file is distributed */
/* under the terms of the GNU Library General Public License, with */
/* the special exception on linking described in file ../LICENSE. */
/* */
/***********************************************************************/
/* Structured input, compact format */
/* The interface of this file is "caml/intext.h" */
#include <string.h>
#include <stdio.h>
#include "caml/alloc.h"
#include "caml/callback.h"
#include "caml/custom.h"
#include "caml/fail.h"
#include "caml/gc.h"
#include "caml/intext.h"
#include "caml/io.h"
#include "caml/md5.h"
#include "caml/memory.h"
#include "caml/mlvalues.h"
#include "caml/misc.h"
#include "caml/reverse.h"
static unsigned char * intern_src;
/* Reading pointer in block holding input data. */
static unsigned char * intern_input;
/* Pointer to beginning of block holding input data.
Meaningful only if intern_input_malloced = 1. */
static int intern_input_malloced;
/* 1 if intern_input was allocated by caml_stat_alloc()
and needs caml_stat_free() on error, 0 otherwise. */
static header_t * intern_dest;
/* Writing pointer in destination block */
static char * intern_extra_block;
/* If non-NULL, point to new heap chunk allocated with caml_alloc_for_heap. */
static asize_t obj_counter;
/* Count how many objects seen so far */
static value * intern_obj_table;
/* The pointers to objects already seen */
static unsigned int intern_color;
/* Color to assign to newly created headers */
static header_t intern_header;
/* Original header of the destination block.
Meaningful only if intern_extra_block is NULL. */
static value intern_block;
/* Point to the heap block allocated as destination block.
Meaningful only if intern_extra_block is NULL. */
static char * intern_resolve_code_pointer(unsigned char digest[16],
asize_t offset);
CAMLnoreturn_start
static void intern_bad_code_pointer(unsigned char digest[16])
CAMLnoreturn_end;
static void intern_free_stack(void);
#define Sign_extend_shift ((sizeof(intnat) - 1) * 8)
#define Sign_extend(x) (((intnat)(x) << Sign_extend_shift) >> Sign_extend_shift)
#define read8u() (*intern_src++)
#define read8s() Sign_extend(*intern_src++)
#define read16u() \
(intern_src += 2, \
(intern_src[-2] << 8) + intern_src[-1])
#define read16s() \
(intern_src += 2, \
(Sign_extend(intern_src[-2]) << 8) + intern_src[-1])
#define read32u() \
(intern_src += 4, \
((uintnat)(intern_src[-4]) << 24) + (intern_src[-3] << 16) + \
(intern_src[-2] << 8) + intern_src[-1])
#define read32s() \
(intern_src += 4, \
(Sign_extend(intern_src[-4]) << 24) + (intern_src[-3] << 16) + \
(intern_src[-2] << 8) + intern_src[-1])
#ifdef ARCH_SIXTYFOUR
static intnat read64s(void)
{
intnat res;
int i;
res = 0;
for (i = 0; i < 8; i++) res = (res << 8) + intern_src[i];
intern_src += 8;
return res;
}
#endif
#define readblock(dest,len) \
(memmove((dest), intern_src, (len)), intern_src += (len))
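/* Worked example (added for clarity): with input bytes { 0xFF, 0xFE },
   read16u() yields 0xFFFE (65534), while read16s() sign-extends the first
   byte, yielding ((intnat)-1 << 8) + 0xFE = -2. The same pattern scales to
   read32u()/read32s(), which consume four bytes in big-endian order. */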
static void intern_cleanup(void)
{
if (intern_input_malloced) caml_stat_free(intern_input);
if (intern_obj_table != NULL) caml_stat_free(intern_obj_table);
if (intern_extra_block != NULL) {
/* free newly allocated heap chunk */
caml_free_for_heap(intern_extra_block);
} else if (intern_block != 0) {
/* restore original header for heap block, otherwise GC is confused */
Hd_val(intern_block) = intern_header;
}
/* free the recursion stack */
intern_free_stack();
}
static void readfloat(double * dest, unsigned int code)
{
if (sizeof(double) != 8) {
intern_cleanup();
caml_invalid_argument("input_value: non-standard floats");
}
readblock((char *) dest, 8);
/* Fix up endianness, if needed */
#if ARCH_FLOAT_ENDIANNESS == 0x76543210
/* Host is big-endian; fix up if data read is little-endian */
if (code != CODE_DOUBLE_BIG) Reverse_64(dest, dest);
#elif ARCH_FLOAT_ENDIANNESS == 0x01234567
/* Host is little-endian; fix up if data read is big-endian */
if (code != CODE_DOUBLE_LITTLE) Reverse_64(dest, dest);
#else
/* Host is neither big nor little; permute as appropriate */
if (code == CODE_DOUBLE_LITTLE)
Permute_64(dest, ARCH_FLOAT_ENDIANNESS, dest, 0x01234567)
else
Permute_64(dest, ARCH_FLOAT_ENDIANNESS, dest, 0x76543210);
#endif
}
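/* A note on the encoding (added for clarity): per the #if tests above,
   ARCH_FLOAT_ENDIANNESS describes the byte layout of a double one hex digit
   per byte, with 0x01234567 denoting a little-endian host and 0x76543210 a
   big-endian one; Permute_64 shuffles bytes between any two such layouts. */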
/* [len] is a number of floats */
static void readfloats(double * dest, mlsize_t len, unsigned int code)
{
mlsize_t i;
if (sizeof(double) != 8) {
intern_cleanup();
caml_invalid_argument("input_value: non-standard floats");
}
readblock((char *) dest, len * 8);
/* Fix up endianness, if needed */
#if ARCH_FLOAT_ENDIANNESS == 0x76543210
/* Host is big-endian; fix up if data read is little-endian */
if (code != CODE_DOUBLE_ARRAY8_BIG &&
code != CODE_DOUBLE_ARRAY32_BIG) {
for (i = 0; i < len; i++) Reverse_64(dest + i, dest + i);
}
#elif ARCH_FLOAT_ENDIANNESS == 0x01234567
/* Host is little-endian; fix up if data read is big-endian */
if (code != CODE_DOUBLE_ARRAY8_LITTLE &&
code != CODE_DOUBLE_ARRAY32_LITTLE) {
for (i = 0; i < len; i++) Reverse_64(dest + i, dest + i);
}
#else
/* Host is neither big nor little; permute as appropriate */
if (code == CODE_DOUBLE_ARRAY8_LITTLE ||
code == CODE_DOUBLE_ARRAY32_LITTLE) {
for (i = 0; i < len; i++)
Permute_64(dest + i, ARCH_FLOAT_ENDIANNESS, dest + i, 0x01234567);
} else {
for (i = 0; i < len; i++)
Permute_64(dest + i, ARCH_FLOAT_ENDIANNESS, dest + i, 0x76543210);
}
#endif
}
/* Item on the stack with defined operation */
struct intern_item {
value * dest;
intnat arg;
enum {
OReadItems, /* read arg items and store them in dest[0], dest[1], ... */
OFreshOID, /* generate a fresh OID and store it in *dest */
OShift /* offset *dest by arg */
} op;
};
/* FIXME: This is duplicated in two other places, with the only difference of
   the type of elements stored in the stack. A possible solution in C would
   be to instantiate these stack functions via a C preprocessor macro.
*/
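/* A minimal sketch of the preprocessor-macro approach suggested above
   (hypothetical; not part of this runtime):

   #define DEFINE_CAML_STACK(name, item_t, init_size)                    \
     static item_t name##_init[init_size];                               \
     static item_t * name = name##_init;                                 \
     static item_t * name##_limit = name##_init + init_size;

   DEFINE_CAML_STACK(intern_stack, struct intern_item, INTERN_STACK_INIT_SIZE)
   would then expand to the three definitions that follow. */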
#define INTERN_STACK_INIT_SIZE 256
#define INTERN_STACK_MAX_SIZE (1024*1024*100)
static struct intern_item intern_stack_init[INTERN_STACK_INIT_SIZE];
static struct intern_item * intern_stack = intern_stack_init;
static struct intern_item * intern_stack_limit = intern_stack_init
+ INTERN_STACK_INIT_SIZE;
/* Free the recursion stack if needed */
static void intern_free_stack(void)
{
if (intern_stack != intern_stack_init) {
free(intern_stack);
/* Reinitialize the globals for next time around */
intern_stack = intern_stack_init;
intern_stack_limit = intern_stack + INTERN_STACK_INIT_SIZE;
}
}
/* Same, then raise Out_of_memory */
static void intern_stack_overflow(void)
{
caml_gc_message (0x04, "Stack overflow in un-marshaling value\n", 0);
intern_free_stack();
caml_raise_out_of_memory();
}
static struct intern_item * intern_resize_stack(struct intern_item * sp)
{
asize_t newsize = 2 * (intern_stack_limit - intern_stack);
asize_t sp_offset = sp - intern_stack;
struct intern_item * newstack;
if (newsize >= INTERN_STACK_MAX_SIZE) intern_stack_overflow();
if (intern_stack == intern_stack_init) {
newstack = malloc(sizeof(struct intern_item) * newsize);
if (newstack == NULL) intern_stack_overflow();
memcpy(newstack, intern_stack_init,
sizeof(struct intern_item) * INTERN_STACK_INIT_SIZE);
} else {
newstack =
realloc(intern_stack, sizeof(struct intern_item) * newsize);
if (newstack == NULL) intern_stack_overflow();
}
intern_stack = newstack;
intern_stack_limit = newstack + newsize;
return newstack + sp_offset;
}
/* Convenience macros for requesting operation on the stack */
#define PushItem() \
do { \
sp++; \
if (sp >= intern_stack_limit) sp = intern_resize_stack(sp); \
} while(0)
#define ReadItems(_dest,_n) \
do { \
if (_n > 0) { \
PushItem(); \
sp->op = OReadItems; \
sp->dest = _dest; \
sp->arg = _n; \
} \
} while(0)
static void intern_rec(value *dest)
{
unsigned int code;
tag_t tag;
mlsize_t size, len, ofs_ind;
value v;
asize_t ofs;
header_t header;
unsigned char digest[16];
struct custom_operations * ops;
char * codeptr;
struct intern_item * sp;
sp = intern_stack;
/* Initially let's try to read the first object from the stream */
ReadItems(dest, 1);
/* The un-marshaler loop, the recursion is unrolled */
while(sp != intern_stack) {
/* Interpret next item on the stack */
dest = sp->dest;
switch (sp->op) {
case OFreshOID:
/* Refresh the object ID */
/* but do not do it for predefined exception slots */
if (Int_val(Field((value)dest, 1)) >= 0)
caml_set_oo_id((value)dest);
/* Pop item and iterate */
sp--;
break;
case OShift:
/* Shift value by an offset */
*dest += sp->arg;
/* Pop item and iterate */
sp--;
break;
case OReadItems:
/* Pop item */
sp->dest++;
if (--(sp->arg) == 0) sp--;
/* Read a value and set v to this value */
code = read8u();
if (code >= PREFIX_SMALL_INT) {
if (code >= PREFIX_SMALL_BLOCK) {
/* Small block */
tag = code & 0xF;
size = (code >> 4) & 0x7;
read_block:
if (size == 0) {
v = Atom(tag);
} else {
v = Val_hp(intern_dest);
if (intern_obj_table != NULL) intern_obj_table[obj_counter++] = v;
*intern_dest = Make_header(size, tag, intern_color);
intern_dest += 1 + size;
/* For objects, we need to freshen the oid */
if (tag == Object_tag) {
Assert(size >= 2);
/* Request to read rest of the elements of the block */
ReadItems(&Field(v, 2), size - 2);
/* Request refreshing of the OID */
PushItem();
sp->op = OFreshOID;
sp->dest = (value*) v;
sp->arg = 1;
/* Finally read first two block elements: method table and old OID */
ReadItems(&Field(v, 0), 2);
} else
/* If it's not an object then read the contents of the block */
ReadItems(&Field(v, 0), size);
}
} else {
/* Small integer */
v = Val_int(code & 0x3F);
}
} else {
if (code >= PREFIX_SMALL_STRING) {
/* Small string */
len = (code & 0x1F);
read_string:
size = (len + sizeof(value)) / sizeof(value);
v = Val_hp(intern_dest);
if (intern_obj_table != NULL) intern_obj_table[obj_counter++] = v;
*intern_dest = Make_header(size, String_tag, intern_color);
intern_dest += 1 + size;
Field(v, size - 1) = 0;
ofs_ind = Bsize_wsize(size) - 1;
Byte(v, ofs_ind) = ofs_ind - len;
readblock(String_val(v), len);
} else {
switch(code) {
case CODE_INT8:
v = Val_long(read8s());
break;
case CODE_INT16:
v = Val_long(read16s());
break;
case CODE_INT32:
v = Val_long(read32s());
break;
case CODE_INT64:
#ifdef ARCH_SIXTYFOUR
v = Val_long(read64s());
break;
#else
intern_cleanup();
caml_failwith("input_value: integer too large");
break;
#endif
case CODE_SHARED8:
ofs = read8u();
read_shared:
Assert (ofs > 0);
Assert (ofs <= obj_counter);
Assert (intern_obj_table != NULL);
v = intern_obj_table[obj_counter - ofs];
break;
case CODE_SHARED16:
ofs = read16u();
goto read_shared;
case CODE_SHARED32:
ofs = read32u();
goto read_shared;
case CODE_BLOCK32:
header = (header_t) read32u();
tag = Tag_hd(header);
size = Wosize_hd(header);
goto read_block;
case CODE_BLOCK64:
#ifdef ARCH_SIXTYFOUR
header = (header_t) read64s();
tag = Tag_hd(header);
size = Wosize_hd(header);
goto read_block;
#else
intern_cleanup();
caml_failwith("input_value: data block too large");
break;
#endif
case CODE_STRING8:
len = read8u();
goto read_string;
case CODE_STRING32:
len = read32u();
goto read_string;
case CODE_DOUBLE_LITTLE:
case CODE_DOUBLE_BIG:
v = Val_hp(intern_dest);
if (intern_obj_table != NULL) intern_obj_table[obj_counter++] = v;
*intern_dest = Make_header(Double_wosize, Double_tag, intern_color);
intern_dest += 1 + Double_wosize;
readfloat((double *) v, code);
break;
case CODE_DOUBLE_ARRAY8_LITTLE:
case CODE_DOUBLE_ARRAY8_BIG:
len = read8u();
read_double_array:
size = len * Double_wosize;
v = Val_hp(intern_dest);
if (intern_obj_table != NULL) intern_obj_table[obj_counter++] = v;
*intern_dest = Make_header(size, Double_array_tag, intern_color);
intern_dest += 1 + size;
readfloats((double *) v, len, code);
break;
case CODE_DOUBLE_ARRAY32_LITTLE:
case CODE_DOUBLE_ARRAY32_BIG:
len = read32u();
goto read_double_array;
case CODE_CODEPOINTER:
ofs = read32u();
readblock(digest, 16);
codeptr = intern_resolve_code_pointer(digest, ofs);
if (codeptr != NULL) {
v = (value) codeptr;
} else {
value * function_placeholder =
caml_named_value ("Debugger.function_placeholder");
if (function_placeholder != NULL) {
v = *function_placeholder;
} else {
intern_cleanup();
intern_bad_code_pointer(digest);
}
}
break;
case CODE_INFIXPOINTER:
ofs = read32u();
/* Read a value to *dest, then offset *dest by ofs */
PushItem();
sp->dest = dest;
sp->op = OShift;
sp->arg = ofs;
ReadItems(dest, 1);
continue; /* with next iteration of main loop, skipping *dest = v */
case CODE_CUSTOM:
ops = caml_find_custom_operations((char *) intern_src);
if (ops == NULL) {
intern_cleanup();
caml_failwith("input_value: unknown custom block identifier");
}
while (*intern_src++ != 0) /*nothing*/; /*skip identifier*/
size = ops->deserialize((void *) (intern_dest + 2));
size = 1 + (size + sizeof(value) - 1) / sizeof(value);
v = Val_hp(intern_dest);
if (intern_obj_table != NULL) intern_obj_table[obj_counter++] = v;
*intern_dest = Make_header(size, Custom_tag, intern_color);
Custom_ops_val(v) = ops;
if (ops->finalize != NULL && Is_young(v)) {
/* Remember that the block has a finalizer */
if (caml_finalize_table.ptr >= caml_finalize_table.limit){
CAMLassert (caml_finalize_table.ptr == caml_finalize_table.limit);
caml_realloc_ref_table (&caml_finalize_table);
}
*caml_finalize_table.ptr++ = (value *)v;
}
intern_dest += 1 + size;
break;
default:
intern_cleanup();
caml_failwith("input_value: ill-formed message");
}
}
}
/* end of case OReadItems */
*dest = v;
break;
default:
Assert(0);
}
}
/* We are done. Cleanup the stack and leave the function */
intern_free_stack();
}
static void intern_alloc(mlsize_t whsize, mlsize_t num_objects)
{
mlsize_t wosize;
if (whsize == 0) {
intern_obj_table = NULL;
intern_extra_block = NULL;
intern_block = 0;
return;
}
wosize = Wosize_whsize(whsize);
if (wosize > Max_wosize) {
/* Round desired size up to next page */
asize_t request =
((Bsize_wsize(whsize) + Page_size - 1) >> Page_log) << Page_log;
intern_extra_block = caml_alloc_for_heap(request);
if (intern_extra_block == NULL) caml_raise_out_of_memory();
intern_color = caml_allocation_color(intern_extra_block);
intern_dest = (header_t *) intern_extra_block;
} else {
/* this is a specialised version of caml_alloc from alloc.c */
if (wosize == 0){
intern_block = Atom (String_tag);
}else if (wosize <= Max_young_wosize){
intern_block = caml_alloc_small (wosize, String_tag);
}else{
intern_block = caml_alloc_shr (wosize, String_tag);
/* do not do the urgent_gc check here because it might darken
intern_block into gray and break the Assert 3 lines down */
}
intern_header = Hd_val(intern_block);
intern_color = Color_hd(intern_header);
Assert (intern_color == Caml_white || intern_color == Caml_black);
intern_dest = (header_t *) Hp_val(intern_block);
intern_extra_block = NULL;
}
obj_counter = 0;
if (num_objects > 0)
intern_obj_table = (value *) caml_stat_alloc(num_objects * sizeof(value));
else
intern_obj_table = NULL;
}
static void intern_add_to_heap(mlsize_t whsize)
{
/* Add new heap chunk to heap if needed */
if (intern_extra_block != NULL) {
/* If heap chunk not filled totally, build free block at end */
asize_t request =
((Bsize_wsize(whsize) + Page_size - 1) >> Page_log) << Page_log;
header_t * end_extra_block =
(header_t *) intern_extra_block + Wsize_bsize(request);
Assert(intern_dest <= end_extra_block);
if (intern_dest < end_extra_block){
caml_make_free_blocks ((value *) intern_dest,
end_extra_block - intern_dest, 0, Caml_white);
}
caml_allocated_words +=
Wsize_bsize ((char *) intern_dest - intern_extra_block);
caml_add_to_heap(intern_extra_block);
}
}
value caml_input_val(struct channel *chan)
{
uint32_t magic;
mlsize_t block_len, num_objects, whsize;
char * block;
value res;
if (! caml_channel_binary_mode(chan))
caml_failwith("input_value: not a binary channel");
magic = caml_getword(chan);
if (magic != Intext_magic_number) caml_failwith("input_value: bad object");
block_len = caml_getword(chan);
num_objects = caml_getword(chan);
#ifdef ARCH_SIXTYFOUR
caml_getword(chan); /* skip size_32 */
whsize = caml_getword(chan);
#else
whsize = caml_getword(chan);
caml_getword(chan); /* skip size_64 */
#endif
/* Read block from channel */
block = caml_stat_alloc(block_len);
/* During [caml_really_getblock], concurrent [caml_input_val] operations
can take place (via signal handlers or context switching in systhreads),
and [intern_input] may change. So, wait until [caml_really_getblock]
is over before using [intern_input] and the other global vars. */
if (caml_really_getblock(chan, block, block_len) == 0) {
caml_stat_free(block);
caml_failwith("input_value: truncated object");
}
intern_input = (unsigned char *) block;
intern_input_malloced = 1;
intern_src = intern_input;
intern_alloc(whsize, num_objects);
/* Fill it in */
intern_rec(&res);
intern_add_to_heap(whsize);
/* Free everything */
caml_stat_free(intern_input);
if (intern_obj_table != NULL) caml_stat_free(intern_obj_table);
return caml_check_urgent_gc(res);
}
CAMLprim value caml_input_value(value vchan)
{
CAMLparam1 (vchan);
struct channel * chan = Channel(vchan);
CAMLlocal1 (res);
Lock(chan);
res = caml_input_val(chan);
Unlock(chan);
CAMLreturn (res);
}
CAMLexport value caml_input_val_from_string(value str, intnat ofs)
{
CAMLparam1 (str);
mlsize_t num_objects, whsize;
CAMLlocal1 (obj);
intern_src = &Byte_u(str, ofs + 2*4);
intern_input_malloced = 0;
num_objects = read32u();
#ifdef ARCH_SIXTYFOUR
intern_src += 4; /* skip size_32 */
whsize = read32u();
#else
whsize = read32u();
intern_src += 4; /* skip size_64 */
#endif
/* Allocate result */
intern_alloc(whsize, num_objects);
intern_src = &Byte_u(str, ofs + 5*4); /* If a GC occurred */
/* Fill it in */
intern_rec(&obj);
intern_add_to_heap(whsize);
/* Free everything */
if (intern_obj_table != NULL) caml_stat_free(intern_obj_table);
CAMLreturn (caml_check_urgent_gc(obj));
}
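/* The 20-byte marshal header implied by the offsets above (layout inferred
   from the reads in caml_input_val and caml_input_val_from_string):
     bytes  0- 3   magic number
     bytes  4- 7   block_len    (length of the marshaled data)
     bytes  8-11   num_objects  (size of the sharing table)
     bytes 12-15   size_32      (result size in words on 32-bit hosts)
     bytes 16-19   size_64      (result size in words on 64-bit hosts)
   Hence [ofs + 2*4] skips magic and block_len, and [ofs + 5*4] is where the
   marshaled data itself begins. */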
CAMLprim value caml_input_value_from_string(value str, value ofs)
{
return caml_input_val_from_string(str, Long_val(ofs));
}
static value input_val_from_block(void)
{
mlsize_t num_objects, whsize;
value obj;
num_objects = read32u();
#ifdef ARCH_SIXTYFOUR
intern_src += 4; /* skip size_32 */
whsize = read32u();
#else
whsize = read32u();
intern_src += 4; /* skip size_64 */
#endif
/* Allocate result */
intern_alloc(whsize, num_objects);
/* Fill it in */
intern_rec(&obj);
intern_add_to_heap(whsize);
/* Free internal data structures */
if (intern_obj_table != NULL) caml_stat_free(intern_obj_table);
return caml_check_urgent_gc(obj);
}
CAMLexport value caml_input_value_from_malloc(char * data, intnat ofs)
{
uint32_t magic;
value obj;
intern_input = (unsigned char *) data;
intern_src = intern_input + ofs;
intern_input_malloced = 1;
magic = read32u();
if (magic != Intext_magic_number)
caml_failwith("input_value_from_malloc: bad object");
intern_src += 4; /* Skip block_len */
obj = input_val_from_block();
/* Free the input */
caml_stat_free(intern_input);
return obj;
}
/* [len] is a number of bytes */
CAMLexport value caml_input_value_from_block(char * data, intnat len)
{
uint32_t magic;
mlsize_t block_len;
value obj;
intern_input = (unsigned char *) data;
intern_src = intern_input;
intern_input_malloced = 0;
magic = read32u();
if (magic != Intext_magic_number)
caml_failwith("input_value_from_block: bad object");
block_len = read32u();
if (5*4 + block_len > len)
caml_failwith("input_value_from_block: bad block length");
obj = input_val_from_block();
return obj;
}
/* [ofs] is a [value] that represents a number of bytes
result is a [value] that represents a number of bytes
*/
CAMLprim value caml_marshal_data_size(value buff, value ofs)
{
uint32_t magic;
mlsize_t block_len;
intern_src = &Byte_u(buff, Long_val(ofs));
intern_input_malloced = 0;
magic = read32u();
if (magic != Intext_magic_number){
caml_failwith("Marshal.data_size: bad object");
}
block_len = read32u();
return Val_long(block_len);
}
/* Resolution of code pointers */
static char * intern_resolve_code_pointer(unsigned char digest[16],
asize_t offset)
{
int i;
for (i = caml_code_fragments_table.size - 1; i >= 0; i--) {
struct code_fragment * cf = caml_code_fragments_table.contents[i];
if (! cf->digest_computed) {
caml_md5_block(cf->digest, cf->code_start, cf->code_end - cf->code_start);
cf->digest_computed = 1;
}
if (memcmp(digest, cf->digest, 16) == 0) {
if (cf->code_start + offset < cf->code_end)
return cf->code_start + offset;
else
return NULL;
}
}
return NULL;
}
static void intern_bad_code_pointer(unsigned char digest[16])
{
char msg[256];
snprintf(msg, sizeof(msg),
"input_value: unknown code module "
"%02X%02X%02X%02X%02X%02X%02X%02X"
"%02X%02X%02X%02X%02X%02X%02X%02X",
digest[0], digest[1], digest[2], digest[3],
digest[4], digest[5], digest[6], digest[7],
digest[8], digest[9], digest[10], digest[11],
digest[12], digest[13], digest[14], digest[15]);
caml_failwith(msg);
}
/* Functions for writing user-defined marshallers */
CAMLexport int caml_deserialize_uint_1(void)
{
return read8u();
}
CAMLexport int caml_deserialize_sint_1(void)
{
return read8s();
}
CAMLexport int caml_deserialize_uint_2(void)
{
return read16u();
}
CAMLexport int caml_deserialize_sint_2(void)
{
return read16s();
}
CAMLexport uint32_t caml_deserialize_uint_4(void)
{
return read32u();
}
CAMLexport int32_t caml_deserialize_sint_4(void)
{
return read32s();
}
CAMLexport uint64_t caml_deserialize_uint_8(void)
{
uint64_t i;
caml_deserialize_block_8(&i, 1);
return i;
}
CAMLexport int64_t caml_deserialize_sint_8(void)
{
int64_t i;
caml_deserialize_block_8(&i, 1);
return i;
}
CAMLexport float caml_deserialize_float_4(void)
{
float f;
caml_deserialize_block_4(&f, 1);
return f;
}
CAMLexport double caml_deserialize_float_8(void)
{
double f;
caml_deserialize_block_float_8(&f, 1);
return f;
}
CAMLexport void caml_deserialize_block_1(void * data, intnat len)
{
memmove(data, intern_src, len);
intern_src += len;
}
CAMLexport void caml_deserialize_block_2(void * data, intnat len)
{
#ifndef ARCH_BIG_ENDIAN
unsigned char * p, * q;
for (p = intern_src, q = data; len > 0; len--, p += 2, q += 2)
Reverse_16(q, p);
intern_src = p;
#else
memmove(data, intern_src, len * 2);
intern_src += len * 2;
#endif
}
CAMLexport void caml_deserialize_block_4(void * data, intnat len)
{
#ifndef ARCH_BIG_ENDIAN
unsigned char * p, * q;
for (p = intern_src, q = data; len > 0; len--, p += 4, q += 4)
Reverse_32(q, p);
intern_src = p;
#else
memmove(data, intern_src, len * 4);
intern_src += len * 4;
#endif
}
CAMLexport void caml_deserialize_block_8(void * data, intnat len)
{
#ifndef ARCH_BIG_ENDIAN
unsigned char * p, * q;
for (p = intern_src, q = data; len > 0; len--, p += 8, q += 8)
Reverse_64(q, p);
intern_src = p;
#else
memmove(data, intern_src, len * 8);
intern_src += len * 8;
#endif
}
CAMLexport void caml_deserialize_block_float_8(void * data, intnat len)
{
#if ARCH_FLOAT_ENDIANNESS == 0x01234567
memmove(data, intern_src, len * 8);
intern_src += len * 8;
#elif ARCH_FLOAT_ENDIANNESS == 0x76543210
unsigned char * p, * q;
for (p = intern_src, q = data; len > 0; len--, p += 8, q += 8)
Reverse_64(q, p);
intern_src = p;
#else
unsigned char * p, * q;
for (p = intern_src, q = data; len > 0; len--, p += 8, q += 8)
Permute_64(q, ARCH_FLOAT_ENDIANNESS, p, 0x01234567);
intern_src = p;
#endif
}
CAMLexport void caml_deserialize_error(char * msg)
{
intern_cleanup();
caml_failwith(msg);
}
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_1829_3 |
crossvul-cpp_data_bad_5060_0 | /*
* X.25 Packet Layer release 002
*
* This is ALPHA test software. This code may break your machine,
* randomly fail to work with new releases, misbehave and/or generally
* screw up. It might even work.
*
* This code REQUIRES 2.1.15 or higher
*
* This module:
* This module is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* History
* X.25 001 Split from x25_subr.c
* mar/20/00 Daniela Squassoni Disabling/enabling of facilities
* negotiation.
* apr/14/05 Shaun Pereira - Allow fast select with no restriction
* on response.
*/
#define pr_fmt(fmt) "X25: " fmt
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/x25.h>
/**
* x25_parse_facilities - Parse facilities from skb into the facilities structs
*
* @skb: sk_buff to parse
* @facilities: Regular facilities, updated as facilities are found
* @dte_facs: ITU DTE facilities, updated as DTE facilities are found
* @vc_fac_mask: mask is updated with all facilities found
*
* Return codes:
* -1 - Parsing error, caller should drop call and clean up
* 0 - Parse OK, this skb has no facilities
* >0 - Parse OK, returns the length of the facilities header
*
*/
int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask)
{
unsigned char *p;
unsigned int len;
*vc_fac_mask = 0;
/*
* The kernel knows which facilities were set on an incoming call but
* currently this information is not available to userspace. Here we
* give userspace that reads incoming call facilities a length of 0 to
* indicate they weren't set.
*/
dte_facs->calling_len = 0;
dte_facs->called_len = 0;
memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae));
memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae));
if (!pskb_may_pull(skb, 1))
return 0;
len = skb->data[0];
if (!pskb_may_pull(skb, 1 + len))
return -1;
p = skb->data + 1;
while (len > 0) {
switch (*p & X25_FAC_CLASS_MASK) {
case X25_FAC_CLASS_A:
if (len < 2)
return -1;
switch (*p) {
case X25_FAC_REVERSE:
if((p[1] & 0x81) == 0x81) {
facilities->reverse = p[1] & 0x81;
*vc_fac_mask |= X25_MASK_REVERSE;
break;
}
if((p[1] & 0x01) == 0x01) {
facilities->reverse = p[1] & 0x01;
*vc_fac_mask |= X25_MASK_REVERSE;
break;
}
if((p[1] & 0x80) == 0x80) {
facilities->reverse = p[1] & 0x80;
*vc_fac_mask |= X25_MASK_REVERSE;
break;
}
if(p[1] == 0x00) {
facilities->reverse
= X25_DEFAULT_REVERSE;
*vc_fac_mask |= X25_MASK_REVERSE;
break;
}
case X25_FAC_THROUGHPUT:
facilities->throughput = p[1];
*vc_fac_mask |= X25_MASK_THROUGHPUT;
break;
case X25_MARKER:
break;
default:
pr_debug("unknown facility "
"%02X, value %02X\n",
p[0], p[1]);
break;
}
p += 2;
len -= 2;
break;
case X25_FAC_CLASS_B:
if (len < 3)
return -1;
switch (*p) {
case X25_FAC_PACKET_SIZE:
facilities->pacsize_in = p[1];
facilities->pacsize_out = p[2];
*vc_fac_mask |= X25_MASK_PACKET_SIZE;
break;
case X25_FAC_WINDOW_SIZE:
facilities->winsize_in = p[1];
facilities->winsize_out = p[2];
*vc_fac_mask |= X25_MASK_WINDOW_SIZE;
break;
default:
pr_debug("unknown facility "
"%02X, values %02X, %02X\n",
p[0], p[1], p[2]);
break;
}
p += 3;
len -= 3;
break;
case X25_FAC_CLASS_C:
if (len < 4)
return -1;
pr_debug("unknown facility %02X, "
"values %02X, %02X, %02X\n",
p[0], p[1], p[2], p[3]);
p += 4;
len -= 4;
break;
case X25_FAC_CLASS_D:
if (len < p[1] + 2)
return -1;
switch (*p) {
case X25_FAC_CALLING_AE:
if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
return -1;
if (p[2] > X25_MAX_AE_LEN)
return -1;
dte_facs->calling_len = p[2];
memcpy(dte_facs->calling_ae, &p[3], p[1] - 1);
*vc_fac_mask |= X25_MASK_CALLING_AE;
break;
case X25_FAC_CALLED_AE:
if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
return -1;
if (p[2] > X25_MAX_AE_LEN)
return -1;
dte_facs->called_len = p[2];
memcpy(dte_facs->called_ae, &p[3], p[1] - 1);
*vc_fac_mask |= X25_MASK_CALLED_AE;
break;
default:
pr_debug("unknown facility %02X,"
"length %d\n", p[0], p[1]);
break;
}
len -= p[1] + 2;
p += p[1] + 2;
break;
}
}
return p - skb->data;
}
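/*
 * Illustrative caller sketch (added for clarity; not from the original
 * source). Per the return-code convention documented above, a caller
 * clears the call on a negative return and consumes the parsed header
 * otherwise:
 *
 *	len = x25_parse_facilities(skb, &facilities, &dte_facs, &vc_fac_mask);
 *	if (len < 0)
 *		goto out_clear_request;	(parse error: drop the call)
 *	skb_pull(skb, len);		(consume the facilities field, if any)
 */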
/*
* Create a set of facilities.
*/
int x25_create_facilities(unsigned char *buffer,
struct x25_facilities *facilities,
struct x25_dte_facilities *dte_facs, unsigned long facil_mask)
{
unsigned char *p = buffer + 1;
int len;
if (!facil_mask) {
/*
* Length of the facilities field in call_req or
* call_accept packets
*/
buffer[0] = 0;
len = 1; /* 1 byte for the length field */
return len;
}
if (facilities->reverse && (facil_mask & X25_MASK_REVERSE)) {
*p++ = X25_FAC_REVERSE;
*p++ = facilities->reverse;
}
if (facilities->throughput && (facil_mask & X25_MASK_THROUGHPUT)) {
*p++ = X25_FAC_THROUGHPUT;
*p++ = facilities->throughput;
}
if ((facilities->pacsize_in || facilities->pacsize_out) &&
(facil_mask & X25_MASK_PACKET_SIZE)) {
*p++ = X25_FAC_PACKET_SIZE;
*p++ = facilities->pacsize_in ? : facilities->pacsize_out;
*p++ = facilities->pacsize_out ? : facilities->pacsize_in;
}
if ((facilities->winsize_in || facilities->winsize_out) &&
(facil_mask & X25_MASK_WINDOW_SIZE)) {
*p++ = X25_FAC_WINDOW_SIZE;
*p++ = facilities->winsize_in ? : facilities->winsize_out;
*p++ = facilities->winsize_out ? : facilities->winsize_in;
}
if (facil_mask & (X25_MASK_CALLING_AE|X25_MASK_CALLED_AE)) {
*p++ = X25_MARKER;
*p++ = X25_DTE_SERVICES;
}
if (dte_facs->calling_len && (facil_mask & X25_MASK_CALLING_AE)) {
unsigned int bytecount = (dte_facs->calling_len + 1) >> 1;
*p++ = X25_FAC_CALLING_AE;
*p++ = 1 + bytecount;
*p++ = dte_facs->calling_len;
memcpy(p, dte_facs->calling_ae, bytecount);
p += bytecount;
}
if (dte_facs->called_len && (facil_mask & X25_MASK_CALLED_AE)) {
unsigned int bytecount = (dte_facs->called_len % 2) ?
dte_facs->called_len / 2 + 1 :
dte_facs->called_len / 2;
*p++ = X25_FAC_CALLED_AE;
*p++ = 1 + bytecount;
*p++ = dte_facs->called_len;
memcpy(p, dte_facs->called_ae, bytecount);
p+=bytecount;
}
len = p - buffer;
buffer[0] = len - 1;
return len;
}
/*
* Try to reach a compromise on a set of facilities.
*
* The only real problem is with reverse charging.
*/
int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
struct x25_facilities *new, struct x25_dte_facilities *dte)
{
struct x25_sock *x25 = x25_sk(sk);
struct x25_facilities *ours = &x25->facilities;
struct x25_facilities theirs;
int len;
memset(&theirs, 0, sizeof(theirs));
memcpy(new, ours, sizeof(*new));
len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask);
if (len < 0)
return len;
/*
* They want reverse charging, we won't accept it.
*/
if ((theirs.reverse & 0x01 ) && (ours->reverse & 0x01)) {
SOCK_DEBUG(sk, "X.25: rejecting reverse charging request\n");
return -1;
}
new->reverse = theirs.reverse;
if (theirs.throughput) {
int theirs_in = theirs.throughput & 0x0f;
int theirs_out = theirs.throughput & 0xf0;
int ours_in = ours->throughput & 0x0f;
int ours_out = ours->throughput & 0xf0;
if (!ours_in || theirs_in < ours_in) {
SOCK_DEBUG(sk, "X.25: inbound throughput negotiated\n");
new->throughput = (new->throughput & 0xf0) | theirs_in;
}
if (!ours_out || theirs_out < ours_out) {
SOCK_DEBUG(sk,
"X.25: outbound throughput negotiated\n");
new->throughput = (new->throughput & 0x0f) | theirs_out;
}
}
if (theirs.pacsize_in && theirs.pacsize_out) {
if (theirs.pacsize_in < ours->pacsize_in) {
SOCK_DEBUG(sk, "X.25: packet size inwards negotiated down\n");
new->pacsize_in = theirs.pacsize_in;
}
if (theirs.pacsize_out < ours->pacsize_out) {
SOCK_DEBUG(sk, "X.25: packet size outwards negotiated down\n");
new->pacsize_out = theirs.pacsize_out;
}
}
if (theirs.winsize_in && theirs.winsize_out) {
if (theirs.winsize_in < ours->winsize_in) {
SOCK_DEBUG(sk, "X.25: window size inwards negotiated down\n");
new->winsize_in = theirs.winsize_in;
}
if (theirs.winsize_out < ours->winsize_out) {
SOCK_DEBUG(sk, "X.25: window size outwards negotiated down\n");
new->winsize_out = theirs.winsize_out;
}
}
return len;
}
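/*
 * Worked example (added for clarity): the throughput facility packs the
 * inbound class in the low nibble and the outbound class in the high one.
 * With ours->throughput == 0xDD and theirs.throughput == 0xAB, theirs_in
 * (0x0B) and theirs_out (0xA0) are both below our 0x0D/0xD0, so both
 * directions are negotiated down and new->throughput ends up 0xAB.
 */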
/*
* Limit values of certain facilities according to the capability of the
* currently attached x25 link.
*/
void x25_limit_facilities(struct x25_facilities *facilities,
struct x25_neigh *nb)
{
if (!nb->extended) {
if (facilities->winsize_in > 7) {
pr_debug("incoming winsize limited to 7\n");
facilities->winsize_in = 7;
}
if (facilities->winsize_out > 7) {
facilities->winsize_out = 7;
pr_debug("outgoing winsize limited to 7\n");
}
}
}
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_5060_0 |
crossvul-cpp_data_good_4948_0 | /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
/* bpf_check() is a static code analyzer that walks eBPF program
* instruction by instruction and updates register/stack state.
* All paths of conditional branches are analyzed until 'bpf_exit' insn.
*
* The first pass is depth-first-search to check that the program is a DAG.
* It rejects the following programs:
* - larger than BPF_MAXINSNS insns
* - if loop is present (detected via back-edge)
* - unreachable insns exist (shouldn't be a forest. program = one function)
* - out of bounds or malformed jumps
* The second pass is all possible path descent from the 1st insn.
* Since it's analyzing all paths through the program, the length of the
* analysis is limited to 32k insn, which may be hit even if the total number
* of insns is less than 4K, but there are too many branches that change stack/regs.
* Number of 'branches to be analyzed' is limited to 1k
*
* On entry to each instruction, each register has a type, and the instruction
* changes the types of the registers depending on instruction semantics.
* If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
* copied to R1.
*
* All registers are 64-bit.
* R0 - return register
* R1-R5 argument passing registers
* R6-R9 callee saved registers
* R10 - frame pointer read-only
*
* At the start of BPF program the register R1 contains a pointer to bpf_context
* and has type PTR_TO_CTX.
*
* Verifier tracks arithmetic operations on pointers in case:
* BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
* BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
* 1st insn copies R10 (which has FRAME_PTR) type into R1
* and 2nd arithmetic instruction is pattern matched to recognize
* that it wants to construct a pointer to some element within stack.
* So after 2nd insn, the register R1 has type PTR_TO_STACK
* (and -20 constant is saved for further stack bounds checking).
* Meaning that this reg is a pointer to stack plus known immediate constant.
*
* Most of the time the registers have UNKNOWN_VALUE type, which
* means the register has some value, but it's not a valid pointer.
* (like pointer plus pointer becomes UNKNOWN_VALUE type)
*
* When verifier sees load or store instructions the type of base register
* can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, FRAME_PTR. These are three pointer
* types recognized by check_mem_access() function.
*
* PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
* and the range of [ptr, ptr + map's value_size) is accessible.
*
* registers used to pass values to function calls are checked against
* function argument constraints.
*
* ARG_PTR_TO_MAP_KEY is one of such argument constraints.
* It means that the register type passed to this function must be
* PTR_TO_STACK and it will be used inside the function as
* 'pointer to map element key'
*
* For example the argument constraints for bpf_map_lookup_elem():
* .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
* .arg1_type = ARG_CONST_MAP_PTR,
* .arg2_type = ARG_PTR_TO_MAP_KEY,
*
* ret_type says that this function returns 'pointer to map elem value or null'
* function expects 1st argument to be a const pointer to 'struct bpf_map' and
* 2nd argument should be a pointer to stack, which will be used inside
* the helper function as a pointer to map element key.
*
* On the kernel side the helper function looks like:
* u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
* {
* struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
* void *key = (void *) (unsigned long) r2;
* void *value;
*
* here kernel can access 'key' and 'map' pointers safely, knowing that
* [key, key + map->key_size) bytes are valid and were initialized on
* the stack of eBPF program.
* }
*
* Corresponding eBPF program may look like:
* BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), // after this insn R2 type is FRAME_PTR
* BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
* BPF_LD_MAP_FD(BPF_REG_1, map_fd), // after this insn R1 type is CONST_PTR_TO_MAP
* BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
* here verifier looks at prototype of map_lookup_elem() and sees:
* .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
* Now verifier knows that this map has key of R1->map_ptr->key_size bytes
*
* Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
* Now verifier checks that [R2, R2 + map's key_size) are within stack limits
* and were initialized prior to this call.
* If it's ok, then verifier allows this BPF_CALL insn and looks at
* .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
* R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
* returns either a pointer to a map value or NULL.
*
* When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
* insn, the register holding that pointer in the true branch changes state to
* PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
* branch. See check_cond_jmp_op().
*
* After the call R0 is set to return type of the function and registers R1-R5
* are set to NOT_INIT to indicate that they are no longer readable.
*/
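/* A concrete rejection sketch (added for illustration; instruction macros
 * as in <linux/filter.h>):
 *
 *	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),           // R2 = fp (FRAME_PTR)
 *	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8), // spill R2 to fp-8
 *	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),  // 4-byte read of the spill
 *	BPF_EXIT_INSN(),
 *
 * The 8-byte spill itself is fine (FRAME_PTR is a spillable type), but
 * check_stack_read() then sees a 4-byte access to a STACK_SPILL slot and
 * rejects the program with "invalid size of register spill".
 */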
/* types of values stored in eBPF registers */
enum bpf_reg_type {
NOT_INIT = 0, /* nothing was written into register */
UNKNOWN_VALUE, /* reg doesn't contain a valid pointer */
PTR_TO_CTX, /* reg points to bpf_context */
CONST_PTR_TO_MAP, /* reg points to struct bpf_map */
PTR_TO_MAP_VALUE, /* reg points to map element value */
PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
FRAME_PTR, /* reg == frame_pointer */
PTR_TO_STACK, /* reg == frame_pointer + imm */
CONST_IMM, /* constant integer value */
};
struct reg_state {
enum bpf_reg_type type;
union {
/* valid when type == CONST_IMM | PTR_TO_STACK */
int imm;
/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
* PTR_TO_MAP_VALUE_OR_NULL
*/
struct bpf_map *map_ptr;
};
};
enum bpf_stack_slot_type {
STACK_INVALID, /* nothing was stored in this stack slot */
STACK_SPILL, /* register spilled into stack */
STACK_MISC /* BPF program wrote some data into this slot */
};
#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
/* state of the program:
* type of all registers and stack info
*/
struct verifier_state {
struct reg_state regs[MAX_BPF_REG];
u8 stack_slot_type[MAX_BPF_STACK];
struct reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
};
/* linked list of verifier states used to prune search */
struct verifier_state_list {
struct verifier_state state;
struct verifier_state_list *next;
};
/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct verifier_stack_elem {
/* verifier state is 'st'
* before processing instruction 'insn_idx'
* and after processing instruction 'prev_insn_idx'
*/
struct verifier_state st;
int insn_idx;
int prev_insn_idx;
struct verifier_stack_elem *next;
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
/* single container for all structs
* one verifier_env per bpf_check() call
*/
struct verifier_env {
struct bpf_prog *prog; /* eBPF program being verified */
struct verifier_stack_elem *head; /* stack of verifier states to be processed */
int stack_size; /* number of states to be processed */
struct verifier_state cur_state; /* current verifier state */
struct verifier_state_list **explored_states; /* search pruning optimization */
struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
u32 used_map_cnt; /* number of used maps */
bool allow_ptr_leaks;
};
/* verbose verifier prints what it's seeing
* bpf_check() is called under lock, so no race to access these global vars
*/
static u32 log_level, log_size, log_len;
static char *log_buf;
static DEFINE_MUTEX(bpf_verifier_lock);
/* log_level controls verbosity level of eBPF verifier.
* verbose() is used to dump the verification trace to the log, so the user
* can figure out what's wrong with the program
*/
static __printf(1, 2) void verbose(const char *fmt, ...)
{
va_list args;
if (log_level == 0 || log_len >= log_size - 1)
return;
va_start(args, fmt);
log_len += vscnprintf(log_buf + log_len, log_size - log_len, fmt, args);
va_end(args);
}
/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
[NOT_INIT] = "?",
[UNKNOWN_VALUE] = "inv",
[PTR_TO_CTX] = "ctx",
[CONST_PTR_TO_MAP] = "map_ptr",
[PTR_TO_MAP_VALUE] = "map_value",
[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
[FRAME_PTR] = "fp",
[PTR_TO_STACK] = "fp",
[CONST_IMM] = "imm",
};
static const struct {
int map_type;
int func_id;
} func_limit[] = {
{BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call},
{BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
{BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_output},
};
static void print_verifier_state(struct verifier_env *env)
{
enum bpf_reg_type t;
int i;
for (i = 0; i < MAX_BPF_REG; i++) {
t = env->cur_state.regs[i].type;
if (t == NOT_INIT)
continue;
verbose(" R%d=%s", i, reg_type_str[t]);
if (t == CONST_IMM || t == PTR_TO_STACK)
verbose("%d", env->cur_state.regs[i].imm);
else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE ||
t == PTR_TO_MAP_VALUE_OR_NULL)
verbose("(ks=%d,vs=%d)",
env->cur_state.regs[i].map_ptr->key_size,
env->cur_state.regs[i].map_ptr->value_size);
}
for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
if (env->cur_state.stack_slot_type[i] == STACK_SPILL)
verbose(" fp%d=%s", -MAX_BPF_STACK + i,
reg_type_str[env->cur_state.spilled_regs[i / BPF_REG_SIZE].type]);
}
verbose("\n");
}
static const char *const bpf_class_string[] = {
[BPF_LD] = "ld",
[BPF_LDX] = "ldx",
[BPF_ST] = "st",
[BPF_STX] = "stx",
[BPF_ALU] = "alu",
[BPF_JMP] = "jmp",
[BPF_RET] = "BUG",
[BPF_ALU64] = "alu64",
};
static const char *const bpf_alu_string[16] = {
[BPF_ADD >> 4] = "+=",
[BPF_SUB >> 4] = "-=",
[BPF_MUL >> 4] = "*=",
[BPF_DIV >> 4] = "/=",
[BPF_OR >> 4] = "|=",
[BPF_AND >> 4] = "&=",
[BPF_LSH >> 4] = "<<=",
[BPF_RSH >> 4] = ">>=",
[BPF_NEG >> 4] = "neg",
[BPF_MOD >> 4] = "%=",
[BPF_XOR >> 4] = "^=",
[BPF_MOV >> 4] = "=",
[BPF_ARSH >> 4] = "s>>=",
[BPF_END >> 4] = "endian",
};
static const char *const bpf_ldst_string[] = {
[BPF_W >> 3] = "u32",
[BPF_H >> 3] = "u16",
[BPF_B >> 3] = "u8",
[BPF_DW >> 3] = "u64",
};
static const char *const bpf_jmp_string[16] = {
[BPF_JA >> 4] = "jmp",
[BPF_JEQ >> 4] = "==",
[BPF_JGT >> 4] = ">",
[BPF_JGE >> 4] = ">=",
[BPF_JSET >> 4] = "&",
[BPF_JNE >> 4] = "!=",
[BPF_JSGT >> 4] = "s>",
[BPF_JSGE >> 4] = "s>=",
[BPF_CALL >> 4] = "call",
[BPF_EXIT >> 4] = "exit",
};
static void print_bpf_insn(struct bpf_insn *insn)
{
u8 class = BPF_CLASS(insn->code);
if (class == BPF_ALU || class == BPF_ALU64) {
if (BPF_SRC(insn->code) == BPF_X)
verbose("(%02x) %sr%d %s %sr%d\n",
insn->code, class == BPF_ALU ? "(u32) " : "",
insn->dst_reg,
bpf_alu_string[BPF_OP(insn->code) >> 4],
class == BPF_ALU ? "(u32) " : "",
insn->src_reg);
else
verbose("(%02x) %sr%d %s %s%d\n",
insn->code, class == BPF_ALU ? "(u32) " : "",
insn->dst_reg,
bpf_alu_string[BPF_OP(insn->code) >> 4],
class == BPF_ALU ? "(u32) " : "",
insn->imm);
} else if (class == BPF_STX) {
if (BPF_MODE(insn->code) == BPF_MEM)
verbose("(%02x) *(%s *)(r%d %+d) = r%d\n",
insn->code,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->dst_reg,
insn->off, insn->src_reg);
else if (BPF_MODE(insn->code) == BPF_XADD)
verbose("(%02x) lock *(%s *)(r%d %+d) += r%d\n",
insn->code,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->dst_reg, insn->off,
insn->src_reg);
else
verbose("BUG_%02x\n", insn->code);
} else if (class == BPF_ST) {
if (BPF_MODE(insn->code) != BPF_MEM) {
verbose("BUG_st_%02x\n", insn->code);
return;
}
verbose("(%02x) *(%s *)(r%d %+d) = %d\n",
insn->code,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->dst_reg,
insn->off, insn->imm);
} else if (class == BPF_LDX) {
if (BPF_MODE(insn->code) != BPF_MEM) {
verbose("BUG_ldx_%02x\n", insn->code);
return;
}
verbose("(%02x) r%d = *(%s *)(r%d %+d)\n",
insn->code, insn->dst_reg,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->src_reg, insn->off);
} else if (class == BPF_LD) {
if (BPF_MODE(insn->code) == BPF_ABS) {
verbose("(%02x) r0 = *(%s *)skb[%d]\n",
insn->code,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->imm);
} else if (BPF_MODE(insn->code) == BPF_IND) {
verbose("(%02x) r0 = *(%s *)skb[r%d + %d]\n",
insn->code,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->src_reg, insn->imm);
} else if (BPF_MODE(insn->code) == BPF_IMM) {
verbose("(%02x) r%d = 0x%x\n",
insn->code, insn->dst_reg, insn->imm);
} else {
verbose("BUG_ld_%02x\n", insn->code);
return;
}
} else if (class == BPF_JMP) {
u8 opcode = BPF_OP(insn->code);
if (opcode == BPF_CALL) {
verbose("(%02x) call %d\n", insn->code, insn->imm);
} else if (insn->code == (BPF_JMP | BPF_JA)) {
verbose("(%02x) goto pc%+d\n",
insn->code, insn->off);
} else if (insn->code == (BPF_JMP | BPF_EXIT)) {
verbose("(%02x) exit\n", insn->code);
} else if (BPF_SRC(insn->code) == BPF_X) {
verbose("(%02x) if r%d %s r%d goto pc%+d\n",
insn->code, insn->dst_reg,
bpf_jmp_string[BPF_OP(insn->code) >> 4],
insn->src_reg, insn->off);
} else {
verbose("(%02x) if r%d %s 0x%x goto pc%+d\n",
insn->code, insn->dst_reg,
bpf_jmp_string[BPF_OP(insn->code) >> 4],
insn->imm, insn->off);
}
} else {
verbose("(%02x) %s\n", insn->code, bpf_class_string[class]);
}
}
static int pop_stack(struct verifier_env *env, int *prev_insn_idx)
{
struct verifier_stack_elem *elem;
int insn_idx;
if (env->head == NULL)
return -1;
memcpy(&env->cur_state, &env->head->st, sizeof(env->cur_state));
insn_idx = env->head->insn_idx;
if (prev_insn_idx)
*prev_insn_idx = env->head->prev_insn_idx;
elem = env->head->next;
kfree(env->head);
env->head = elem;
env->stack_size--;
return insn_idx;
}
static struct verifier_state *push_stack(struct verifier_env *env, int insn_idx,
int prev_insn_idx)
{
struct verifier_stack_elem *elem;
elem = kmalloc(sizeof(struct verifier_stack_elem), GFP_KERNEL);
if (!elem)
goto err;
memcpy(&elem->st, &env->cur_state, sizeof(env->cur_state));
elem->insn_idx = insn_idx;
elem->prev_insn_idx = prev_insn_idx;
elem->next = env->head;
env->head = elem;
env->stack_size++;
if (env->stack_size > 1024) {
verbose("BPF program is too complex\n");
goto err;
}
return &elem->st;
err:
/* pop all elements and return */
while (pop_stack(env, NULL) >= 0);
return NULL;
}
#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};
static void init_reg_state(struct reg_state *regs)
{
int i;
for (i = 0; i < MAX_BPF_REG; i++) {
regs[i].type = NOT_INIT;
regs[i].imm = 0;
regs[i].map_ptr = NULL;
}
/* frame pointer */
regs[BPF_REG_FP].type = FRAME_PTR;
/* 1st arg to a function */
regs[BPF_REG_1].type = PTR_TO_CTX;
}
static void mark_reg_unknown_value(struct reg_state *regs, u32 regno)
{
BUG_ON(regno >= MAX_BPF_REG);
regs[regno].type = UNKNOWN_VALUE;
regs[regno].imm = 0;
regs[regno].map_ptr = NULL;
}
enum reg_arg_type {
SRC_OP, /* register is used as source operand */
DST_OP, /* register is used as destination operand */
DST_OP_NO_MARK /* same as above, check only, don't mark */
};
static int check_reg_arg(struct reg_state *regs, u32 regno,
enum reg_arg_type t)
{
if (regno >= MAX_BPF_REG) {
verbose("R%d is invalid\n", regno);
return -EINVAL;
}
if (t == SRC_OP) {
/* check whether register used as source operand can be read */
if (regs[regno].type == NOT_INIT) {
verbose("R%d !read_ok\n", regno);
return -EACCES;
}
} else {
/* check whether register used as dest operand can be written to */
if (regno == BPF_REG_FP) {
verbose("frame pointer is read only\n");
return -EACCES;
}
if (t == DST_OP)
mark_reg_unknown_value(regs, regno);
}
return 0;
}
static int bpf_size_to_bytes(int bpf_size)
{
if (bpf_size == BPF_W)
return 4;
else if (bpf_size == BPF_H)
return 2;
else if (bpf_size == BPF_B)
return 1;
else if (bpf_size == BPF_DW)
return 8;
else
return -EINVAL;
}
static bool is_spillable_regtype(enum bpf_reg_type type)
{
switch (type) {
case PTR_TO_MAP_VALUE:
case PTR_TO_MAP_VALUE_OR_NULL:
case PTR_TO_STACK:
case PTR_TO_CTX:
case FRAME_PTR:
case CONST_PTR_TO_MAP:
return true;
default:
return false;
}
}
/* check_stack_read/write functions track spill/fill of registers,
* stack boundary and alignment are checked in check_mem_access()
*/
static int check_stack_write(struct verifier_state *state, int off, int size,
int value_regno)
{
int i;
/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
* so it's aligned access and [off, off + size) are within stack limits
*/
if (value_regno >= 0 &&
is_spillable_regtype(state->regs[value_regno].type)) {
/* register containing pointer is being spilled into stack */
if (size != BPF_REG_SIZE) {
verbose("invalid size of register spill\n");
return -EACCES;
}
/* save register state */
state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
state->regs[value_regno];
for (i = 0; i < BPF_REG_SIZE; i++)
state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL;
} else {
/* regular write of data into stack */
state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
(struct reg_state) {};
for (i = 0; i < size; i++)
state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC;
}
return 0;
}
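/* Example (added for clarity): spilling a pointer register with
 *	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8)
 * stores R1's whole reg_state into
 * spilled_regs[(MAX_BPF_STACK - 8) / BPF_REG_SIZE] and marks all eight
 * bytes at fp-8 as STACK_SPILL, so a later 8-byte fill can restore R1's
 * exact type; a pointer spill of any other size is rejected above with
 * "invalid size of register spill".
 */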
static int check_stack_read(struct verifier_state *state, int off, int size,
int value_regno)
{
u8 *slot_type;
int i;
slot_type = &state->stack_slot_type[MAX_BPF_STACK + off];
if (slot_type[0] == STACK_SPILL) {
if (size != BPF_REG_SIZE) {
verbose("invalid size of register spill\n");
return -EACCES;
}
for (i = 1; i < BPF_REG_SIZE; i++) {
if (slot_type[i] != STACK_SPILL) {
verbose("corrupted spill memory\n");
return -EACCES;
}
}
if (value_regno >= 0)
/* restore register state from stack */
state->regs[value_regno] =
state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE];
return 0;
} else {
for (i = 0; i < size; i++) {
if (slot_type[i] != STACK_MISC) {
verbose("invalid read from stack off %d+%d size %d\n",
off, i, size);
return -EACCES;
}
}
if (value_regno >= 0)
/* have read misc data from the stack */
mark_reg_unknown_value(state->regs, value_regno);
return 0;
}
}
/* check read/write into map element returned by bpf_map_lookup_elem() */
static int check_map_access(struct verifier_env *env, u32 regno, int off,
int size)
{
struct bpf_map *map = env->cur_state.regs[regno].map_ptr;
if (off < 0 || off + size > map->value_size) {
verbose("invalid access to map value, value_size=%d off=%d size=%d\n",
map->value_size, off, size);
return -EACCES;
}
return 0;
}
/* check access to 'struct bpf_context' fields */
static int check_ctx_access(struct verifier_env *env, int off, int size,
enum bpf_access_type t)
{
if (env->prog->aux->ops->is_valid_access &&
env->prog->aux->ops->is_valid_access(off, size, t))
return 0;
verbose("invalid bpf_context access off=%d size=%d\n", off, size);
return -EACCES;
}
static bool is_pointer_value(struct verifier_env *env, int regno)
{
if (env->allow_ptr_leaks)
return false;
switch (env->cur_state.regs[regno].type) {
case UNKNOWN_VALUE:
case CONST_IMM:
return false;
default:
return true;
}
}
/* check whether memory at (regno + off) is accessible for t = (read | write)
* if t==write, value_regno is a register whose value is stored into memory
* if t==read, value_regno is a register which will receive the value from memory
* if t==write && value_regno==-1, some unknown value is stored into memory
* if t==read && value_regno==-1, don't care what we read from memory
*/
static int check_mem_access(struct verifier_env *env, u32 regno, int off,
int bpf_size, enum bpf_access_type t,
int value_regno)
{
struct verifier_state *state = &env->cur_state;
int size, err = 0;
if (state->regs[regno].type == PTR_TO_STACK)
off += state->regs[regno].imm;
size = bpf_size_to_bytes(bpf_size);
if (size < 0)
return size;
if (off % size != 0) {
verbose("misaligned access off %d size %d\n", off, size);
return -EACCES;
}
if (state->regs[regno].type == PTR_TO_MAP_VALUE) {
if (t == BPF_WRITE && value_regno >= 0 &&
is_pointer_value(env, value_regno)) {
verbose("R%d leaks addr into map\n", value_regno);
return -EACCES;
}
err = check_map_access(env, regno, off, size);
if (!err && t == BPF_READ && value_regno >= 0)
mark_reg_unknown_value(state->regs, value_regno);
} else if (state->regs[regno].type == PTR_TO_CTX) {
if (t == BPF_WRITE && value_regno >= 0 &&
is_pointer_value(env, value_regno)) {
verbose("R%d leaks addr into ctx\n", value_regno);
return -EACCES;
}
err = check_ctx_access(env, off, size, t);
if (!err && t == BPF_READ && value_regno >= 0)
mark_reg_unknown_value(state->regs, value_regno);
} else if (state->regs[regno].type == FRAME_PTR ||
state->regs[regno].type == PTR_TO_STACK) {
if (off >= 0 || off < -MAX_BPF_STACK) {
verbose("invalid stack off=%d size=%d\n", off, size);
return -EACCES;
}
if (t == BPF_WRITE) {
if (!env->allow_ptr_leaks &&
state->stack_slot_type[MAX_BPF_STACK + off] == STACK_SPILL &&
size != BPF_REG_SIZE) {
verbose("attempt to corrupt spilled pointer on stack\n");
return -EACCES;
}
err = check_stack_write(state, off, size, value_regno);
} else {
err = check_stack_read(state, off, size, value_regno);
}
} else {
verbose("R%d invalid mem access '%s'\n",
regno, reg_type_str[state->regs[regno].type]);
return -EACCES;
}
return err;
}
static int check_xadd(struct verifier_env *env, struct bpf_insn *insn)
{
struct reg_state *regs = env->cur_state.regs;
int err;
if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
insn->imm != 0) {
verbose("BPF_XADD uses reserved fields\n");
return -EINVAL;
}
/* check src1 operand */
err = check_reg_arg(regs, insn->src_reg, SRC_OP);
if (err)
return err;
/* check src2 operand */
err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
if (err)
return err;
/* check whether atomic_add can read the memory */
err = check_mem_access(env, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_READ, -1);
if (err)
return err;
/* check whether atomic_add can write into the same memory */
return check_mem_access(env, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_WRITE, -1);
}
/* when register 'regno' is passed into function that will read 'access_size'
* bytes from that pointer, make sure that it's within stack boundary
* and all elements of stack are initialized
*/
static int check_stack_boundary(struct verifier_env *env,
int regno, int access_size)
{
struct verifier_state *state = &env->cur_state;
struct reg_state *regs = state->regs;
int off, i;
if (regs[regno].type != PTR_TO_STACK)
return -EACCES;
off = regs[regno].imm;
if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
access_size <= 0) {
verbose("invalid stack type R%d off=%d access_size=%d\n",
regno, off, access_size);
return -EACCES;
}
for (i = 0; i < access_size; i++) {
if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) {
verbose("invalid indirect read from stack off %d+%d size %d\n",
off, i, access_size);
return -EACCES;
}
}
return 0;
}
static int check_func_arg(struct verifier_env *env, u32 regno,
enum bpf_arg_type arg_type, struct bpf_map **mapp)
{
struct reg_state *reg = env->cur_state.regs + regno;
enum bpf_reg_type expected_type;
int err = 0;
if (arg_type == ARG_DONTCARE)
return 0;
if (reg->type == NOT_INIT) {
verbose("R%d !read_ok\n", regno);
return -EACCES;
}
if (arg_type == ARG_ANYTHING) {
if (is_pointer_value(env, regno)) {
verbose("R%d leaks addr into helper function\n", regno);
return -EACCES;
}
return 0;
}
if (arg_type == ARG_PTR_TO_STACK || arg_type == ARG_PTR_TO_MAP_KEY ||
arg_type == ARG_PTR_TO_MAP_VALUE) {
expected_type = PTR_TO_STACK;
} else if (arg_type == ARG_CONST_STACK_SIZE) {
expected_type = CONST_IMM;
} else if (arg_type == ARG_CONST_MAP_PTR) {
expected_type = CONST_PTR_TO_MAP;
} else if (arg_type == ARG_PTR_TO_CTX) {
expected_type = PTR_TO_CTX;
} else {
verbose("unsupported arg_type %d\n", arg_type);
return -EFAULT;
}
if (reg->type != expected_type) {
verbose("R%d type=%s expected=%s\n", regno,
reg_type_str[reg->type], reg_type_str[expected_type]);
return -EACCES;
}
if (arg_type == ARG_CONST_MAP_PTR) {
/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
*mapp = reg->map_ptr;
} else if (arg_type == ARG_PTR_TO_MAP_KEY) {
/* bpf_map_xxx(..., map_ptr, ..., key) call:
* check that [key, key + map->key_size) are within
* stack limits and initialized
*/
if (!*mapp) {
/* in function declaration map_ptr must come before
* map_key, so that it's verified and known before
* we have to check map_key here. Otherwise it means
* that kernel subsystem misconfigured verifier
*/
verbose("invalid map_ptr to access map->key\n");
return -EACCES;
}
err = check_stack_boundary(env, regno, (*mapp)->key_size);
} else if (arg_type == ARG_PTR_TO_MAP_VALUE) {
/* bpf_map_xxx(..., map_ptr, ..., value) call:
* check [value, value + map->value_size) validity
*/
if (!*mapp) {
/* kernel subsystem misconfigured verifier */
verbose("invalid map_ptr to access map->value\n");
return -EACCES;
}
err = check_stack_boundary(env, regno, (*mapp)->value_size);
} else if (arg_type == ARG_CONST_STACK_SIZE) {
/* bpf_xxx(..., buf, len) call will access 'len' bytes
* from stack pointer 'buf'. Check it
* note: regno == len, regno - 1 == buf
*/
if (regno == 0) {
/* kernel subsystem misconfigured verifier */
verbose("ARG_CONST_STACK_SIZE cannot be first argument\n");
return -EACCES;
}
err = check_stack_boundary(env, regno - 1, reg->imm);
}
return err;
}
static int check_map_func_compatibility(struct bpf_map *map, int func_id)
{
bool bool_map, bool_func;
int i;
if (!map)
return 0;
for (i = 0; i < ARRAY_SIZE(func_limit); i++) {
bool_map = (map->map_type == func_limit[i].map_type);
bool_func = (func_id == func_limit[i].func_id);
/* only when the map and func pair match can it continue;
 * don't allow any other map type to be passed into
 * the special func.
*/
if (bool_func && bool_map != bool_func)
return -EINVAL;
}
return 0;
}
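/* For instance (added for clarity), per the func_limit[] table above,
 * calling BPF_FUNC_tail_call with any map other than a
 * BPF_MAP_TYPE_PROG_ARRAY makes bool_func true and bool_map false, so the
 * call is rejected with -EINVAL; unrelated helpers are left alone.
 */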
static int check_call(struct verifier_env *env, int func_id)
{
struct verifier_state *state = &env->cur_state;
const struct bpf_func_proto *fn = NULL;
struct reg_state *regs = state->regs;
struct bpf_map *map = NULL;
struct reg_state *reg;
int i, err;
/* find function prototype */
if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
verbose("invalid func %d\n", func_id);
return -EINVAL;
}
if (env->prog->aux->ops->get_func_proto)
fn = env->prog->aux->ops->get_func_proto(func_id);
if (!fn) {
verbose("unknown func %d\n", func_id);
return -EINVAL;
}
/* eBPF programs must be GPL compatible to use GPL-ed functions */
if (!env->prog->gpl_compatible && fn->gpl_only) {
verbose("cannot call GPL only function from proprietary program\n");
return -EINVAL;
}
/* check args */
err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &map);
if (err)
return err;
err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &map);
if (err)
return err;
err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &map);
if (err)
return err;
err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &map);
if (err)
return err;
err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &map);
if (err)
return err;
/* reset caller saved regs */
for (i = 0; i < CALLER_SAVED_REGS; i++) {
reg = regs + caller_saved[i];
reg->type = NOT_INIT;
reg->imm = 0;
}
/* update return register */
if (fn->ret_type == RET_INTEGER) {
regs[BPF_REG_0].type = UNKNOWN_VALUE;
} else if (fn->ret_type == RET_VOID) {
regs[BPF_REG_0].type = NOT_INIT;
} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
/* remember map_ptr, so that check_map_access()
* can check 'value_size' boundary of memory access
* to map element returned from bpf_map_lookup_elem()
*/
if (map == NULL) {
verbose("kernel subsystem misconfigured verifier\n");
return -EINVAL;
}
regs[BPF_REG_0].map_ptr = map;
} else {
verbose("unknown return type %d of func %d\n",
fn->ret_type, func_id);
return -EINVAL;
}
err = check_map_func_compatibility(map, func_id);
if (err)
return err;
return 0;
}
/* check validity of 32-bit and 64-bit arithmetic operations */
static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
{
struct reg_state *regs = env->cur_state.regs;
u8 opcode = BPF_OP(insn->code);
int err;
if (opcode == BPF_END || opcode == BPF_NEG) {
if (opcode == BPF_NEG) {
if (BPF_SRC(insn->code) != 0 ||
insn->src_reg != BPF_REG_0 ||
insn->off != 0 || insn->imm != 0) {
verbose("BPF_NEG uses reserved fields\n");
return -EINVAL;
}
} else {
if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
(insn->imm != 16 && insn->imm != 32 && insn->imm != 64)) {
verbose("BPF_END uses reserved fields\n");
return -EINVAL;
}
}
/* check src operand */
err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
if (err)
return err;
if (is_pointer_value(env, insn->dst_reg)) {
verbose("R%d pointer arithmetic prohibited\n",
insn->dst_reg);
return -EACCES;
}
/* check dest operand */
err = check_reg_arg(regs, insn->dst_reg, DST_OP);
if (err)
return err;
} else if (opcode == BPF_MOV) {
if (BPF_SRC(insn->code) == BPF_X) {
if (insn->imm != 0 || insn->off != 0) {
verbose("BPF_MOV uses reserved fields\n");
return -EINVAL;
}
/* check src operand */
err = check_reg_arg(regs, insn->src_reg, SRC_OP);
if (err)
return err;
} else {
if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
verbose("BPF_MOV uses reserved fields\n");
return -EINVAL;
}
}
/* check dest operand */
err = check_reg_arg(regs, insn->dst_reg, DST_OP);
if (err)
return err;
if (BPF_SRC(insn->code) == BPF_X) {
if (BPF_CLASS(insn->code) == BPF_ALU64) {
/* case: R1 = R2
* copy register state to dest reg
*/
regs[insn->dst_reg] = regs[insn->src_reg];
} else {
if (is_pointer_value(env, insn->src_reg)) {
verbose("R%d partial copy of pointer\n",
insn->src_reg);
return -EACCES;
}
regs[insn->dst_reg].type = UNKNOWN_VALUE;
regs[insn->dst_reg].map_ptr = NULL;
}
} else {
/* case: R = imm
* remember the value we stored into this reg
*/
regs[insn->dst_reg].type = CONST_IMM;
regs[insn->dst_reg].imm = insn->imm;
}
} else if (opcode > BPF_END) {
verbose("invalid BPF_ALU opcode %x\n", opcode);
return -EINVAL;
} else { /* all other ALU ops: and, sub, xor, add, ... */
bool stack_relative = false;
if (BPF_SRC(insn->code) == BPF_X) {
if (insn->imm != 0 || insn->off != 0) {
verbose("BPF_ALU uses reserved fields\n");
return -EINVAL;
}
/* check src1 operand */
err = check_reg_arg(regs, insn->src_reg, SRC_OP);
if (err)
return err;
} else {
if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
verbose("BPF_ALU uses reserved fields\n");
return -EINVAL;
}
}
/* check src2 operand */
err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
if (err)
return err;
if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
verbose("div by zero\n");
return -EINVAL;
}
if ((opcode == BPF_LSH || opcode == BPF_RSH ||
opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
if (insn->imm < 0 || insn->imm >= size) {
verbose("invalid shift %d\n", insn->imm);
return -EINVAL;
}
}
/* pattern match 'bpf_add Rx, imm' instruction */
if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 &&
regs[insn->dst_reg].type == FRAME_PTR &&
BPF_SRC(insn->code) == BPF_K) {
stack_relative = true;
} else if (is_pointer_value(env, insn->dst_reg)) {
verbose("R%d pointer arithmetic prohibited\n",
insn->dst_reg);
return -EACCES;
} else if (BPF_SRC(insn->code) == BPF_X &&
is_pointer_value(env, insn->src_reg)) {
verbose("R%d pointer arithmetic prohibited\n",
insn->src_reg);
return -EACCES;
}
/* check dest operand */
err = check_reg_arg(regs, insn->dst_reg, DST_OP);
if (err)
return err;
if (stack_relative) {
regs[insn->dst_reg].type = PTR_TO_STACK;
regs[insn->dst_reg].imm = insn->imm;
}
}
return 0;
}
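/* Illustrative pattern for the stack_relative path above (a sketch
* using standard insn macros):
*   BPF_MOV64_REG(BPF_REG_1, BPF_REG_10)   // r1 = r10 (frame pointer)
*   BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8)  // r1 += -8
* The MOV copies the FRAME_PTR state into r1 and the ADD matches the
* pattern, leaving r1 as PTR_TO_STACK with imm = -8.
*/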
static int check_cond_jmp_op(struct verifier_env *env,
struct bpf_insn *insn, int *insn_idx)
{
struct reg_state *regs = env->cur_state.regs;
struct verifier_state *other_branch;
u8 opcode = BPF_OP(insn->code);
int err;
if (opcode > BPF_EXIT) {
verbose("invalid BPF_JMP opcode %x\n", opcode);
return -EINVAL;
}
if (BPF_SRC(insn->code) == BPF_X) {
if (insn->imm != 0) {
verbose("BPF_JMP uses reserved fields\n");
return -EINVAL;
}
/* check src1 operand */
err = check_reg_arg(regs, insn->src_reg, SRC_OP);
if (err)
return err;
if (is_pointer_value(env, insn->src_reg)) {
verbose("R%d pointer comparison prohibited\n",
insn->src_reg);
return -EACCES;
}
} else {
if (insn->src_reg != BPF_REG_0) {
verbose("BPF_JMP uses reserved fields\n");
return -EINVAL;
}
}
/* check src2 operand */
err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
if (err)
return err;
/* detect if R == 0 where R was initialized to zero earlier */
if (BPF_SRC(insn->code) == BPF_K &&
(opcode == BPF_JEQ || opcode == BPF_JNE) &&
regs[insn->dst_reg].type == CONST_IMM &&
regs[insn->dst_reg].imm == insn->imm) {
if (opcode == BPF_JEQ) {
/* if (imm == imm) goto pc+off;
* only follow the goto, ignore fall-through
*/
*insn_idx += insn->off;
return 0;
} else {
/* if (imm != imm) goto pc+off;
* only follow fall-through branch, since
* that's where the program will go
*/
return 0;
}
}
other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
if (!other_branch)
return -EFAULT;
/* detect if R == 0 where R is returned value from bpf_map_lookup_elem() */
if (BPF_SRC(insn->code) == BPF_K &&
insn->imm == 0 && (opcode == BPF_JEQ ||
opcode == BPF_JNE) &&
regs[insn->dst_reg].type == PTR_TO_MAP_VALUE_OR_NULL) {
if (opcode == BPF_JEQ) {
/* next fallthrough insn can access memory via
* this register
*/
regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
/* branch target cannot access it, since reg == 0 */
other_branch->regs[insn->dst_reg].type = CONST_IMM;
other_branch->regs[insn->dst_reg].imm = 0;
} else {
other_branch->regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
regs[insn->dst_reg].type = CONST_IMM;
regs[insn->dst_reg].imm = 0;
}
} else if (is_pointer_value(env, insn->dst_reg)) {
verbose("R%d pointer comparison prohibited\n", insn->dst_reg);
return -EACCES;
} else if (BPF_SRC(insn->code) == BPF_K &&
(opcode == BPF_JEQ || opcode == BPF_JNE)) {
if (opcode == BPF_JEQ) {
/* detect if (R == imm) goto
* and in the target state recognize that R = imm
*/
other_branch->regs[insn->dst_reg].type = CONST_IMM;
other_branch->regs[insn->dst_reg].imm = insn->imm;
} else {
/* detect if (R != imm) goto
* and in the fall-through state recognize that R = imm
*/
regs[insn->dst_reg].type = CONST_IMM;
regs[insn->dst_reg].imm = insn->imm;
}
}
if (log_level)
print_verifier_state(env);
return 0;
}
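/* Worked example for the PTR_TO_MAP_VALUE_OR_NULL handling above
* (illustrative): after r0 = bpf_map_lookup_elem(...), the insn
*   if (r0 == 0) goto Lnull;
* leaves r0 as PTR_TO_MAP_VALUE in the fall-through state (known
* non-NULL there) and as CONST_IMM 0 in the branch-target state, so
* dereferences are only accepted where the NULL check has passed.
*/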
/* return the map pointer stored inside BPF_LD_IMM64 instruction */
static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
{
u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;
return (struct bpf_map *) (unsigned long) imm64;
}
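/* Encoding sketch (see replace_map_fd_with_map_ptr() below): a
* BPF_LD_IMM64 spans two insn slots, insn[0].imm holding the low and
* insn[1].imm the high 32 bits. E.g. for a hypothetical pointer
* 0xffff880012345678:
*   insn[0].imm = 0x12345678;  insn[1].imm = 0xffff8800;
*/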
/* verify BPF_LD_IMM64 instruction */
static int check_ld_imm(struct verifier_env *env, struct bpf_insn *insn)
{
struct reg_state *regs = env->cur_state.regs;
int err;
if (BPF_SIZE(insn->code) != BPF_DW) {
verbose("invalid BPF_LD_IMM insn\n");
return -EINVAL;
}
if (insn->off != 0) {
verbose("BPF_LD_IMM64 uses reserved fields\n");
return -EINVAL;
}
err = check_reg_arg(regs, insn->dst_reg, DST_OP);
if (err)
return err;
if (insn->src_reg == 0)
/* generic move 64-bit immediate into a register */
return 0;
/* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */
BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);
regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn);
return 0;
}
static bool may_access_skb(enum bpf_prog_type type)
{
switch (type) {
case BPF_PROG_TYPE_SOCKET_FILTER:
case BPF_PROG_TYPE_SCHED_CLS:
case BPF_PROG_TYPE_SCHED_ACT:
return true;
default:
return false;
}
}
/* verify safety of LD_ABS|LD_IND instructions:
* - they can only appear in the programs where ctx == skb
* - since they are wrappers of function calls, they scratch R1-R5 registers,
* preserve R6-R9, and store return value into R0
*
* Implicit input:
* ctx == skb == R6 == CTX
*
* Explicit input:
* SRC == any register
* IMM == 32-bit immediate
*
* Output:
* R0 - 8/16/32-bit skb data converted to cpu endianness
*/
static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
{
struct reg_state *regs = env->cur_state.regs;
u8 mode = BPF_MODE(insn->code);
struct reg_state *reg;
int i, err;
if (!may_access_skb(env->prog->type)) {
verbose("BPF_LD_ABS|IND instructions not allowed for this program type\n");
return -EINVAL;
}
if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
(mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
verbose("BPF_LD_ABS uses reserved fields\n");
return -EINVAL;
}
/* check whether implicit source operand (register R6) is readable */
err = check_reg_arg(regs, BPF_REG_6, SRC_OP);
if (err)
return err;
if (regs[BPF_REG_6].type != PTR_TO_CTX) {
verbose("at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
return -EINVAL;
}
if (mode == BPF_IND) {
/* check explicit source operand */
err = check_reg_arg(regs, insn->src_reg, SRC_OP);
if (err)
return err;
}
/* reset caller saved regs to unreadable */
for (i = 0; i < CALLER_SAVED_REGS; i++) {
reg = regs + caller_saved[i];
reg->type = NOT_INIT;
reg->imm = 0;
}
/* mark destination R0 register as readable, since it contains
* the value fetched from the packet
*/
regs[BPF_REG_0].type = UNKNOWN_VALUE;
return 0;
}
/* non-recursive DFS pseudo code
* 1 procedure DFS-iterative(G,v):
* 2 label v as discovered
* 3 let S be a stack
* 4 S.push(v)
* 5 while S is not empty
* 6 t <- S.pop()
* 7 if t is what we're looking for:
* 8 return t
* 9 for all edges e in G.adjacentEdges(t) do
* 10 if edge e is already labelled
* 11 continue with the next edge
* 12 w <- G.adjacentVertex(t,e)
* 13 if vertex w is not discovered and not explored
* 14 label e as tree-edge
* 15 label w as discovered
* 16 S.push(w)
* 17 continue at 5
* 18 else if vertex w is discovered
* 19 label e as back-edge
* 20 else
* 21 // vertex w is explored
* 22 label e as forward- or cross-edge
* 23 label t as explored
* 24 S.pop()
*
* convention:
* 0x10 - discovered
* 0x11 - discovered and fall-through edge labelled
* 0x12 - discovered and fall-through and branch edges labelled
* 0x20 - explored
*/
enum {
DISCOVERED = 0x10,
EXPLORED = 0x20,
FALLTHROUGH = 1,
BRANCH = 2,
};
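/* Example of the progression (illustrative): for a conditional jump,
* check_cfg() pushes the fall-through edge before the branch edge, so
* insn_state advances 0x10 (DISCOVERED) -> 0x11
* (DISCOVERED | FALLTHROUGH) -> 0x12 (DISCOVERED | BRANCH) -> 0x20
* (EXPLORED); the >= comparisons in push_insn() rely on this ordering.
*/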
#define STATE_LIST_MARK ((struct verifier_state_list *) -1L)
static int *insn_stack; /* stack of insns to process */
static int cur_stack; /* current stack index */
static int *insn_state;
/* t, w, e - match pseudo-code above:
* t - index of current instruction
* w - next instruction
* e - edge
*/
static int push_insn(int t, int w, int e, struct verifier_env *env)
{
if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
return 0;
if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
return 0;
if (w < 0 || w >= env->prog->len) {
verbose("jump out of range from insn %d to %d\n", t, w);
return -EINVAL;
}
if (e == BRANCH)
/* mark branch target for state pruning */
env->explored_states[w] = STATE_LIST_MARK;
if (insn_state[w] == 0) {
/* tree-edge */
insn_state[t] = DISCOVERED | e;
insn_state[w] = DISCOVERED;
if (cur_stack >= env->prog->len)
return -E2BIG;
insn_stack[cur_stack++] = w;
return 1;
} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
verbose("back-edge from insn %d to %d\n", t, w);
return -EINVAL;
} else if (insn_state[w] == EXPLORED) {
/* forward- or cross-edge */
insn_state[t] = DISCOVERED | e;
} else {
verbose("insn state internal bug\n");
return -EFAULT;
}
return 0;
}
/* non-recursive depth-first-search to detect loops in BPF program
* loop == back-edge in directed graph
*/
static int check_cfg(struct verifier_env *env)
{
struct bpf_insn *insns = env->prog->insnsi;
int insn_cnt = env->prog->len;
int ret = 0;
int i, t;
insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
if (!insn_state)
return -ENOMEM;
insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
if (!insn_stack) {
kfree(insn_state);
return -ENOMEM;
}
insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
insn_stack[0] = 0; /* 0 is the first instruction */
cur_stack = 1;
peek_stack:
if (cur_stack == 0)
goto check_state;
t = insn_stack[cur_stack - 1];
if (BPF_CLASS(insns[t].code) == BPF_JMP) {
u8 opcode = BPF_OP(insns[t].code);
if (opcode == BPF_EXIT) {
goto mark_explored;
} else if (opcode == BPF_CALL) {
ret = push_insn(t, t + 1, FALLTHROUGH, env);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
goto err_free;
} else if (opcode == BPF_JA) {
if (BPF_SRC(insns[t].code) != BPF_K) {
ret = -EINVAL;
goto err_free;
}
/* unconditional jump with single edge */
ret = push_insn(t, t + insns[t].off + 1,
FALLTHROUGH, env);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
goto err_free;
/* tell verifier to check for equivalent states
* after every call and jump
*/
if (t + 1 < insn_cnt)
env->explored_states[t + 1] = STATE_LIST_MARK;
} else {
/* conditional jump with two edges */
ret = push_insn(t, t + 1, FALLTHROUGH, env);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
goto err_free;
ret = push_insn(t, t + insns[t].off + 1, BRANCH, env);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
goto err_free;
}
} else {
/* all other non-branch instructions with single
* fall-through edge
*/
ret = push_insn(t, t + 1, FALLTHROUGH, env);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
goto err_free;
}
mark_explored:
insn_state[t] = EXPLORED;
if (cur_stack-- <= 0) {
verbose("pop stack internal bug\n");
ret = -EFAULT;
goto err_free;
}
goto peek_stack;
check_state:
for (i = 0; i < insn_cnt; i++) {
if (insn_state[i] != EXPLORED) {
verbose("unreachable insn %d\n", i);
ret = -EINVAL;
goto err_free;
}
}
ret = 0; /* cfg looks good */
err_free:
kfree(insn_state);
kfree(insn_stack);
return ret;
}
/* compare two verifier states
*
* all states stored in state_list are known to be valid, since
* verifier reached 'bpf_exit' instruction through them
*
* this function is called when verifier exploring different branches of
* execution popped from the state stack. If it sees an old state that has
* more strict register state and more strict stack state then this execution
* branch doesn't need to be explored further, since verifier already
* concluded that more strict state leads to valid finish.
*
* Therefore two states are equivalent if register state is more conservative
* and explored stack state is more conservative than the current one.
* Example:
* explored current
* (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
* (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
*
* In other words if current stack state (one being explored) has more
* valid slots than old one that already passed validation, it means
* the verifier can stop exploring and conclude that current state is valid too
*
* Similarly with registers. If explored state has register type as invalid
* whereas register type in current state is meaningful, it means that
* the current state will reach 'bpf_exit' instruction safely
*/
static bool states_equal(struct verifier_state *old, struct verifier_state *cur)
{
int i;
for (i = 0; i < MAX_BPF_REG; i++) {
if (memcmp(&old->regs[i], &cur->regs[i],
sizeof(old->regs[0])) != 0) {
if (old->regs[i].type == NOT_INIT ||
(old->regs[i].type == UNKNOWN_VALUE &&
cur->regs[i].type != NOT_INIT))
continue;
return false;
}
}
for (i = 0; i < MAX_BPF_STACK; i++) {
if (old->stack_slot_type[i] == STACK_INVALID)
continue;
if (old->stack_slot_type[i] != cur->stack_slot_type[i])
/* Ex: old explored (safe) state has STACK_SPILL in
* this stack slot, but current has STACK_MISC ->
* these verifier states are not equivalent;
* return false to continue verification of this path
*/
return false;
if (i % BPF_REG_SIZE)
continue;
if (memcmp(&old->spilled_regs[i / BPF_REG_SIZE],
&cur->spilled_regs[i / BPF_REG_SIZE],
sizeof(old->spilled_regs[0])))
/* when explored and current stack slot types are
* the same, check that stored pointers types
* are the same as well.
* Ex: explored safe path could have stored
* (struct reg_state) {.type = PTR_TO_STACK, .imm = -8}
* but current path has stored:
* (struct reg_state) {.type = PTR_TO_STACK, .imm = -16}
* such verifier states are not equivalent.
* return false to continue verification of this path
*/
return false;
else
continue;
}
return true;
}
static int is_state_visited(struct verifier_env *env, int insn_idx)
{
struct verifier_state_list *new_sl;
struct verifier_state_list *sl;
sl = env->explored_states[insn_idx];
if (!sl)
/* this 'insn_idx' instruction wasn't marked, so we will not
* be doing state search here
*/
return 0;
while (sl != STATE_LIST_MARK) {
if (states_equal(&sl->state, &env->cur_state))
/* reached equivalent register/stack state,
* prune the search
*/
return 1;
sl = sl->next;
}
/* there were no equivalent states, remember current one.
* technically the current state is not proven to be safe yet,
* but it will either reach bpf_exit (which means it's safe) or
* it will be rejected. Since there are no loops, we won't be
* seeing this 'insn_idx' instruction again on the way to bpf_exit
*/
new_sl = kmalloc(sizeof(struct verifier_state_list), GFP_USER);
if (!new_sl)
return -ENOMEM;
/* add new state to the head of linked list */
memcpy(&new_sl->state, &env->cur_state, sizeof(env->cur_state));
new_sl->next = env->explored_states[insn_idx];
env->explored_states[insn_idx] = new_sl;
return 0;
}
static int do_check(struct verifier_env *env)
{
struct verifier_state *state = &env->cur_state;
struct bpf_insn *insns = env->prog->insnsi;
struct reg_state *regs = state->regs;
int insn_cnt = env->prog->len;
int insn_idx, prev_insn_idx = 0;
int insn_processed = 0;
bool do_print_state = false;
init_reg_state(regs);
insn_idx = 0;
for (;;) {
struct bpf_insn *insn;
u8 class;
int err;
if (insn_idx >= insn_cnt) {
verbose("invalid insn idx %d insn_cnt %d\n",
insn_idx, insn_cnt);
return -EFAULT;
}
insn = &insns[insn_idx];
class = BPF_CLASS(insn->code);
if (++insn_processed > 32768) {
verbose("BPF program is too large. Proccessed %d insn\n",
insn_processed);
return -E2BIG;
}
err = is_state_visited(env, insn_idx);
if (err < 0)
return err;
if (err == 1) {
/* found equivalent state, can prune the search */
if (log_level) {
if (do_print_state)
verbose("\nfrom %d to %d: safe\n",
prev_insn_idx, insn_idx);
else
verbose("%d: safe\n", insn_idx);
}
goto process_bpf_exit;
}
if (log_level && do_print_state) {
verbose("\nfrom %d to %d:", prev_insn_idx, insn_idx);
print_verifier_state(env);
do_print_state = false;
}
if (log_level) {
verbose("%d: ", insn_idx);
print_bpf_insn(insn);
}
if (class == BPF_ALU || class == BPF_ALU64) {
err = check_alu_op(env, insn);
if (err)
return err;
} else if (class == BPF_LDX) {
enum bpf_reg_type src_reg_type;
/* check for reserved fields is already done */
/* check src operand */
err = check_reg_arg(regs, insn->src_reg, SRC_OP);
if (err)
return err;
err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK);
if (err)
return err;
src_reg_type = regs[insn->src_reg].type;
/* check that memory (src_reg + off) is readable,
* the state of dst_reg will be updated by this func
*/
err = check_mem_access(env, insn->src_reg, insn->off,
BPF_SIZE(insn->code), BPF_READ,
insn->dst_reg);
if (err)
return err;
if (BPF_SIZE(insn->code) != BPF_W) {
insn_idx++;
continue;
}
if (insn->imm == 0) {
/* saw a valid insn
* dst_reg = *(u32 *)(src_reg + off)
* use reserved 'imm' field to mark this insn
*/
insn->imm = src_reg_type;
} else if (src_reg_type != insn->imm &&
(src_reg_type == PTR_TO_CTX ||
insn->imm == PTR_TO_CTX)) {
/* An abusive program is trying to use the same insn
* dst_reg = *(u32*) (src_reg + off)
* with different pointer types:
* src_reg == ctx in one branch and
* src_reg == stack|map in some other branch.
* Reject it.
*/
verbose("same insn cannot be used with different pointers\n");
return -EINVAL;
}
} else if (class == BPF_STX) {
enum bpf_reg_type dst_reg_type;
if (BPF_MODE(insn->code) == BPF_XADD) {
err = check_xadd(env, insn);
if (err)
return err;
insn_idx++;
continue;
}
/* check src1 operand */
err = check_reg_arg(regs, insn->src_reg, SRC_OP);
if (err)
return err;
/* check src2 operand */
err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
if (err)
return err;
dst_reg_type = regs[insn->dst_reg].type;
/* check that memory (dst_reg + off) is writeable */
err = check_mem_access(env, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_WRITE,
insn->src_reg);
if (err)
return err;
if (insn->imm == 0) {
insn->imm = dst_reg_type;
} else if (dst_reg_type != insn->imm &&
(dst_reg_type == PTR_TO_CTX ||
insn->imm == PTR_TO_CTX)) {
verbose("same insn cannot be used with different pointers\n");
return -EINVAL;
}
} else if (class == BPF_ST) {
if (BPF_MODE(insn->code) != BPF_MEM ||
insn->src_reg != BPF_REG_0) {
verbose("BPF_ST uses reserved fields\n");
return -EINVAL;
}
/* check src operand */
err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
if (err)
return err;
/* check that memory (dst_reg + off) is writeable */
err = check_mem_access(env, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_WRITE,
-1);
if (err)
return err;
} else if (class == BPF_JMP) {
u8 opcode = BPF_OP(insn->code);
if (opcode == BPF_CALL) {
if (BPF_SRC(insn->code) != BPF_K ||
insn->off != 0 ||
insn->src_reg != BPF_REG_0 ||
insn->dst_reg != BPF_REG_0) {
verbose("BPF_CALL uses reserved fields\n");
return -EINVAL;
}
err = check_call(env, insn->imm);
if (err)
return err;
} else if (opcode == BPF_JA) {
if (BPF_SRC(insn->code) != BPF_K ||
insn->imm != 0 ||
insn->src_reg != BPF_REG_0 ||
insn->dst_reg != BPF_REG_0) {
verbose("BPF_JA uses reserved fields\n");
return -EINVAL;
}
insn_idx += insn->off + 1;
continue;
} else if (opcode == BPF_EXIT) {
if (BPF_SRC(insn->code) != BPF_K ||
insn->imm != 0 ||
insn->src_reg != BPF_REG_0 ||
insn->dst_reg != BPF_REG_0) {
verbose("BPF_EXIT uses reserved fields\n");
return -EINVAL;
}
/* eBPF calling convention is such that R0 is used
* to return the value from eBPF program.
* Make sure that it's readable at this time
* of bpf_exit, which means that program wrote
* something into it earlier
*/
err = check_reg_arg(regs, BPF_REG_0, SRC_OP);
if (err)
return err;
if (is_pointer_value(env, BPF_REG_0)) {
verbose("R0 leaks addr as return value\n");
return -EACCES;
}
process_bpf_exit:
insn_idx = pop_stack(env, &prev_insn_idx);
if (insn_idx < 0) {
break;
} else {
do_print_state = true;
continue;
}
} else {
err = check_cond_jmp_op(env, insn, &insn_idx);
if (err)
return err;
}
} else if (class == BPF_LD) {
u8 mode = BPF_MODE(insn->code);
if (mode == BPF_ABS || mode == BPF_IND) {
err = check_ld_abs(env, insn);
if (err)
return err;
} else if (mode == BPF_IMM) {
err = check_ld_imm(env, insn);
if (err)
return err;
insn_idx++;
} else {
verbose("invalid BPF_LD mode\n");
return -EINVAL;
}
} else {
verbose("unknown insn class %d\n", class);
return -EINVAL;
}
insn_idx++;
}
return 0;
}
/* look for pseudo eBPF instructions that access map FDs and
* replace them with actual map pointers
*/
static int replace_map_fd_with_map_ptr(struct verifier_env *env)
{
struct bpf_insn *insn = env->prog->insnsi;
int insn_cnt = env->prog->len;
int i, j;
for (i = 0; i < insn_cnt; i++, insn++) {
if (BPF_CLASS(insn->code) == BPF_LDX &&
(BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
verbose("BPF_LDX uses reserved fields\n");
return -EINVAL;
}
if (BPF_CLASS(insn->code) == BPF_STX &&
((BPF_MODE(insn->code) != BPF_MEM &&
BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
verbose("BPF_STX uses reserved fields\n");
return -EINVAL;
}
if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
struct bpf_map *map;
struct fd f;
if (i == insn_cnt - 1 || insn[1].code != 0 ||
insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
insn[1].off != 0) {
verbose("invalid bpf_ld_imm64 insn\n");
return -EINVAL;
}
if (insn->src_reg == 0)
/* valid generic load 64-bit imm */
goto next_insn;
if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
verbose("unrecognized bpf_ld_imm64 insn\n");
return -EINVAL;
}
f = fdget(insn->imm);
map = __bpf_map_get(f);
if (IS_ERR(map)) {
verbose("fd %d is not pointing to valid bpf_map\n",
insn->imm);
fdput(f);
return PTR_ERR(map);
}
/* store map pointer inside BPF_LD_IMM64 instruction */
insn[0].imm = (u32) (unsigned long) map;
insn[1].imm = ((u64) (unsigned long) map) >> 32;
/* check whether we recorded this map already */
for (j = 0; j < env->used_map_cnt; j++)
if (env->used_maps[j] == map) {
fdput(f);
goto next_insn;
}
if (env->used_map_cnt >= MAX_USED_MAPS) {
fdput(f);
return -E2BIG;
}
/* remember this map */
env->used_maps[env->used_map_cnt++] = map;
/* hold the map. If the program is rejected by verifier,
* the map will be released by release_maps() or it
* will be used by the valid program until it's unloaded
* and all maps are released in free_bpf_prog_info()
*/
bpf_map_inc(map, false);
fdput(f);
next_insn:
insn++;
i++;
}
}
/* now all pseudo BPF_LD_IMM64 instructions load valid
* 'struct bpf_map *' into a register instead of user map_fd.
* These pointers will be used later by verifier to validate map access.
*/
return 0;
}
/* drop refcnt of maps used by the rejected program */
static void release_maps(struct verifier_env *env)
{
int i;
for (i = 0; i < env->used_map_cnt; i++)
bpf_map_put(env->used_maps[i]);
}
/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
static void convert_pseudo_ld_imm64(struct verifier_env *env)
{
struct bpf_insn *insn = env->prog->insnsi;
int insn_cnt = env->prog->len;
int i;
for (i = 0; i < insn_cnt; i++, insn++)
if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
insn->src_reg = 0;
}
static void adjust_branches(struct bpf_prog *prog, int pos, int delta)
{
struct bpf_insn *insn = prog->insnsi;
int insn_cnt = prog->len;
int i;
for (i = 0; i < insn_cnt; i++, insn++) {
if (BPF_CLASS(insn->code) != BPF_JMP ||
BPF_OP(insn->code) == BPF_CALL ||
BPF_OP(insn->code) == BPF_EXIT)
continue;
/* adjust offset of jmps if necessary */
if (i < pos && i + insn->off + 1 > pos)
insn->off += delta;
else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
insn->off -= delta;
}
}
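/* Worked example (illustrative): with pos = 10 and delta = 2, a jump
* at i = 8 with off = +3 originally lands at i + off + 1 = 12, which
* is beyond pos, so the inserted insns now sit inside the jump and
* off is widened to +5.
*/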
/* convert load instructions that access fields of 'struct __sk_buff'
* into sequence of instructions that access fields of 'struct sk_buff'
*/
static int convert_ctx_accesses(struct verifier_env *env)
{
struct bpf_insn *insn = env->prog->insnsi;
int insn_cnt = env->prog->len;
struct bpf_insn insn_buf[16];
struct bpf_prog *new_prog;
u32 cnt;
int i;
enum bpf_access_type type;
if (!env->prog->aux->ops->convert_ctx_access)
return 0;
for (i = 0; i < insn_cnt; i++, insn++) {
if (insn->code == (BPF_LDX | BPF_MEM | BPF_W))
type = BPF_READ;
else if (insn->code == (BPF_STX | BPF_MEM | BPF_W))
type = BPF_WRITE;
else
continue;
if (insn->imm != PTR_TO_CTX) {
/* clear internal mark */
insn->imm = 0;
continue;
}
cnt = env->prog->aux->ops->
convert_ctx_access(type, insn->dst_reg, insn->src_reg,
insn->off, insn_buf, env->prog);
if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
verbose("bpf verifier is misconfigured\n");
return -EINVAL;
}
if (cnt == 1) {
memcpy(insn, insn_buf, sizeof(*insn));
continue;
}
/* several new insns need to be inserted. Make room for them */
insn_cnt += cnt - 1;
new_prog = bpf_prog_realloc(env->prog,
bpf_prog_size(insn_cnt),
GFP_USER);
if (!new_prog)
return -ENOMEM;
new_prog->len = insn_cnt;
memmove(new_prog->insnsi + i + cnt, new_prog->insnsi + i + 1,
sizeof(*insn) * (insn_cnt - i - cnt));
/* copy substitute insns in place of load instruction */
memcpy(new_prog->insnsi + i, insn_buf, sizeof(*insn) * cnt);
/* adjust branches in the whole program */
adjust_branches(new_prog, i, cnt - 1);
/* keep walking new program and skip insns we just inserted */
env->prog = new_prog;
insn = new_prog->insnsi + i + cnt - 1;
i += cnt - 1;
}
return 0;
}
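/* Illustrative effect (a sketch; the exact rewrite is up to the
* program type's convert_ctx_access callback): a program insn like
*   r0 = *(u32 *)(r1 + offsetof(struct __sk_buff, len))
* is replaced in place by one or more insns reading the equivalent
* struct sk_buff field, with insn_buf holding the substitute sequence.
*/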
static void free_states(struct verifier_env *env)
{
struct verifier_state_list *sl, *sln;
int i;
if (!env->explored_states)
return;
for (i = 0; i < env->prog->len; i++) {
sl = env->explored_states[i];
if (sl)
while (sl != STATE_LIST_MARK) {
sln = sl->next;
kfree(sl);
sl = sln;
}
}
kfree(env->explored_states);
}
int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
{
char __user *log_ubuf = NULL;
struct verifier_env *env;
int ret = -EINVAL;
if ((*prog)->len <= 0 || (*prog)->len > BPF_MAXINSNS)
return -E2BIG;
/* 'struct verifier_env' can be global, but since it's not small,
* allocate/free it every time bpf_check() is called
*/
env = kzalloc(sizeof(struct verifier_env), GFP_KERNEL);
if (!env)
return -ENOMEM;
env->prog = *prog;
/* grab the mutex to protect a few globals used by the verifier */
mutex_lock(&bpf_verifier_lock);
if (attr->log_level || attr->log_buf || attr->log_size) {
/* user requested verbose verifier output
* and supplied buffer to store the verification trace
*/
log_level = attr->log_level;
log_ubuf = (char __user *) (unsigned long) attr->log_buf;
log_size = attr->log_size;
log_len = 0;
ret = -EINVAL;
/* log_* values have to be sane */
if (log_size < 128 || log_size > UINT_MAX >> 8 ||
log_level == 0 || log_ubuf == NULL)
goto free_env;
ret = -ENOMEM;
log_buf = vmalloc(log_size);
if (!log_buf)
goto free_env;
} else {
log_level = 0;
}
ret = replace_map_fd_with_map_ptr(env);
if (ret < 0)
goto skip_full_check;
env->explored_states = kcalloc(env->prog->len,
sizeof(struct verifier_state_list *),
GFP_USER);
ret = -ENOMEM;
if (!env->explored_states)
goto skip_full_check;
ret = check_cfg(env);
if (ret < 0)
goto skip_full_check;
env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
ret = do_check(env);
skip_full_check:
while (pop_stack(env, NULL) >= 0);
free_states(env);
if (ret == 0)
/* program is valid, convert *(u32*)(ctx + off) accesses */
ret = convert_ctx_accesses(env);
if (log_level && log_len >= log_size - 1) {
BUG_ON(log_len >= log_size);
/* verifier log exceeded user supplied buffer */
ret = -ENOSPC;
/* fall through to return what was recorded */
}
/* copy verifier log back to user space including trailing zero */
if (log_level && copy_to_user(log_ubuf, log_buf, log_len + 1) != 0) {
ret = -EFAULT;
goto free_log_buf;
}
if (ret == 0 && env->used_map_cnt) {
/* if program passed verifier, update used_maps in bpf_prog_info */
env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
sizeof(env->used_maps[0]),
GFP_KERNEL);
if (!env->prog->aux->used_maps) {
ret = -ENOMEM;
goto free_log_buf;
}
memcpy(env->prog->aux->used_maps, env->used_maps,
sizeof(env->used_maps[0]) * env->used_map_cnt);
env->prog->aux->used_map_cnt = env->used_map_cnt;
/* program is valid. Convert pseudo bpf_ld_imm64 into generic
* bpf_ld_imm64 instructions
*/
convert_pseudo_ld_imm64(env);
}
free_log_buf:
if (log_level)
vfree(log_buf);
free_env:
if (!env->prog->aux->used_maps)
/* if we didn't copy map pointers into bpf_prog_info, release
* them now. Otherwise free_bpf_prog_info() will release them.
*/
release_maps(env);
*prog = env->prog;
kfree(env);
mutex_unlock(&bpf_verifier_lock);
return ret;
}
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_4948_0 |
crossvul-cpp_data_bad_2951_0 | // SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
pte_t *pte;
int err = 0;
pte = pte_offset_map(pmd, addr);
for (;;) {
err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
if (err)
break;
addr += PAGE_SIZE;
if (addr == end)
break;
pte++;
}
pte_unmap(pte);
return err;
}
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
pmd_t *pmd;
unsigned long next;
int err = 0;
pmd = pmd_offset(pud, addr);
do {
again:
next = pmd_addr_end(addr, end);
if (pmd_none(*pmd) || !walk->vma) {
if (walk->pte_hole)
err = walk->pte_hole(addr, next, walk);
if (err)
break;
continue;
}
/*
* This implies that each ->pmd_entry() handler
* needs to know about pmd_trans_huge() pmds
*/
if (walk->pmd_entry)
err = walk->pmd_entry(pmd, addr, next, walk);
if (err)
break;
/*
* Check this here so we only break down trans_huge
* pages when we _need_ to
*/
if (!walk->pte_entry)
continue;
split_huge_pmd(walk->vma, pmd, addr);
if (pmd_trans_unstable(pmd))
goto again;
err = walk_pte_range(pmd, addr, next, walk);
if (err)
break;
} while (pmd++, addr = next, addr != end);
return err;
}
static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
pud_t *pud;
unsigned long next;
int err = 0;
pud = pud_offset(p4d, addr);
do {
again:
next = pud_addr_end(addr, end);
if (pud_none(*pud) || !walk->vma) {
if (walk->pte_hole)
err = walk->pte_hole(addr, next, walk);
if (err)
break;
continue;
}
if (walk->pud_entry) {
spinlock_t *ptl = pud_trans_huge_lock(pud, walk->vma);
if (ptl) {
err = walk->pud_entry(pud, addr, next, walk);
spin_unlock(ptl);
if (err)
break;
continue;
}
}
split_huge_pud(walk->vma, pud, addr);
if (pud_none(*pud))
goto again;
if (walk->pmd_entry || walk->pte_entry)
err = walk_pmd_range(pud, addr, next, walk);
if (err)
break;
} while (pud++, addr = next, addr != end);
return err;
}
static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
p4d_t *p4d;
unsigned long next;
int err = 0;
p4d = p4d_offset(pgd, addr);
do {
next = p4d_addr_end(addr, end);
if (p4d_none_or_clear_bad(p4d)) {
if (walk->pte_hole)
err = walk->pte_hole(addr, next, walk);
if (err)
break;
continue;
}
if (walk->pmd_entry || walk->pte_entry)
err = walk_pud_range(p4d, addr, next, walk);
if (err)
break;
} while (p4d++, addr = next, addr != end);
return err;
}
static int walk_pgd_range(unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
pgd_t *pgd;
unsigned long next;
int err = 0;
pgd = pgd_offset(walk->mm, addr);
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd)) {
if (walk->pte_hole)
err = walk->pte_hole(addr, next, walk);
if (err)
break;
continue;
}
if (walk->pmd_entry || walk->pte_entry)
err = walk_p4d_range(pgd, addr, next, walk);
if (err)
break;
} while (pgd++, addr = next, addr != end);
return err;
}
#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
unsigned long end)
{
unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
return boundary < end ? boundary : end;
}
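/* Example (assuming a 2 MiB huge page size): for addr = 0x201000 the
* boundary is (addr & ~0x1fffff) + 0x200000 = 0x400000, so the walk
* below advances one huge page at a time, clamped to 'end'.
*/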
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
struct vm_area_struct *vma = walk->vma;
struct hstate *h = hstate_vma(vma);
unsigned long next;
unsigned long hmask = huge_page_mask(h);
unsigned long sz = huge_page_size(h);
pte_t *pte;
int err = 0;
do {
next = hugetlb_entry_end(h, addr, end);
pte = huge_pte_offset(walk->mm, addr & hmask, sz);
if (pte && walk->hugetlb_entry)
err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
if (err)
break;
} while (addr = next, addr != end);
return err;
}
#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
return 0;
}
#endif /* CONFIG_HUGETLB_PAGE */
/*
* Decide whether we really walk over the current vma on [@start, @end)
* or skip it via the returned value. Return 0 if we do walk over the
* current vma, and return 1 if we skip the vma. A negative value means
* an error, and we abort the current walk.
*/
static int walk_page_test(unsigned long start, unsigned long end,
struct mm_walk *walk)
{
struct vm_area_struct *vma = walk->vma;
if (walk->test_walk)
return walk->test_walk(start, end, walk);
/*
* vma(VM_PFNMAP) doesn't have any valid struct pages behind VM_PFNMAP
* range, so we don't walk over it as we do for normal vmas. However,
* Some callers are interested in handling hole range and they don't
* want to just ignore any single address range. Such users certainly
* define their ->pte_hole() callbacks, so let's delegate them to handle
* vma(VM_PFNMAP).
*/
if (vma->vm_flags & VM_PFNMAP) {
int err = 1;
if (walk->pte_hole)
err = walk->pte_hole(start, end, walk);
return err ? err : 1;
}
return 0;
}
static int __walk_page_range(unsigned long start, unsigned long end,
struct mm_walk *walk)
{
int err = 0;
struct vm_area_struct *vma = walk->vma;
if (vma && is_vm_hugetlb_page(vma)) {
if (walk->hugetlb_entry)
err = walk_hugetlb_range(start, end, walk);
} else
err = walk_pgd_range(start, end, walk);
return err;
}
/**
* walk_page_range - walk page table with caller specific callbacks
*
* Recursively walk the page table tree of the process represented by @walk->mm
* within the virtual address range [@start, @end). During walking, we can do
* some caller-specific work for each entry, by setting up pmd_entry(),
* pte_entry(), and/or hugetlb_entry(). If you don't set up for some of these
* callbacks, the associated entries/pages are just ignored.
* The return values of these callbacks are commonly defined like below:
* - 0 : succeeded to handle the current entry, and if you don't reach the
* end address yet, continue to walk.
* - >0 : succeeded to handle the current entry, and return to the caller
* with caller specific value.
* - <0 : failed to handle the current entry, and return to the caller
* with error code.
*
* Before starting to walk page table, some callers want to check whether
* they really want to walk over the current vma, typically by checking
* its vm_flags. walk_page_test() and @walk->test_walk() are used for this
* purpose.
*
* struct mm_walk keeps current values of some common data like vma and pmd,
* which are useful for the access from callbacks. If you want to pass some
* caller-specific data to callbacks, @walk->private should be helpful.
*
* Locking:
* Callers of walk_page_range() and walk_page_vma() should hold
* @walk->mm->mmap_sem, because these functions traverse the vma list
* and/or access the vma's data.
*/
int walk_page_range(unsigned long start, unsigned long end,
struct mm_walk *walk)
{
int err = 0;
unsigned long next;
struct vm_area_struct *vma;
if (start >= end)
return -EINVAL;
if (!walk->mm)
return -EINVAL;
VM_BUG_ON_MM(!rwsem_is_locked(&walk->mm->mmap_sem), walk->mm);
vma = find_vma(walk->mm, start);
do {
if (!vma) { /* after the last vma */
walk->vma = NULL;
next = end;
} else if (start < vma->vm_start) { /* outside vma */
walk->vma = NULL;
next = min(end, vma->vm_start);
} else { /* inside vma */
walk->vma = vma;
next = min(end, vma->vm_end);
vma = vma->vm_next;
err = walk_page_test(start, next, walk);
if (err > 0) {
/*
* positive return values are purely for
* controlling the pagewalk, so should never
* be passed to the callers.
*/
err = 0;
continue;
}
if (err < 0)
break;
}
if (walk->vma || walk->pte_hole)
err = __walk_page_range(start, next, walk);
if (err)
break;
} while (start = next, start < end);
return err;
}
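/* Usage sketch (illustrative, not part of this file; the names are
* hypothetical): counting present ptes in a range, with mmap_sem held
* for read as documented above.
*
*	static int count_pte(pte_t *pte, unsigned long addr,
*			     unsigned long next, struct mm_walk *walk)
*	{
*		if (pte_present(*pte))
*			(*(unsigned long *)walk->private)++;
*		return 0;
*	}
*
*	unsigned long count = 0;
*	struct mm_walk w = {
*		.mm = mm,
*		.pte_entry = count_pte,
*		.private = &count,
*	};
*	down_read(&mm->mmap_sem);
*	err = walk_page_range(start, end, &w);
*	up_read(&mm->mmap_sem);
*/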
int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk)
{
int err;
if (!walk->mm)
return -EINVAL;
VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
VM_BUG_ON(!vma);
walk->vma = vma;
err = walk_page_test(vma->vm_start, vma->vm_end, walk);
if (err > 0)
return 0;
if (err < 0)
return err;
return __walk_page_range(vma->vm_start, vma->vm_end, walk);
}
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_2951_0 |
crossvul-cpp_data_bad_3828_0 | /*
* IPVS An implementation of the IP virtual server support for the
* LINUX operating system. IPVS is now implemented as a module
* over the NetFilter framework. IPVS can be used to build a
* high-performance and highly available server based on a
* cluster of servers.
*
* Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
* Peter Kese <peter.kese@ijs.si>
* Julian Anastasov <ja@ssi.bg>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Changes:
*
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/mutex.h>
#include <net/net_namespace.h>
#include <linux/nsproxy.h>
#include <net/ip.h>
#ifdef CONFIG_IP_VS_IPV6
#include <net/ipv6.h>
#include <net/ip6_route.h>
#endif
#include <net/route.h>
#include <net/sock.h>
#include <net/genetlink.h>
#include <asm/uaccess.h>
#include <net/ip_vs.h>
/* semaphore for IPVS sockopts. And, [gs]etsockopt may sleep. */
static DEFINE_MUTEX(__ip_vs_mutex);
/* lock for service table */
static DEFINE_RWLOCK(__ip_vs_svc_lock);
/* sysctl variables */
#ifdef CONFIG_IP_VS_DEBUG
static int sysctl_ip_vs_debug_level = 0;
int ip_vs_get_debug_level(void)
{
return sysctl_ip_vs_debug_level;
}
#endif
/* Protos */
static void __ip_vs_del_service(struct ip_vs_service *svc);
#ifdef CONFIG_IP_VS_IPV6
/* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */
static bool __ip_vs_addr_is_local_v6(struct net *net,
const struct in6_addr *addr)
{
struct flowi6 fl6 = {
.daddr = *addr,
};
struct dst_entry *dst = ip6_route_output(net, NULL, &fl6);
bool is_local;
is_local = !dst->error && dst->dev && (dst->dev->flags & IFF_LOOPBACK);
dst_release(dst);
return is_local;
}
#endif
#ifdef CONFIG_SYSCTL
/*
* update_defense_level is called from keventd and from sysctl,
* so it needs to protect itself from softirqs
*/
static void update_defense_level(struct netns_ipvs *ipvs)
{
struct sysinfo i;
static int old_secure_tcp = 0;
int availmem;
int nomem;
int to_change = -1;
/* we only count free and buffered memory (in pages) */
si_meminfo(&i);
availmem = i.freeram + i.bufferram;
/* however, in Linux 2.5 i.bufferram is the total page cache size,
so we need to adjust it */
/* si_swapinfo(&i); */
/* availmem = availmem - (i.totalswap - i.freeswap); */
nomem = (availmem < ipvs->sysctl_amemthresh);
local_bh_disable();
/* drop_entry */
spin_lock(&ipvs->dropentry_lock);
switch (ipvs->sysctl_drop_entry) {
case 0:
atomic_set(&ipvs->dropentry, 0);
break;
case 1:
if (nomem) {
atomic_set(&ipvs->dropentry, 1);
ipvs->sysctl_drop_entry = 2;
} else {
atomic_set(&ipvs->dropentry, 0);
}
break;
case 2:
if (nomem) {
atomic_set(&ipvs->dropentry, 1);
} else {
atomic_set(&ipvs->dropentry, 0);
ipvs->sysctl_drop_entry = 1;
}
break;
case 3:
atomic_set(&ipvs->dropentry, 1);
break;
}
spin_unlock(&ipvs->dropentry_lock);
/* drop_packet */
spin_lock(&ipvs->droppacket_lock);
switch (ipvs->sysctl_drop_packet) {
case 0:
ipvs->drop_rate = 0;
break;
case 1:
if (nomem) {
ipvs->drop_rate = ipvs->drop_counter
= ipvs->sysctl_amemthresh /
(ipvs->sysctl_amemthresh-availmem);
ipvs->sysctl_drop_packet = 2;
} else {
ipvs->drop_rate = 0;
}
break;
case 2:
if (nomem) {
ipvs->drop_rate = ipvs->drop_counter
= ipvs->sysctl_amemthresh /
(ipvs->sysctl_amemthresh-availmem);
} else {
ipvs->drop_rate = 0;
ipvs->sysctl_drop_packet = 1;
}
break;
case 3:
ipvs->drop_rate = ipvs->sysctl_am_droprate;
break;
}
spin_unlock(&ipvs->droppacket_lock);
/* secure_tcp */
spin_lock(&ipvs->securetcp_lock);
switch (ipvs->sysctl_secure_tcp) {
case 0:
if (old_secure_tcp >= 2)
to_change = 0;
break;
case 1:
if (nomem) {
if (old_secure_tcp < 2)
to_change = 1;
ipvs->sysctl_secure_tcp = 2;
} else {
if (old_secure_tcp >= 2)
to_change = 0;
}
break;
case 2:
if (nomem) {
if (old_secure_tcp < 2)
to_change = 1;
} else {
if (old_secure_tcp >= 2)
to_change = 0;
ipvs->sysctl_secure_tcp = 1;
}
break;
case 3:
if (old_secure_tcp < 2)
to_change = 1;
break;
}
old_secure_tcp = ipvs->sysctl_secure_tcp;
if (to_change >= 0)
ip_vs_protocol_timeout_change(ipvs,
ipvs->sysctl_secure_tcp > 1);
spin_unlock(&ipvs->securetcp_lock);
local_bh_enable();
}
/*
* Timer for checking the defense
*/
#define DEFENSE_TIMER_PERIOD 1*HZ
static void defense_work_handler(struct work_struct *work)
{
struct netns_ipvs *ipvs =
container_of(work, struct netns_ipvs, defense_work.work);
update_defense_level(ipvs);
if (atomic_read(&ipvs->dropentry))
ip_vs_random_dropentry(ipvs->net);
schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD);
}
#endif
int
ip_vs_use_count_inc(void)
{
return try_module_get(THIS_MODULE);
}
void
ip_vs_use_count_dec(void)
{
module_put(THIS_MODULE);
}
/*
* Hash table: for virtual service lookups
*/
#define IP_VS_SVC_TAB_BITS 8
#define IP_VS_SVC_TAB_SIZE (1 << IP_VS_SVC_TAB_BITS)
#define IP_VS_SVC_TAB_MASK (IP_VS_SVC_TAB_SIZE - 1)
/* the service table hashed by <protocol, addr, port> */
static struct list_head ip_vs_svc_table[IP_VS_SVC_TAB_SIZE];
/* the service table hashed by fwmark */
static struct list_head ip_vs_svc_fwm_table[IP_VS_SVC_TAB_SIZE];
/*
* Returns hash value for virtual service
*/
static inline unsigned int
ip_vs_svc_hashkey(struct net *net, int af, unsigned int proto,
const union nf_inet_addr *addr, __be16 port)
{
register unsigned int porth = ntohs(port);
__be32 addr_fold = addr->ip;
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
addr_fold = addr->ip6[0]^addr->ip6[1]^
addr->ip6[2]^addr->ip6[3];
#endif
addr_fold ^= ((size_t)net>>8);
return (proto^ntohl(addr_fold)^(porth>>IP_VS_SVC_TAB_BITS)^porth)
& IP_VS_SVC_TAB_MASK;
}
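/* Example (illustrative): with IP_VS_SVC_TAB_BITS == 8 the key is an
* index in [0, 255]; e.g. for TCP (proto 6) on port 80, porth >> 8 is
* 0, so the key reduces to (6 ^ ntohl(addr_fold) ^ 80) masked with
* IP_VS_SVC_TAB_MASK, where addr_fold already mixes in the netns bits.
*/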
/*
* Returns hash value of fwmark for virtual service lookup
*/
static inline unsigned int ip_vs_svc_fwm_hashkey(struct net *net, __u32 fwmark)
{
return (((size_t)net>>8) ^ fwmark) & IP_VS_SVC_TAB_MASK;
}
/*
* Hashes a service in the ip_vs_svc_table by <netns,proto,addr,port>
* or in the ip_vs_svc_fwm_table by fwmark.
* Should be called with locked tables.
*/
static int ip_vs_svc_hash(struct ip_vs_service *svc)
{
unsigned int hash;
if (svc->flags & IP_VS_SVC_F_HASHED) {
pr_err("%s(): request for already hashed, called from %pF\n",
__func__, __builtin_return_address(0));
return 0;
}
if (svc->fwmark == 0) {
/*
* Hash it by <netns,protocol,addr,port> in ip_vs_svc_table
*/
hash = ip_vs_svc_hashkey(svc->net, svc->af, svc->protocol,
&svc->addr, svc->port);
list_add(&svc->s_list, &ip_vs_svc_table[hash]);
} else {
/*
* Hash it by fwmark in svc_fwm_table
*/
hash = ip_vs_svc_fwm_hashkey(svc->net, svc->fwmark);
list_add(&svc->f_list, &ip_vs_svc_fwm_table[hash]);
}
svc->flags |= IP_VS_SVC_F_HASHED;
/* increase its refcnt because it is referenced by the svc table */
atomic_inc(&svc->refcnt);
return 1;
}
/*
* Unhashes a service from svc_table / svc_fwm_table.
* Should be called with locked tables.
*/
static int ip_vs_svc_unhash(struct ip_vs_service *svc)
{
if (!(svc->flags & IP_VS_SVC_F_HASHED)) {
pr_err("%s(): request for unhash flagged, called from %pF\n",
__func__, __builtin_return_address(0));
return 0;
}
if (svc->fwmark == 0) {
/* Remove it from the svc_table table */
list_del(&svc->s_list);
} else {
/* Remove it from the svc_fwm_table table */
list_del(&svc->f_list);
}
svc->flags &= ~IP_VS_SVC_F_HASHED;
atomic_dec(&svc->refcnt);
return 1;
}
/*
* Get service by {netns, proto,addr,port} in the service table.
*/
static inline struct ip_vs_service *
__ip_vs_service_find(struct net *net, int af, __u16 protocol,
const union nf_inet_addr *vaddr, __be16 vport)
{
unsigned int hash;
struct ip_vs_service *svc;
/* Check for "full" addressed entries */
hash = ip_vs_svc_hashkey(net, af, protocol, vaddr, vport);
list_for_each_entry(svc, &ip_vs_svc_table[hash], s_list){
if ((svc->af == af)
&& ip_vs_addr_equal(af, &svc->addr, vaddr)
&& (svc->port == vport)
&& (svc->protocol == protocol)
&& net_eq(svc->net, net)) {
/* HIT */
return svc;
}
}
return NULL;
}
/*
* Get service by {fwmark} in the service table.
*/
static inline struct ip_vs_service *
__ip_vs_svc_fwm_find(struct net *net, int af, __u32 fwmark)
{
unsigned int hash;
struct ip_vs_service *svc;
/* Check for fwmark addressed entries */
hash = ip_vs_svc_fwm_hashkey(net, fwmark);
list_for_each_entry(svc, &ip_vs_svc_fwm_table[hash], f_list) {
if (svc->fwmark == fwmark && svc->af == af
&& net_eq(svc->net, net)) {
/* HIT */
return svc;
}
}
return NULL;
}
struct ip_vs_service *
ip_vs_service_get(struct net *net, int af, __u32 fwmark, __u16 protocol,
const union nf_inet_addr *vaddr, __be16 vport)
{
struct ip_vs_service *svc;
struct netns_ipvs *ipvs = net_ipvs(net);
read_lock(&__ip_vs_svc_lock);
/*
* Check the table hashed by fwmark first
*/
if (fwmark) {
svc = __ip_vs_svc_fwm_find(net, af, fwmark);
if (svc)
goto out;
}
/*
* Check the table hashed by <protocol,addr,port>
* for "full" addressed entries
*/
svc = __ip_vs_service_find(net, af, protocol, vaddr, vport);
if (svc == NULL
&& protocol == IPPROTO_TCP
&& atomic_read(&ipvs->ftpsvc_counter)
&& (vport == FTPDATA || ntohs(vport) >= PROT_SOCK)) {
/*
* Check if ftp service entry exists, the packet
* might belong to FTP data connections.
*/
svc = __ip_vs_service_find(net, af, protocol, vaddr, FTPPORT);
}
if (svc == NULL
&& atomic_read(&ipvs->nullsvc_counter)) {
/*
* Check if the catch-all port (port zero) exists
*/
svc = __ip_vs_service_find(net, af, protocol, vaddr, 0);
}
out:
if (svc)
atomic_inc(&svc->usecnt);
read_unlock(&__ip_vs_svc_lock);
IP_VS_DBG_BUF(9, "lookup service: fwm %u %s %s:%u %s\n",
fwmark, ip_vs_proto_name(protocol),
IP_VS_DBG_ADDR(af, vaddr), ntohs(vport),
svc ? "hit" : "not hit");
return svc;
}
static inline void
__ip_vs_bind_svc(struct ip_vs_dest *dest, struct ip_vs_service *svc)
{
atomic_inc(&svc->refcnt);
dest->svc = svc;
}
static void
__ip_vs_unbind_svc(struct ip_vs_dest *dest)
{
struct ip_vs_service *svc = dest->svc;
dest->svc = NULL;
if (atomic_dec_and_test(&svc->refcnt)) {
IP_VS_DBG_BUF(3, "Removing service %u/%s:%u usecnt=%d\n",
svc->fwmark,
IP_VS_DBG_ADDR(svc->af, &svc->addr),
ntohs(svc->port), atomic_read(&svc->usecnt));
free_percpu(svc->stats.cpustats);
kfree(svc);
}
}
/*
* Returns hash value for real service
*/
static inline unsigned int ip_vs_rs_hashkey(int af,
const union nf_inet_addr *addr,
__be16 port)
{
register unsigned int porth = ntohs(port);
__be32 addr_fold = addr->ip;
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
addr_fold = addr->ip6[0]^addr->ip6[1]^
addr->ip6[2]^addr->ip6[3];
#endif
return (ntohl(addr_fold)^(porth>>IP_VS_RTAB_BITS)^porth)
& IP_VS_RTAB_MASK;
}
/*
* Hashes ip_vs_dest in rs_table by <proto,addr,port>.
* should be called with locked tables.
*/
static int ip_vs_rs_hash(struct netns_ipvs *ipvs, struct ip_vs_dest *dest)
{
unsigned int hash;
if (!list_empty(&dest->d_list)) {
return 0;
}
/*
* Hash by proto,addr,port,
* which are the parameters of the real service.
*/
hash = ip_vs_rs_hashkey(dest->af, &dest->addr, dest->port);
list_add(&dest->d_list, &ipvs->rs_table[hash]);
return 1;
}
/*
* Unhashes ip_vs_dest from rs_table.
* should be called with locked tables.
*/
static int ip_vs_rs_unhash(struct ip_vs_dest *dest)
{
/*
* Remove it from the rs_table table.
*/
if (!list_empty(&dest->d_list)) {
list_del(&dest->d_list);
INIT_LIST_HEAD(&dest->d_list);
}
return 1;
}
/*
* Lookup real service by <proto,addr,port> in the real service table.
*/
struct ip_vs_dest *
ip_vs_lookup_real_service(struct net *net, int af, __u16 protocol,
const union nf_inet_addr *daddr,
__be16 dport)
{
struct netns_ipvs *ipvs = net_ipvs(net);
unsigned int hash;
struct ip_vs_dest *dest;
/*
* Check for "full" addressed entries
* Return the first found entry
*/
hash = ip_vs_rs_hashkey(af, daddr, dport);
read_lock(&ipvs->rs_lock);
list_for_each_entry(dest, &ipvs->rs_table[hash], d_list) {
if ((dest->af == af)
&& ip_vs_addr_equal(af, &dest->addr, daddr)
&& (dest->port == dport)
&& ((dest->protocol == protocol) ||
dest->vfwmark)) {
/* HIT */
read_unlock(&ipvs->rs_lock);
return dest;
}
}
read_unlock(&ipvs->rs_lock);
return NULL;
}
/*
* Lookup destination by {addr,port} in the given service
*/
static struct ip_vs_dest *
ip_vs_lookup_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
__be16 dport)
{
struct ip_vs_dest *dest;
/*
* Find the destination for the given service
*/
list_for_each_entry(dest, &svc->destinations, n_list) {
if ((dest->af == svc->af)
&& ip_vs_addr_equal(svc->af, &dest->addr, daddr)
&& (dest->port == dport)) {
/* HIT */
return dest;
}
}
return NULL;
}
/*
* Find destination by {daddr,dport,vaddr,protocol}
* Created to be used in ip_vs_process_message() in
* the backup synchronization daemon. It finds the
* destination to be bound to the received connection
* on the backup.
*
* ip_vs_lookup_real_service() looked promising, but
* does not seem to work as expected.
*/
struct ip_vs_dest *ip_vs_find_dest(struct net *net, int af,
const union nf_inet_addr *daddr,
__be16 dport,
const union nf_inet_addr *vaddr,
__be16 vport, __u16 protocol, __u32 fwmark,
__u32 flags)
{
struct ip_vs_dest *dest;
struct ip_vs_service *svc;
__be16 port = dport;
svc = ip_vs_service_get(net, af, fwmark, protocol, vaddr, vport);
if (!svc)
return NULL;
if (fwmark && (flags & IP_VS_CONN_F_FWD_MASK) != IP_VS_CONN_F_MASQ)
port = 0;
dest = ip_vs_lookup_dest(svc, daddr, port);
if (!dest)
dest = ip_vs_lookup_dest(svc, daddr, port ^ dport);
if (dest)
atomic_inc(&dest->refcnt);
ip_vs_service_put(svc);
return dest;
}
/*
* Lookup dest by {svc,addr,port} in the destination trash.
* The destination trash is used to hold the destinations that are removed
* from the service table but are still referenced by some conn entries.
* The reason to add the destination trash is that when the dest is
* temporarily down (either by the administrator or a monitor program), the dest can be
* picked back from the trash, the remaining connections to the dest can
* continue, and the counting information of the dest is also useful for
* scheduling.
*/
static struct ip_vs_dest *
ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
__be16 dport)
{
struct ip_vs_dest *dest, *nxt;
struct netns_ipvs *ipvs = net_ipvs(svc->net);
/*
* Find the destination in trash
*/
list_for_each_entry_safe(dest, nxt, &ipvs->dest_trash, n_list) {
IP_VS_DBG_BUF(3, "Destination %u/%s:%u still in trash, "
"dest->refcnt=%d\n",
dest->vfwmark,
IP_VS_DBG_ADDR(svc->af, &dest->addr),
ntohs(dest->port),
atomic_read(&dest->refcnt));
if (dest->af == svc->af &&
ip_vs_addr_equal(svc->af, &dest->addr, daddr) &&
dest->port == dport &&
dest->vfwmark == svc->fwmark &&
dest->protocol == svc->protocol &&
(svc->fwmark ||
(ip_vs_addr_equal(svc->af, &dest->vaddr, &svc->addr) &&
dest->vport == svc->port))) {
/* HIT */
return dest;
}
/*
* Try to purge the destination from trash if not referenced
*/
if (atomic_read(&dest->refcnt) == 1) {
IP_VS_DBG_BUF(3, "Removing destination %u/%s:%u "
"from trash\n",
dest->vfwmark,
IP_VS_DBG_ADDR(svc->af, &dest->addr),
ntohs(dest->port));
list_del(&dest->n_list);
ip_vs_dst_reset(dest);
__ip_vs_unbind_svc(dest);
free_percpu(dest->stats.cpustats);
kfree(dest);
}
}
return NULL;
}
/*
* Clean up all the destinations in the trash
* Called by ip_vs_control_cleanup()
*
* When ip_vs_control_cleanup() is activated by ipvs module exit,
* the service tables must have been flushed and all the connections
* are expired, and the refcnt of each destination in the trash must
* be 1, so we simply release them here.
*/
static void ip_vs_trash_cleanup(struct net *net)
{
struct ip_vs_dest *dest, *nxt;
struct netns_ipvs *ipvs = net_ipvs(net);
list_for_each_entry_safe(dest, nxt, &ipvs->dest_trash, n_list) {
list_del(&dest->n_list);
ip_vs_dst_reset(dest);
__ip_vs_unbind_svc(dest);
free_percpu(dest->stats.cpustats);
kfree(dest);
}
}
static void
ip_vs_copy_stats(struct ip_vs_stats_user *dst, struct ip_vs_stats *src)
{
#define IP_VS_SHOW_STATS_COUNTER(c) dst->c = src->ustats.c - src->ustats0.c
spin_lock_bh(&src->lock);
IP_VS_SHOW_STATS_COUNTER(conns);
IP_VS_SHOW_STATS_COUNTER(inpkts);
IP_VS_SHOW_STATS_COUNTER(outpkts);
IP_VS_SHOW_STATS_COUNTER(inbytes);
IP_VS_SHOW_STATS_COUNTER(outbytes);
ip_vs_read_estimator(dst, src);
spin_unlock_bh(&src->lock);
}
static void
ip_vs_zero_stats(struct ip_vs_stats *stats)
{
spin_lock_bh(&stats->lock);
/* get current counters as zero point, rates are zeroed */
#define IP_VS_ZERO_STATS_COUNTER(c) stats->ustats0.c = stats->ustats.c
IP_VS_ZERO_STATS_COUNTER(conns);
IP_VS_ZERO_STATS_COUNTER(inpkts);
IP_VS_ZERO_STATS_COUNTER(outpkts);
IP_VS_ZERO_STATS_COUNTER(inbytes);
IP_VS_ZERO_STATS_COUNTER(outbytes);
ip_vs_zero_estimator(stats);
spin_unlock_bh(&stats->lock);
}
/*
* Update a destination in the given service
*/
static void
__ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
struct ip_vs_dest_user_kern *udest, int add)
{
struct netns_ipvs *ipvs = net_ipvs(svc->net);
int conn_flags;
/* set the weight and the flags */
atomic_set(&dest->weight, udest->weight);
conn_flags = udest->conn_flags & IP_VS_CONN_F_DEST_MASK;
conn_flags |= IP_VS_CONN_F_INACTIVE;
/* set the IP_VS_CONN_F_NOOUTPUT flag if not masquerading/NAT */
if ((conn_flags & IP_VS_CONN_F_FWD_MASK) != IP_VS_CONN_F_MASQ) {
conn_flags |= IP_VS_CONN_F_NOOUTPUT;
} else {
/*
* Put the real service in rs_table if not present.
* For now only for NAT!
*/
write_lock_bh(&ipvs->rs_lock);
ip_vs_rs_hash(ipvs, dest);
write_unlock_bh(&ipvs->rs_lock);
}
atomic_set(&dest->conn_flags, conn_flags);
/* bind the service */
if (!dest->svc) {
__ip_vs_bind_svc(dest, svc);
} else {
if (dest->svc != svc) {
__ip_vs_unbind_svc(dest);
ip_vs_zero_stats(&dest->stats);
__ip_vs_bind_svc(dest, svc);
}
}
/* set the dest status flags */
dest->flags |= IP_VS_DEST_F_AVAILABLE;
if (udest->u_threshold == 0 || udest->u_threshold > dest->u_threshold)
dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
dest->u_threshold = udest->u_threshold;
dest->l_threshold = udest->l_threshold;
spin_lock_bh(&dest->dst_lock);
ip_vs_dst_reset(dest);
spin_unlock_bh(&dest->dst_lock);
if (add)
ip_vs_start_estimator(svc->net, &dest->stats);
write_lock_bh(&__ip_vs_svc_lock);
/* Wait until all other svc users go away */
IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 0);
if (add) {
list_add(&dest->n_list, &svc->destinations);
svc->num_dests++;
}
	/* call update_service because the server weight may have changed */
if (svc->scheduler->update_service)
svc->scheduler->update_service(svc);
write_unlock_bh(&__ip_vs_svc_lock);
}
/*
* Create a destination for the given service
*/
static int
ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
struct ip_vs_dest **dest_p)
{
struct ip_vs_dest *dest;
unsigned int atype;
EnterFunction(2);
#ifdef CONFIG_IP_VS_IPV6
if (svc->af == AF_INET6) {
atype = ipv6_addr_type(&udest->addr.in6);
if ((!(atype & IPV6_ADDR_UNICAST) ||
atype & IPV6_ADDR_LINKLOCAL) &&
!__ip_vs_addr_is_local_v6(svc->net, &udest->addr.in6))
return -EINVAL;
} else
#endif
{
atype = inet_addr_type(svc->net, udest->addr.ip);
if (atype != RTN_LOCAL && atype != RTN_UNICAST)
return -EINVAL;
}
dest = kzalloc(sizeof(struct ip_vs_dest), GFP_KERNEL);
if (dest == NULL)
return -ENOMEM;
dest->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
if (!dest->stats.cpustats)
goto err_alloc;
dest->af = svc->af;
dest->protocol = svc->protocol;
dest->vaddr = svc->addr;
dest->vport = svc->port;
dest->vfwmark = svc->fwmark;
ip_vs_addr_copy(svc->af, &dest->addr, &udest->addr);
dest->port = udest->port;
atomic_set(&dest->activeconns, 0);
atomic_set(&dest->inactconns, 0);
atomic_set(&dest->persistconns, 0);
atomic_set(&dest->refcnt, 1);
INIT_LIST_HEAD(&dest->d_list);
spin_lock_init(&dest->dst_lock);
spin_lock_init(&dest->stats.lock);
__ip_vs_update_dest(svc, dest, udest, 1);
*dest_p = dest;
LeaveFunction(2);
return 0;
err_alloc:
kfree(dest);
return -ENOMEM;
}
/*
* Add a destination into an existing service
*/
static int
ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
{
struct ip_vs_dest *dest;
union nf_inet_addr daddr;
__be16 dport = udest->port;
int ret;
EnterFunction(2);
if (udest->weight < 0) {
pr_err("%s(): server weight less than zero\n", __func__);
return -ERANGE;
}
if (udest->l_threshold > udest->u_threshold) {
pr_err("%s(): lower threshold is higher than upper threshold\n",
__func__);
return -ERANGE;
}
ip_vs_addr_copy(svc->af, &daddr, &udest->addr);
/*
* Check if the dest already exists in the list
*/
dest = ip_vs_lookup_dest(svc, &daddr, dport);
if (dest != NULL) {
IP_VS_DBG(1, "%s(): dest already exists\n", __func__);
return -EEXIST;
}
/*
* Check if the dest already exists in the trash and
* is from the same service
*/
dest = ip_vs_trash_get_dest(svc, &daddr, dport);
if (dest != NULL) {
IP_VS_DBG_BUF(3, "Get destination %s:%u from trash, "
"dest->refcnt=%d, service %u/%s:%u\n",
IP_VS_DBG_ADDR(svc->af, &daddr), ntohs(dport),
atomic_read(&dest->refcnt),
dest->vfwmark,
IP_VS_DBG_ADDR(svc->af, &dest->vaddr),
ntohs(dest->vport));
/*
* Get the destination from the trash
*/
list_del(&dest->n_list);
__ip_vs_update_dest(svc, dest, udest, 1);
ret = 0;
} else {
/*
* Allocate and initialize the dest structure
*/
ret = ip_vs_new_dest(svc, udest, &dest);
}
LeaveFunction(2);
return ret;
}
/*
* Edit a destination in the given service
*/
static int
ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
{
struct ip_vs_dest *dest;
union nf_inet_addr daddr;
__be16 dport = udest->port;
EnterFunction(2);
if (udest->weight < 0) {
pr_err("%s(): server weight less than zero\n", __func__);
return -ERANGE;
}
if (udest->l_threshold > udest->u_threshold) {
pr_err("%s(): lower threshold is higher than upper threshold\n",
__func__);
return -ERANGE;
}
ip_vs_addr_copy(svc->af, &daddr, &udest->addr);
/*
* Lookup the destination list
*/
dest = ip_vs_lookup_dest(svc, &daddr, dport);
if (dest == NULL) {
IP_VS_DBG(1, "%s(): dest doesn't exist\n", __func__);
return -ENOENT;
}
__ip_vs_update_dest(svc, dest, udest, 0);
LeaveFunction(2);
return 0;
}
/*
* Delete a destination (must be already unlinked from the service)
*/
static void __ip_vs_del_dest(struct net *net, struct ip_vs_dest *dest)
{
struct netns_ipvs *ipvs = net_ipvs(net);
ip_vs_stop_estimator(net, &dest->stats);
/*
* Remove it from the d-linked list with the real services.
*/
write_lock_bh(&ipvs->rs_lock);
ip_vs_rs_unhash(dest);
write_unlock_bh(&ipvs->rs_lock);
/*
* Decrease the refcnt of the dest, and free the dest
* if nobody refers to it (refcnt=0). Otherwise, throw
* the destination into the trash.
*/
if (atomic_dec_and_test(&dest->refcnt)) {
IP_VS_DBG_BUF(3, "Removing destination %u/%s:%u\n",
dest->vfwmark,
IP_VS_DBG_ADDR(dest->af, &dest->addr),
ntohs(dest->port));
ip_vs_dst_reset(dest);
		/*
		 * Simply decrease svc->refcnt here and let the caller check
		 * and release the service if nobody refers to it.
		 * Only user context can release destination and service,
		 * and only one user context can update a virtual service at
		 * a time, so the operation here is OK.
		 */
atomic_dec(&dest->svc->refcnt);
free_percpu(dest->stats.cpustats);
kfree(dest);
} else {
IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, "
"dest->refcnt=%d\n",
IP_VS_DBG_ADDR(dest->af, &dest->addr),
ntohs(dest->port),
atomic_read(&dest->refcnt));
list_add(&dest->n_list, &ipvs->dest_trash);
atomic_inc(&dest->refcnt);
}
}
/*
* Unlink a destination from the given service
*/
static void __ip_vs_unlink_dest(struct ip_vs_service *svc,
struct ip_vs_dest *dest,
int svcupd)
{
dest->flags &= ~IP_VS_DEST_F_AVAILABLE;
/*
* Remove it from the d-linked destination list.
*/
list_del(&dest->n_list);
svc->num_dests--;
/*
* Call the update_service function of its scheduler
*/
if (svcupd && svc->scheduler->update_service)
svc->scheduler->update_service(svc);
}
/*
* Delete a destination server in the given service
*/
static int
ip_vs_del_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
{
struct ip_vs_dest *dest;
__be16 dport = udest->port;
EnterFunction(2);
dest = ip_vs_lookup_dest(svc, &udest->addr, dport);
if (dest == NULL) {
IP_VS_DBG(1, "%s(): destination not found!\n", __func__);
return -ENOENT;
}
write_lock_bh(&__ip_vs_svc_lock);
/*
* Wait until all other svc users go away.
*/
IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 0);
/*
* Unlink dest from the service
*/
__ip_vs_unlink_dest(svc, dest, 1);
write_unlock_bh(&__ip_vs_svc_lock);
/*
* Delete the destination
*/
__ip_vs_del_dest(svc->net, dest);
LeaveFunction(2);
return 0;
}
/*
* Add a service into the service hash table
*/
static int
ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
struct ip_vs_service **svc_p)
{
int ret = 0;
struct ip_vs_scheduler *sched = NULL;
struct ip_vs_pe *pe = NULL;
struct ip_vs_service *svc = NULL;
struct netns_ipvs *ipvs = net_ipvs(net);
/* increase the module use count */
ip_vs_use_count_inc();
/* Lookup the scheduler by 'u->sched_name' */
sched = ip_vs_scheduler_get(u->sched_name);
if (sched == NULL) {
pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name);
ret = -ENOENT;
goto out_err;
}
if (u->pe_name && *u->pe_name) {
pe = ip_vs_pe_getbyname(u->pe_name);
if (pe == NULL) {
pr_info("persistence engine module ip_vs_pe_%s "
"not found\n", u->pe_name);
ret = -ENOENT;
goto out_err;
}
}
#ifdef CONFIG_IP_VS_IPV6
if (u->af == AF_INET6 && (u->netmask < 1 || u->netmask > 128)) {
ret = -EINVAL;
goto out_err;
}
#endif
svc = kzalloc(sizeof(struct ip_vs_service), GFP_KERNEL);
if (svc == NULL) {
IP_VS_DBG(1, "%s(): no memory\n", __func__);
ret = -ENOMEM;
goto out_err;
}
	svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
	if (!svc->stats.cpustats) {
		ret = -ENOMEM;
		goto out_err;
	}
/* I'm the first user of the service */
atomic_set(&svc->usecnt, 0);
atomic_set(&svc->refcnt, 0);
svc->af = u->af;
svc->protocol = u->protocol;
ip_vs_addr_copy(svc->af, &svc->addr, &u->addr);
svc->port = u->port;
svc->fwmark = u->fwmark;
svc->flags = u->flags;
svc->timeout = u->timeout * HZ;
svc->netmask = u->netmask;
svc->net = net;
INIT_LIST_HEAD(&svc->destinations);
rwlock_init(&svc->sched_lock);
spin_lock_init(&svc->stats.lock);
/* Bind the scheduler */
ret = ip_vs_bind_scheduler(svc, sched);
if (ret)
goto out_err;
sched = NULL;
/* Bind the ct retriever */
ip_vs_bind_pe(svc, pe);
pe = NULL;
/* Update the virtual service counters */
if (svc->port == FTPPORT)
atomic_inc(&ipvs->ftpsvc_counter);
else if (svc->port == 0)
atomic_inc(&ipvs->nullsvc_counter);
ip_vs_start_estimator(net, &svc->stats);
/* Count only IPv4 services for old get/setsockopt interface */
if (svc->af == AF_INET)
ipvs->num_services++;
/* Hash the service into the service table */
write_lock_bh(&__ip_vs_svc_lock);
ip_vs_svc_hash(svc);
write_unlock_bh(&__ip_vs_svc_lock);
*svc_p = svc;
/* Now there is a service - full throttle */
ipvs->enable = 1;
return 0;
out_err:
if (svc != NULL) {
ip_vs_unbind_scheduler(svc);
if (svc->inc) {
local_bh_disable();
ip_vs_app_inc_put(svc->inc);
local_bh_enable();
}
if (svc->stats.cpustats)
free_percpu(svc->stats.cpustats);
kfree(svc);
}
ip_vs_scheduler_put(sched);
ip_vs_pe_put(pe);
/* decrease the module use count */
ip_vs_use_count_dec();
return ret;
}
/*
* Edit a service and bind it with a new scheduler
*/
static int
ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
{
struct ip_vs_scheduler *sched, *old_sched;
struct ip_vs_pe *pe = NULL, *old_pe = NULL;
int ret = 0;
	/*
	 * Look up the scheduler by 'u->sched_name'
	 */
sched = ip_vs_scheduler_get(u->sched_name);
if (sched == NULL) {
pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name);
return -ENOENT;
}
old_sched = sched;
if (u->pe_name && *u->pe_name) {
pe = ip_vs_pe_getbyname(u->pe_name);
if (pe == NULL) {
pr_info("persistence engine module ip_vs_pe_%s "
"not found\n", u->pe_name);
ret = -ENOENT;
goto out;
}
old_pe = pe;
}
#ifdef CONFIG_IP_VS_IPV6
if (u->af == AF_INET6 && (u->netmask < 1 || u->netmask > 128)) {
ret = -EINVAL;
goto out;
}
#endif
write_lock_bh(&__ip_vs_svc_lock);
/*
* Wait until all other svc users go away.
*/
IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 0);
/*
* Set the flags and timeout value
*/
svc->flags = u->flags | IP_VS_SVC_F_HASHED;
svc->timeout = u->timeout * HZ;
svc->netmask = u->netmask;
old_sched = svc->scheduler;
if (sched != old_sched) {
/*
* Unbind the old scheduler
*/
if ((ret = ip_vs_unbind_scheduler(svc))) {
old_sched = sched;
goto out_unlock;
}
/*
* Bind the new scheduler
*/
if ((ret = ip_vs_bind_scheduler(svc, sched))) {
			/*
			 * If ip_vs_bind_scheduler fails, restore the old
			 * scheduler; the most likely reason for failure is
			 * running out of memory.
			 *
			 * The open question is whether the old scheduler can
			 * always be restored. TODO: if it ever cannot be
			 * restored, we must delete the service, otherwise
			 * the system may crash.
			 */
ip_vs_bind_scheduler(svc, old_sched);
old_sched = sched;
goto out_unlock;
}
}
old_pe = svc->pe;
if (pe != old_pe) {
ip_vs_unbind_pe(svc);
ip_vs_bind_pe(svc, pe);
}
out_unlock:
write_unlock_bh(&__ip_vs_svc_lock);
out:
ip_vs_scheduler_put(old_sched);
ip_vs_pe_put(old_pe);
return ret;
}
/*
* Delete a service from the service list
* - The service must be unlinked, unlocked and not referenced!
* - We are called under _bh lock
*/
static void __ip_vs_del_service(struct ip_vs_service *svc)
{
struct ip_vs_dest *dest, *nxt;
struct ip_vs_scheduler *old_sched;
struct ip_vs_pe *old_pe;
struct netns_ipvs *ipvs = net_ipvs(svc->net);
pr_info("%s: enter\n", __func__);
/* Count only IPv4 services for old get/setsockopt interface */
if (svc->af == AF_INET)
ipvs->num_services--;
ip_vs_stop_estimator(svc->net, &svc->stats);
/* Unbind scheduler */
old_sched = svc->scheduler;
ip_vs_unbind_scheduler(svc);
ip_vs_scheduler_put(old_sched);
/* Unbind persistence engine */
old_pe = svc->pe;
ip_vs_unbind_pe(svc);
ip_vs_pe_put(old_pe);
/* Unbind app inc */
if (svc->inc) {
ip_vs_app_inc_put(svc->inc);
svc->inc = NULL;
}
/*
* Unlink the whole destination list
*/
list_for_each_entry_safe(dest, nxt, &svc->destinations, n_list) {
__ip_vs_unlink_dest(svc, dest, 0);
__ip_vs_del_dest(svc->net, dest);
}
/*
* Update the virtual service counters
*/
if (svc->port == FTPPORT)
atomic_dec(&ipvs->ftpsvc_counter);
else if (svc->port == 0)
atomic_dec(&ipvs->nullsvc_counter);
/*
* Free the service if nobody refers to it
*/
if (atomic_read(&svc->refcnt) == 0) {
IP_VS_DBG_BUF(3, "Removing service %u/%s:%u usecnt=%d\n",
svc->fwmark,
IP_VS_DBG_ADDR(svc->af, &svc->addr),
ntohs(svc->port), atomic_read(&svc->usecnt));
free_percpu(svc->stats.cpustats);
kfree(svc);
}
/* decrease the module use count */
ip_vs_use_count_dec();
}
/*
* Unlink a service from list and try to delete it if its refcnt reached 0
*/
static void ip_vs_unlink_service(struct ip_vs_service *svc)
{
/*
* Unhash it from the service table
*/
write_lock_bh(&__ip_vs_svc_lock);
ip_vs_svc_unhash(svc);
/*
* Wait until all the svc users go away.
*/
IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 0);
__ip_vs_del_service(svc);
write_unlock_bh(&__ip_vs_svc_lock);
}
/*
* Delete a service from the service list
*/
static int ip_vs_del_service(struct ip_vs_service *svc)
{
if (svc == NULL)
return -EEXIST;
ip_vs_unlink_service(svc);
return 0;
}
/*
* Flush all the virtual services
*/
static int ip_vs_flush(struct net *net)
{
int idx;
struct ip_vs_service *svc, *nxt;
/*
* Flush the service table hashed by <netns,protocol,addr,port>
*/
	for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry_safe(svc, nxt, &ip_vs_svc_table[idx],
s_list) {
if (net_eq(svc->net, net))
ip_vs_unlink_service(svc);
}
}
/*
* Flush the service table hashed by fwmark
*/
	for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry_safe(svc, nxt,
&ip_vs_svc_fwm_table[idx], f_list) {
if (net_eq(svc->net, net))
ip_vs_unlink_service(svc);
}
}
return 0;
}
/*
* Delete service by {netns} in the service table.
* Called by __ip_vs_cleanup()
*/
void ip_vs_service_net_cleanup(struct net *net)
{
EnterFunction(2);
	/* Flush all services and destinations for this netns */
mutex_lock(&__ip_vs_mutex);
ip_vs_flush(net);
mutex_unlock(&__ip_vs_mutex);
LeaveFunction(2);
}
/*
* Release dst hold by dst_cache
*/
static inline void
__ip_vs_dev_reset(struct ip_vs_dest *dest, struct net_device *dev)
{
spin_lock_bh(&dest->dst_lock);
if (dest->dst_cache && dest->dst_cache->dev == dev) {
IP_VS_DBG_BUF(3, "Reset dev:%s dest %s:%u ,dest->refcnt=%d\n",
dev->name,
IP_VS_DBG_ADDR(dest->af, &dest->addr),
ntohs(dest->port),
atomic_read(&dest->refcnt));
ip_vs_dst_reset(dest);
}
spin_unlock_bh(&dest->dst_lock);
}
/*
* Netdev event receiver
 * Currently only NETDEV_UNREGISTER is handled, i.e. if we hold a reference to
 * a device that is being unregistered, the reference must be released.
*/
static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *dev = ptr;
struct net *net = dev_net(dev);
struct netns_ipvs *ipvs = net_ipvs(net);
struct ip_vs_service *svc;
struct ip_vs_dest *dest;
unsigned int idx;
if (event != NETDEV_UNREGISTER || !ipvs)
return NOTIFY_DONE;
IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name);
EnterFunction(2);
mutex_lock(&__ip_vs_mutex);
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
if (net_eq(svc->net, net)) {
list_for_each_entry(dest, &svc->destinations,
n_list) {
__ip_vs_dev_reset(dest, dev);
}
}
}
list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
if (net_eq(svc->net, net)) {
list_for_each_entry(dest, &svc->destinations,
n_list) {
__ip_vs_dev_reset(dest, dev);
}
}
}
}
list_for_each_entry(dest, &ipvs->dest_trash, n_list) {
__ip_vs_dev_reset(dest, dev);
}
mutex_unlock(&__ip_vs_mutex);
LeaveFunction(2);
return NOTIFY_DONE;
}
/*
* Zero counters in a service or all services
*/
static int ip_vs_zero_service(struct ip_vs_service *svc)
{
struct ip_vs_dest *dest;
write_lock_bh(&__ip_vs_svc_lock);
list_for_each_entry(dest, &svc->destinations, n_list) {
ip_vs_zero_stats(&dest->stats);
}
ip_vs_zero_stats(&svc->stats);
write_unlock_bh(&__ip_vs_svc_lock);
return 0;
}
static int ip_vs_zero_all(struct net *net)
{
int idx;
struct ip_vs_service *svc;
	for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
if (net_eq(svc->net, net))
ip_vs_zero_service(svc);
}
}
	for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
if (net_eq(svc->net, net))
ip_vs_zero_service(svc);
}
}
ip_vs_zero_stats(&net_ipvs(net)->tot_stats);
return 0;
}
#ifdef CONFIG_SYSCTL
static int zero;
static int three = 3;
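/*
 * The sysctl handlers below share one pattern: let proc_dointvec() do
 * the parsing, then validate the value that was written and silently
 * restore the previous one if it is out of range.
 */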
static int
proc_do_defense_mode(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = current->nsproxy->net_ns;
int *valp = table->data;
int val = *valp;
int rc;
rc = proc_dointvec(table, write, buffer, lenp, ppos);
if (write && (*valp != val)) {
if ((*valp < 0) || (*valp > 3)) {
/* Restore the correct value */
*valp = val;
} else {
update_defense_level(net_ipvs(net));
}
}
return rc;
}
static int
proc_do_sync_threshold(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = table->data;
int val[2];
int rc;
/* backup the value first */
memcpy(val, valp, sizeof(val));
rc = proc_dointvec(table, write, buffer, lenp, ppos);
if (write && (valp[0] < 0 || valp[1] < 0 ||
(valp[0] >= valp[1] && valp[1]))) {
/* Restore the correct value */
memcpy(valp, val, sizeof(val));
}
return rc;
}
static int
proc_do_sync_mode(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = table->data;
int val = *valp;
int rc;
rc = proc_dointvec(table, write, buffer, lenp, ppos);
if (write && (*valp != val)) {
if ((*valp < 0) || (*valp > 1)) {
/* Restore the correct value */
*valp = val;
}
}
return rc;
}
static int
proc_do_sync_ports(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = table->data;
int val = *valp;
int rc;
rc = proc_dointvec(table, write, buffer, lenp, ppos);
if (write && (*valp != val)) {
if (*valp < 1 || !is_power_of_2(*valp)) {
/* Restore the correct value */
*valp = val;
}
}
return rc;
}
/*
* IPVS sysctl table (under the /proc/sys/net/ipv4/vs/)
 * Do not change the order or insert new entries without
 * keeping them aligned with the netns init in ip_vs_control_net_init()
*/
static struct ctl_table vs_vars[] = {
{
.procname = "amemthresh",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "am_droprate",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "drop_entry",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_do_defense_mode,
},
{
.procname = "drop_packet",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_do_defense_mode,
},
#ifdef CONFIG_IP_VS_NFCT
{
.procname = "conntrack",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec,
},
#endif
{
.procname = "secure_tcp",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_do_defense_mode,
},
{
.procname = "snat_reroute",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec,
},
{
.procname = "sync_version",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_do_sync_mode,
},
{
.procname = "sync_ports",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_do_sync_ports,
},
{
.procname = "sync_qlen_max",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "sync_sock_size",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "cache_bypass",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "expire_nodest_conn",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "expire_quiescent_template",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "sync_threshold",
.maxlen =
sizeof(((struct netns_ipvs *)0)->sysctl_sync_threshold),
.mode = 0644,
.proc_handler = proc_do_sync_threshold,
},
{
.procname = "sync_refresh_period",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "sync_retries",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
.extra2 = &three,
},
{
.procname = "nat_icmp_send",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
#ifdef CONFIG_IP_VS_DEBUG
{
.procname = "debug_level",
.data = &sysctl_ip_vs_debug_level,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
#endif
#if 0
{
.procname = "timeout_established",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_ESTABLISHED],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_synsent",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_SYN_SENT],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_synrecv",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_SYN_RECV],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_finwait",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_FIN_WAIT],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_timewait",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_TIME_WAIT],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_close",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_CLOSE],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_closewait",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_CLOSE_WAIT],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_lastack",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_LAST_ACK],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_listen",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_LISTEN],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_synack",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_SYNACK],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_udp",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_UDP],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_icmp",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_ICMP],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
#endif
{ }
};
#endif
#ifdef CONFIG_PROC_FS
struct ip_vs_iter {
	struct seq_net_private p; /* Do not move this, netns depends upon it */
struct list_head *table;
int bucket;
};
/*
* Write the contents of the VS rule table to a PROCfs file.
* (It is kept just for backward compatibility)
*/
static inline const char *ip_vs_fwd_name(unsigned int flags)
{
switch (flags & IP_VS_CONN_F_FWD_MASK) {
case IP_VS_CONN_F_LOCALNODE:
return "Local";
case IP_VS_CONN_F_TUNNEL:
return "Tunnel";
case IP_VS_CONN_F_DROUTE:
return "Route";
default:
return "Masq";
}
}
/* Get the Nth entry in the two lists */
static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos)
{
struct net *net = seq_file_net(seq);
struct ip_vs_iter *iter = seq->private;
int idx;
struct ip_vs_service *svc;
/* look in hash by protocol */
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
if (net_eq(svc->net, net) && pos-- == 0) {
iter->table = ip_vs_svc_table;
iter->bucket = idx;
return svc;
}
}
}
/* keep looking in fwmark */
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
if (net_eq(svc->net, net) && pos-- == 0) {
iter->table = ip_vs_svc_fwm_table;
iter->bucket = idx;
return svc;
}
}
}
return NULL;
}
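/*
 * seq_file iteration over both service hash tables, done under the
 * service read lock; *pos == 0 yields SEQ_START_TOKEN so that
 * ->show() can emit the header first.
 */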
static void *ip_vs_info_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(__ip_vs_svc_lock)
{
read_lock_bh(&__ip_vs_svc_lock);
return *pos ? ip_vs_info_array(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *ip_vs_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct list_head *e;
struct ip_vs_iter *iter;
struct ip_vs_service *svc;
++*pos;
if (v == SEQ_START_TOKEN)
		return ip_vs_info_array(seq, 0);
svc = v;
iter = seq->private;
if (iter->table == ip_vs_svc_table) {
/* next service in table hashed by protocol */
if ((e = svc->s_list.next) != &ip_vs_svc_table[iter->bucket])
return list_entry(e, struct ip_vs_service, s_list);
while (++iter->bucket < IP_VS_SVC_TAB_SIZE) {
			list_for_each_entry(svc, &ip_vs_svc_table[iter->bucket],
s_list) {
return svc;
}
}
iter->table = ip_vs_svc_fwm_table;
iter->bucket = -1;
goto scan_fwmark;
}
/* next service in hashed by fwmark */
if ((e = svc->f_list.next) != &ip_vs_svc_fwm_table[iter->bucket])
return list_entry(e, struct ip_vs_service, f_list);
scan_fwmark:
while (++iter->bucket < IP_VS_SVC_TAB_SIZE) {
list_for_each_entry(svc, &ip_vs_svc_fwm_table[iter->bucket],
f_list)
return svc;
}
return NULL;
}
static void ip_vs_info_seq_stop(struct seq_file *seq, void *v)
__releases(__ip_vs_svc_lock)
{
read_unlock_bh(&__ip_vs_svc_lock);
}
static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN) {
seq_printf(seq,
"IP Virtual Server version %d.%d.%d (size=%d)\n",
NVERSION(IP_VS_VERSION_CODE), ip_vs_conn_tab_size);
seq_puts(seq,
"Prot LocalAddress:Port Scheduler Flags\n");
seq_puts(seq,
" -> RemoteAddress:Port Forward Weight ActiveConn InActConn\n");
} else {
const struct ip_vs_service *svc = v;
const struct ip_vs_iter *iter = seq->private;
const struct ip_vs_dest *dest;
if (iter->table == ip_vs_svc_table) {
#ifdef CONFIG_IP_VS_IPV6
if (svc->af == AF_INET6)
seq_printf(seq, "%s [%pI6]:%04X %s ",
ip_vs_proto_name(svc->protocol),
&svc->addr.in6,
ntohs(svc->port),
svc->scheduler->name);
else
#endif
seq_printf(seq, "%s %08X:%04X %s %s ",
ip_vs_proto_name(svc->protocol),
ntohl(svc->addr.ip),
ntohs(svc->port),
svc->scheduler->name,
(svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
} else {
seq_printf(seq, "FWM %08X %s %s",
svc->fwmark, svc->scheduler->name,
(svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
}
if (svc->flags & IP_VS_SVC_F_PERSISTENT)
seq_printf(seq, "persistent %d %08X\n",
svc->timeout,
ntohl(svc->netmask));
else
seq_putc(seq, '\n');
list_for_each_entry(dest, &svc->destinations, n_list) {
#ifdef CONFIG_IP_VS_IPV6
if (dest->af == AF_INET6)
seq_printf(seq,
" -> [%pI6]:%04X"
" %-7s %-6d %-10d %-10d\n",
&dest->addr.in6,
ntohs(dest->port),
ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
atomic_read(&dest->weight),
atomic_read(&dest->activeconns),
atomic_read(&dest->inactconns));
else
#endif
seq_printf(seq,
" -> %08X:%04X "
"%-7s %-6d %-10d %-10d\n",
ntohl(dest->addr.ip),
ntohs(dest->port),
ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
atomic_read(&dest->weight),
atomic_read(&dest->activeconns),
atomic_read(&dest->inactconns));
}
}
return 0;
}
static const struct seq_operations ip_vs_info_seq_ops = {
.start = ip_vs_info_seq_start,
.next = ip_vs_info_seq_next,
.stop = ip_vs_info_seq_stop,
.show = ip_vs_info_seq_show,
};
static int ip_vs_info_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &ip_vs_info_seq_ops,
sizeof(struct ip_vs_iter));
}
static const struct file_operations ip_vs_info_fops = {
.owner = THIS_MODULE,
.open = ip_vs_info_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
static int ip_vs_stats_show(struct seq_file *seq, void *v)
{
struct net *net = seq_file_single_net(seq);
struct ip_vs_stats_user show;
/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
seq_puts(seq,
" Total Incoming Outgoing Incoming Outgoing\n");
seq_printf(seq,
" Conns Packets Packets Bytes Bytes\n");
ip_vs_copy_stats(&show, &net_ipvs(net)->tot_stats);
seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", show.conns,
show.inpkts, show.outpkts,
(unsigned long long) show.inbytes,
(unsigned long long) show.outbytes);
/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
seq_puts(seq,
" Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n");
seq_printf(seq, "%8X %8X %8X %16X %16X\n",
show.cps, show.inpps, show.outpps,
show.inbps, show.outbps);
return 0;
}
static int ip_vs_stats_seq_open(struct inode *inode, struct file *file)
{
return single_open_net(inode, file, ip_vs_stats_show);
}
static const struct file_operations ip_vs_stats_fops = {
.owner = THIS_MODULE,
.open = ip_vs_stats_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release_net,
};
static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
{
struct net *net = seq_file_single_net(seq);
struct ip_vs_stats *tot_stats = &net_ipvs(net)->tot_stats;
struct ip_vs_cpu_stats *cpustats = tot_stats->cpustats;
struct ip_vs_stats_user rates;
int i;
/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
seq_puts(seq,
" Total Incoming Outgoing Incoming Outgoing\n");
seq_printf(seq,
"CPU Conns Packets Packets Bytes Bytes\n");
for_each_possible_cpu(i) {
struct ip_vs_cpu_stats *u = per_cpu_ptr(cpustats, i);
unsigned int start;
__u64 inbytes, outbytes;
do {
start = u64_stats_fetch_begin_bh(&u->syncp);
inbytes = u->ustats.inbytes;
outbytes = u->ustats.outbytes;
} while (u64_stats_fetch_retry_bh(&u->syncp, start));
seq_printf(seq, "%3X %8X %8X %8X %16LX %16LX\n",
i, u->ustats.conns, u->ustats.inpkts,
u->ustats.outpkts, (__u64)inbytes,
(__u64)outbytes);
}
spin_lock_bh(&tot_stats->lock);
seq_printf(seq, " ~ %8X %8X %8X %16LX %16LX\n\n",
tot_stats->ustats.conns, tot_stats->ustats.inpkts,
tot_stats->ustats.outpkts,
(unsigned long long) tot_stats->ustats.inbytes,
(unsigned long long) tot_stats->ustats.outbytes);
ip_vs_read_estimator(&rates, tot_stats);
spin_unlock_bh(&tot_stats->lock);
/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
seq_puts(seq,
" Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n");
seq_printf(seq, " %8X %8X %8X %16X %16X\n",
rates.cps,
rates.inpps,
rates.outpps,
rates.inbps,
rates.outbps);
return 0;
}
static int ip_vs_stats_percpu_seq_open(struct inode *inode, struct file *file)
{
return single_open_net(inode, file, ip_vs_stats_percpu_show);
}
static const struct file_operations ip_vs_stats_percpu_fops = {
.owner = THIS_MODULE,
.open = ip_vs_stats_percpu_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release_net,
};
#endif
/*
* Set timeout values for tcp tcpfin udp in the timeout_table.
*/
static int ip_vs_set_timeout(struct net *net, struct ip_vs_timeout_user *u)
{
#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP)
struct ip_vs_proto_data *pd;
#endif
IP_VS_DBG(2, "Setting timeout tcp:%d tcpfin:%d udp:%d\n",
u->tcp_timeout,
u->tcp_fin_timeout,
u->udp_timeout);
#ifdef CONFIG_IP_VS_PROTO_TCP
if (u->tcp_timeout) {
pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
pd->timeout_table[IP_VS_TCP_S_ESTABLISHED]
= u->tcp_timeout * HZ;
}
if (u->tcp_fin_timeout) {
pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
pd->timeout_table[IP_VS_TCP_S_FIN_WAIT]
= u->tcp_fin_timeout * HZ;
}
#endif
#ifdef CONFIG_IP_VS_PROTO_UDP
if (u->udp_timeout) {
pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
pd->timeout_table[IP_VS_UDP_S_NORMAL]
= u->udp_timeout * HZ;
}
#endif
return 0;
}
#define SET_CMDID(cmd) (cmd - IP_VS_BASE_CTL)
#define SERVICE_ARG_LEN (sizeof(struct ip_vs_service_user))
#define SVCDEST_ARG_LEN (sizeof(struct ip_vs_service_user) + \
sizeof(struct ip_vs_dest_user))
#define TIMEOUT_ARG_LEN (sizeof(struct ip_vs_timeout_user))
#define DAEMON_ARG_LEN (sizeof(struct ip_vs_daemon_user))
#define MAX_ARG_LEN SVCDEST_ARG_LEN
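/*
 * Expected argument length for each IP_VS_SO_SET_* command, indexed by
 * SET_CMDID(); a zero entry means the command takes no argument.
 */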
static const unsigned char set_arglen[SET_CMDID(IP_VS_SO_SET_MAX)+1] = {
[SET_CMDID(IP_VS_SO_SET_ADD)] = SERVICE_ARG_LEN,
[SET_CMDID(IP_VS_SO_SET_EDIT)] = SERVICE_ARG_LEN,
[SET_CMDID(IP_VS_SO_SET_DEL)] = SERVICE_ARG_LEN,
[SET_CMDID(IP_VS_SO_SET_FLUSH)] = 0,
[SET_CMDID(IP_VS_SO_SET_ADDDEST)] = SVCDEST_ARG_LEN,
[SET_CMDID(IP_VS_SO_SET_DELDEST)] = SVCDEST_ARG_LEN,
[SET_CMDID(IP_VS_SO_SET_EDITDEST)] = SVCDEST_ARG_LEN,
[SET_CMDID(IP_VS_SO_SET_TIMEOUT)] = TIMEOUT_ARG_LEN,
[SET_CMDID(IP_VS_SO_SET_STARTDAEMON)] = DAEMON_ARG_LEN,
[SET_CMDID(IP_VS_SO_SET_STOPDAEMON)] = DAEMON_ARG_LEN,
[SET_CMDID(IP_VS_SO_SET_ZERO)] = SERVICE_ARG_LEN,
};
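/*
 * Translate the old IPv4-only sockopt structs from userspace into the
 * extended kernel-internal representation; only the new structs are
 * used internally.
 */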
static void ip_vs_copy_usvc_compat(struct ip_vs_service_user_kern *usvc,
struct ip_vs_service_user *usvc_compat)
{
memset(usvc, 0, sizeof(*usvc));
usvc->af = AF_INET;
usvc->protocol = usvc_compat->protocol;
usvc->addr.ip = usvc_compat->addr;
usvc->port = usvc_compat->port;
usvc->fwmark = usvc_compat->fwmark;
/* Deep copy of sched_name is not needed here */
usvc->sched_name = usvc_compat->sched_name;
usvc->flags = usvc_compat->flags;
usvc->timeout = usvc_compat->timeout;
usvc->netmask = usvc_compat->netmask;
}
static void ip_vs_copy_udest_compat(struct ip_vs_dest_user_kern *udest,
struct ip_vs_dest_user *udest_compat)
{
memset(udest, 0, sizeof(*udest));
udest->addr.ip = udest_compat->addr;
udest->port = udest_compat->port;
udest->conn_flags = udest_compat->conn_flags;
udest->weight = udest_compat->weight;
udest->u_threshold = udest_compat->u_threshold;
udest->l_threshold = udest_compat->l_threshold;
}
static int
do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
struct net *net = sock_net(sk);
int ret;
unsigned char arg[MAX_ARG_LEN];
struct ip_vs_service_user *usvc_compat;
struct ip_vs_service_user_kern usvc;
struct ip_vs_service *svc;
struct ip_vs_dest_user *udest_compat;
struct ip_vs_dest_user_kern udest;
struct netns_ipvs *ipvs = net_ipvs(net);
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_SET_MAX)
return -EINVAL;
	if (len > MAX_ARG_LEN)
		return -EINVAL;
if (len != set_arglen[SET_CMDID(cmd)]) {
pr_err("set_ctl: len %u != %u\n",
len, set_arglen[SET_CMDID(cmd)]);
return -EINVAL;
}
if (copy_from_user(arg, user, len) != 0)
return -EFAULT;
/* increase the module use count */
ip_vs_use_count_inc();
	/* Handle daemon commands first since they use their own lock */
if (cmd == IP_VS_SO_SET_STARTDAEMON ||
cmd == IP_VS_SO_SET_STOPDAEMON) {
struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
if (mutex_lock_interruptible(&ipvs->sync_mutex)) {
ret = -ERESTARTSYS;
goto out_dec;
}
if (cmd == IP_VS_SO_SET_STARTDAEMON)
ret = start_sync_thread(net, dm->state, dm->mcast_ifn,
dm->syncid);
else
ret = stop_sync_thread(net, dm->state);
mutex_unlock(&ipvs->sync_mutex);
goto out_dec;
}
if (mutex_lock_interruptible(&__ip_vs_mutex)) {
ret = -ERESTARTSYS;
goto out_dec;
}
if (cmd == IP_VS_SO_SET_FLUSH) {
/* Flush the virtual service */
ret = ip_vs_flush(net);
goto out_unlock;
} else if (cmd == IP_VS_SO_SET_TIMEOUT) {
/* Set timeout values for (tcp tcpfin udp) */
ret = ip_vs_set_timeout(net, (struct ip_vs_timeout_user *)arg);
goto out_unlock;
}
usvc_compat = (struct ip_vs_service_user *)arg;
udest_compat = (struct ip_vs_dest_user *)(usvc_compat + 1);
/* We only use the new structs internally, so copy userspace compat
* structs to extended internal versions */
ip_vs_copy_usvc_compat(&usvc, usvc_compat);
ip_vs_copy_udest_compat(&udest, udest_compat);
if (cmd == IP_VS_SO_SET_ZERO) {
/* if no service address is set, zero counters in all */
if (!usvc.fwmark && !usvc.addr.ip && !usvc.port) {
ret = ip_vs_zero_all(net);
goto out_unlock;
}
}
/* Check for valid protocol: TCP or UDP or SCTP, even for fwmark!=0 */
if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP &&
usvc.protocol != IPPROTO_SCTP) {
pr_err("set_ctl: invalid protocol: %d %pI4:%d %s\n",
usvc.protocol, &usvc.addr.ip,
ntohs(usvc.port), usvc.sched_name);
ret = -EFAULT;
goto out_unlock;
}
/* Lookup the exact service by <protocol, addr, port> or fwmark */
if (usvc.fwmark == 0)
svc = __ip_vs_service_find(net, usvc.af, usvc.protocol,
&usvc.addr, usvc.port);
else
svc = __ip_vs_svc_fwm_find(net, usvc.af, usvc.fwmark);
if (cmd != IP_VS_SO_SET_ADD
&& (svc == NULL || svc->protocol != usvc.protocol)) {
ret = -ESRCH;
goto out_unlock;
}
switch (cmd) {
case IP_VS_SO_SET_ADD:
if (svc != NULL)
ret = -EEXIST;
else
ret = ip_vs_add_service(net, &usvc, &svc);
break;
case IP_VS_SO_SET_EDIT:
ret = ip_vs_edit_service(svc, &usvc);
break;
case IP_VS_SO_SET_DEL:
ret = ip_vs_del_service(svc);
if (!ret)
goto out_unlock;
break;
case IP_VS_SO_SET_ZERO:
ret = ip_vs_zero_service(svc);
break;
case IP_VS_SO_SET_ADDDEST:
ret = ip_vs_add_dest(svc, &udest);
break;
case IP_VS_SO_SET_EDITDEST:
ret = ip_vs_edit_dest(svc, &udest);
break;
case IP_VS_SO_SET_DELDEST:
ret = ip_vs_del_dest(svc, &udest);
break;
default:
ret = -EINVAL;
}
out_unlock:
mutex_unlock(&__ip_vs_mutex);
out_dec:
/* decrease the module use count */
ip_vs_use_count_dec();
return ret;
}
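/*
 * Fill a userspace ip_vs_service_entry from a kernel service, for the
 * old IPv4-only get/setsockopt interface.
 */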
static void
ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
{
dst->protocol = src->protocol;
dst->addr = src->addr.ip;
dst->port = src->port;
dst->fwmark = src->fwmark;
strlcpy(dst->sched_name, src->scheduler->name, sizeof(dst->sched_name));
dst->flags = src->flags;
dst->timeout = src->timeout / HZ;
dst->netmask = src->netmask;
dst->num_dests = src->num_dests;
ip_vs_copy_stats(&dst->stats, &src->stats);
}
static inline int
__ip_vs_get_service_entries(struct net *net,
const struct ip_vs_get_services *get,
struct ip_vs_get_services __user *uptr)
{
int idx, count=0;
struct ip_vs_service *svc;
struct ip_vs_service_entry entry;
int ret = 0;
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
/* Only expose IPv4 entries to old interface */
if (svc->af != AF_INET || !net_eq(svc->net, net))
continue;
if (count >= get->num_services)
goto out;
memset(&entry, 0, sizeof(entry));
ip_vs_copy_service(&entry, svc);
if (copy_to_user(&uptr->entrytable[count],
&entry, sizeof(entry))) {
ret = -EFAULT;
goto out;
}
count++;
}
}
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
/* Only expose IPv4 entries to old interface */
if (svc->af != AF_INET || !net_eq(svc->net, net))
continue;
if (count >= get->num_services)
goto out;
memset(&entry, 0, sizeof(entry));
ip_vs_copy_service(&entry, svc);
if (copy_to_user(&uptr->entrytable[count],
&entry, sizeof(entry))) {
ret = -EFAULT;
goto out;
}
count++;
}
}
out:
return ret;
}
static inline int
__ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
struct ip_vs_get_dests __user *uptr)
{
struct ip_vs_service *svc;
union nf_inet_addr addr = { .ip = get->addr };
int ret = 0;
if (get->fwmark)
svc = __ip_vs_svc_fwm_find(net, AF_INET, get->fwmark);
else
svc = __ip_vs_service_find(net, AF_INET, get->protocol, &addr,
get->port);
if (svc) {
int count = 0;
struct ip_vs_dest *dest;
struct ip_vs_dest_entry entry;
list_for_each_entry(dest, &svc->destinations, n_list) {
if (count >= get->num_dests)
break;
entry.addr = dest->addr.ip;
entry.port = dest->port;
entry.conn_flags = atomic_read(&dest->conn_flags);
entry.weight = atomic_read(&dest->weight);
entry.u_threshold = dest->u_threshold;
entry.l_threshold = dest->l_threshold;
entry.activeconns = atomic_read(&dest->activeconns);
entry.inactconns = atomic_read(&dest->inactconns);
entry.persistconns = atomic_read(&dest->persistconns);
ip_vs_copy_stats(&entry.stats, &dest->stats);
if (copy_to_user(&uptr->entrytable[count],
&entry, sizeof(entry))) {
ret = -EFAULT;
break;
}
count++;
}
} else
ret = -ESRCH;
return ret;
}
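/*
 * Report the current tcp/tcpfin/udp timeouts, converted from jiffies
 * back to seconds.
 */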
static inline void
__ip_vs_get_timeouts(struct net *net, struct ip_vs_timeout_user *u)
{
#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP)
struct ip_vs_proto_data *pd;
#endif
#ifdef CONFIG_IP_VS_PROTO_TCP
pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
u->tcp_timeout = pd->timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ;
u->tcp_fin_timeout = pd->timeout_table[IP_VS_TCP_S_FIN_WAIT] / HZ;
#endif
#ifdef CONFIG_IP_VS_PROTO_UDP
pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
u->udp_timeout =
pd->timeout_table[IP_VS_UDP_S_NORMAL] / HZ;
#endif
}
#define GET_CMDID(cmd) (cmd - IP_VS_BASE_CTL)
#define GET_INFO_ARG_LEN (sizeof(struct ip_vs_getinfo))
#define GET_SERVICES_ARG_LEN (sizeof(struct ip_vs_get_services))
#define GET_SERVICE_ARG_LEN (sizeof(struct ip_vs_service_entry))
#define GET_DESTS_ARG_LEN (sizeof(struct ip_vs_get_dests))
#define GET_TIMEOUT_ARG_LEN (sizeof(struct ip_vs_timeout_user))
#define GET_DAEMON_ARG_LEN (sizeof(struct ip_vs_daemon_user) * 2)
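/*
 * Minimum argument length expected for each IP_VS_SO_GET_* command,
 * indexed by GET_CMDID().
 */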
static const unsigned char get_arglen[GET_CMDID(IP_VS_SO_GET_MAX)+1] = {
[GET_CMDID(IP_VS_SO_GET_VERSION)] = 64,
[GET_CMDID(IP_VS_SO_GET_INFO)] = GET_INFO_ARG_LEN,
[GET_CMDID(IP_VS_SO_GET_SERVICES)] = GET_SERVICES_ARG_LEN,
[GET_CMDID(IP_VS_SO_GET_SERVICE)] = GET_SERVICE_ARG_LEN,
[GET_CMDID(IP_VS_SO_GET_DESTS)] = GET_DESTS_ARG_LEN,
[GET_CMDID(IP_VS_SO_GET_TIMEOUT)] = GET_TIMEOUT_ARG_LEN,
[GET_CMDID(IP_VS_SO_GET_DAEMON)] = GET_DAEMON_ARG_LEN,
};
static int
do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
unsigned char arg[128];
int ret = 0;
unsigned int copylen;
struct net *net = sock_net(sk);
struct netns_ipvs *ipvs = net_ipvs(net);
BUG_ON(!net);
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_GET_MAX)
return -EINVAL;
if (*len < get_arglen[GET_CMDID(cmd)]) {
pr_err("get_ctl: len %u < %u\n",
*len, get_arglen[GET_CMDID(cmd)]);
return -EINVAL;
}
copylen = get_arglen[GET_CMDID(cmd)];
if (copylen > 128)
return -EINVAL;
if (copy_from_user(arg, user, copylen) != 0)
return -EFAULT;
/*
 * Handle the daemon query first since it has its own locking
*/
if (cmd == IP_VS_SO_GET_DAEMON) {
struct ip_vs_daemon_user d[2];
memset(&d, 0, sizeof(d));
if (mutex_lock_interruptible(&ipvs->sync_mutex))
return -ERESTARTSYS;
if (ipvs->sync_state & IP_VS_STATE_MASTER) {
d[0].state = IP_VS_STATE_MASTER;
strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn,
sizeof(d[0].mcast_ifn));
d[0].syncid = ipvs->master_syncid;
}
if (ipvs->sync_state & IP_VS_STATE_BACKUP) {
d[1].state = IP_VS_STATE_BACKUP;
strlcpy(d[1].mcast_ifn, ipvs->backup_mcast_ifn,
sizeof(d[1].mcast_ifn));
d[1].syncid = ipvs->backup_syncid;
}
if (copy_to_user(user, &d, sizeof(d)) != 0)
ret = -EFAULT;
mutex_unlock(&ipvs->sync_mutex);
return ret;
}
if (mutex_lock_interruptible(&__ip_vs_mutex))
return -ERESTARTSYS;
switch (cmd) {
case IP_VS_SO_GET_VERSION:
{
char buf[64];
sprintf(buf, "IP Virtual Server version %d.%d.%d (size=%d)",
NVERSION(IP_VS_VERSION_CODE), ip_vs_conn_tab_size);
if (copy_to_user(user, buf, strlen(buf)+1) != 0) {
ret = -EFAULT;
goto out;
}
*len = strlen(buf)+1;
}
break;
case IP_VS_SO_GET_INFO:
{
struct ip_vs_getinfo info;
info.version = IP_VS_VERSION_CODE;
info.size = ip_vs_conn_tab_size;
info.num_services = ipvs->num_services;
if (copy_to_user(user, &info, sizeof(info)) != 0)
ret = -EFAULT;
}
break;
case IP_VS_SO_GET_SERVICES:
{
struct ip_vs_get_services *get;
int size;
get = (struct ip_vs_get_services *)arg;
size = sizeof(*get) +
sizeof(struct ip_vs_service_entry) * get->num_services;
if (*len != size) {
pr_err("length: %u != %u\n", *len, size);
ret = -EINVAL;
goto out;
}
ret = __ip_vs_get_service_entries(net, get, user);
}
break;
case IP_VS_SO_GET_SERVICE:
{
struct ip_vs_service_entry *entry;
struct ip_vs_service *svc;
union nf_inet_addr addr;
entry = (struct ip_vs_service_entry *)arg;
addr.ip = entry->addr;
if (entry->fwmark)
svc = __ip_vs_svc_fwm_find(net, AF_INET, entry->fwmark);
else
svc = __ip_vs_service_find(net, AF_INET,
entry->protocol, &addr,
entry->port);
if (svc) {
ip_vs_copy_service(entry, svc);
if (copy_to_user(user, entry, sizeof(*entry)) != 0)
ret = -EFAULT;
} else
ret = -ESRCH;
}
break;
case IP_VS_SO_GET_DESTS:
{
struct ip_vs_get_dests *get;
int size;
get = (struct ip_vs_get_dests *)arg;
size = sizeof(*get) +
sizeof(struct ip_vs_dest_entry) * get->num_dests;
if (*len != size) {
pr_err("length: %u != %u\n", *len, size);
ret = -EINVAL;
goto out;
}
ret = __ip_vs_get_dest_entries(net, get, user);
}
break;
case IP_VS_SO_GET_TIMEOUT:
{
struct ip_vs_timeout_user t;
__ip_vs_get_timeouts(net, &t);
if (copy_to_user(user, &t, sizeof(t)) != 0)
ret = -EFAULT;
}
break;
default:
ret = -EINVAL;
}
out:
mutex_unlock(&__ip_vs_mutex);
return ret;
}
static struct nf_sockopt_ops ip_vs_sockopts = {
.pf = PF_INET,
.set_optmin = IP_VS_BASE_CTL,
.set_optmax = IP_VS_SO_SET_MAX+1,
.set = do_ip_vs_set_ctl,
.get_optmin = IP_VS_BASE_CTL,
.get_optmax = IP_VS_SO_GET_MAX+1,
.get = do_ip_vs_get_ctl,
.owner = THIS_MODULE,
};
/*
* Generic Netlink interface
*/
/* IPVS genetlink family */
static struct genl_family ip_vs_genl_family = {
.id = GENL_ID_GENERATE,
.hdrsize = 0,
.name = IPVS_GENL_NAME,
.version = IPVS_GENL_VERSION,
.maxattr = IPVS_CMD_MAX,
	.netnsok	= true,		/* Make ipvsadm work with netns */
};
/* Policy used for first-level command attributes */
static const struct nla_policy ip_vs_cmd_policy[IPVS_CMD_ATTR_MAX + 1] = {
[IPVS_CMD_ATTR_SERVICE] = { .type = NLA_NESTED },
[IPVS_CMD_ATTR_DEST] = { .type = NLA_NESTED },
[IPVS_CMD_ATTR_DAEMON] = { .type = NLA_NESTED },
[IPVS_CMD_ATTR_TIMEOUT_TCP] = { .type = NLA_U32 },
[IPVS_CMD_ATTR_TIMEOUT_TCP_FIN] = { .type = NLA_U32 },
[IPVS_CMD_ATTR_TIMEOUT_UDP] = { .type = NLA_U32 },
};
/* Policy used for attributes in nested attribute IPVS_CMD_ATTR_DAEMON */
static const struct nla_policy ip_vs_daemon_policy[IPVS_DAEMON_ATTR_MAX + 1] = {
[IPVS_DAEMON_ATTR_STATE] = { .type = NLA_U32 },
[IPVS_DAEMON_ATTR_MCAST_IFN] = { .type = NLA_NUL_STRING,
.len = IP_VS_IFNAME_MAXLEN },
[IPVS_DAEMON_ATTR_SYNC_ID] = { .type = NLA_U32 },
};
/* Policy used for attributes in nested attribute IPVS_CMD_ATTR_SERVICE */
static const struct nla_policy ip_vs_svc_policy[IPVS_SVC_ATTR_MAX + 1] = {
[IPVS_SVC_ATTR_AF] = { .type = NLA_U16 },
[IPVS_SVC_ATTR_PROTOCOL] = { .type = NLA_U16 },
[IPVS_SVC_ATTR_ADDR] = { .type = NLA_BINARY,
.len = sizeof(union nf_inet_addr) },
[IPVS_SVC_ATTR_PORT] = { .type = NLA_U16 },
[IPVS_SVC_ATTR_FWMARK] = { .type = NLA_U32 },
[IPVS_SVC_ATTR_SCHED_NAME] = { .type = NLA_NUL_STRING,
.len = IP_VS_SCHEDNAME_MAXLEN },
[IPVS_SVC_ATTR_PE_NAME] = { .type = NLA_NUL_STRING,
.len = IP_VS_PENAME_MAXLEN },
[IPVS_SVC_ATTR_FLAGS] = { .type = NLA_BINARY,
.len = sizeof(struct ip_vs_flags) },
[IPVS_SVC_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPVS_SVC_ATTR_NETMASK] = { .type = NLA_U32 },
[IPVS_SVC_ATTR_STATS] = { .type = NLA_NESTED },
};
/* Policy used for attributes in nested attribute IPVS_CMD_ATTR_DEST */
static const struct nla_policy ip_vs_dest_policy[IPVS_DEST_ATTR_MAX + 1] = {
[IPVS_DEST_ATTR_ADDR] = { .type = NLA_BINARY,
.len = sizeof(union nf_inet_addr) },
[IPVS_DEST_ATTR_PORT] = { .type = NLA_U16 },
[IPVS_DEST_ATTR_FWD_METHOD] = { .type = NLA_U32 },
[IPVS_DEST_ATTR_WEIGHT] = { .type = NLA_U32 },
[IPVS_DEST_ATTR_U_THRESH] = { .type = NLA_U32 },
[IPVS_DEST_ATTR_L_THRESH] = { .type = NLA_U32 },
[IPVS_DEST_ATTR_ACTIVE_CONNS] = { .type = NLA_U32 },
[IPVS_DEST_ATTR_INACT_CONNS] = { .type = NLA_U32 },
[IPVS_DEST_ATTR_PERSIST_CONNS] = { .type = NLA_U32 },
[IPVS_DEST_ATTR_STATS] = { .type = NLA_NESTED },
};
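/*
 * Nest the statistics of a service or destination into a netlink
 * message as an IPVS_*_ATTR_STATS container.
 */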
static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type,
struct ip_vs_stats *stats)
{
struct ip_vs_stats_user ustats;
struct nlattr *nl_stats = nla_nest_start(skb, container_type);
if (!nl_stats)
return -EMSGSIZE;
ip_vs_copy_stats(&ustats, stats);
if (nla_put_u32(skb, IPVS_STATS_ATTR_CONNS, ustats.conns) ||
nla_put_u32(skb, IPVS_STATS_ATTR_INPKTS, ustats.inpkts) ||
nla_put_u32(skb, IPVS_STATS_ATTR_OUTPKTS, ustats.outpkts) ||
nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, ustats.inbytes) ||
nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, ustats.outbytes) ||
nla_put_u32(skb, IPVS_STATS_ATTR_CPS, ustats.cps) ||
nla_put_u32(skb, IPVS_STATS_ATTR_INPPS, ustats.inpps) ||
nla_put_u32(skb, IPVS_STATS_ATTR_OUTPPS, ustats.outpps) ||
nla_put_u32(skb, IPVS_STATS_ATTR_INBPS, ustats.inbps) ||
nla_put_u32(skb, IPVS_STATS_ATTR_OUTBPS, ustats.outbps))
goto nla_put_failure;
nla_nest_end(skb, nl_stats);
return 0;
nla_put_failure:
nla_nest_cancel(skb, nl_stats);
return -EMSGSIZE;
}
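/*
 * Nest a full service description into a netlink message; fwmark
 * services carry IPVS_SVC_ATTR_FWMARK instead of the
 * protocol/address/port triplet.
 */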
static int ip_vs_genl_fill_service(struct sk_buff *skb,
struct ip_vs_service *svc)
{
struct nlattr *nl_service;
struct ip_vs_flags flags = { .flags = svc->flags,
.mask = ~0 };
nl_service = nla_nest_start(skb, IPVS_CMD_ATTR_SERVICE);
if (!nl_service)
return -EMSGSIZE;
if (nla_put_u16(skb, IPVS_SVC_ATTR_AF, svc->af))
goto nla_put_failure;
if (svc->fwmark) {
if (nla_put_u32(skb, IPVS_SVC_ATTR_FWMARK, svc->fwmark))
goto nla_put_failure;
} else {
if (nla_put_u16(skb, IPVS_SVC_ATTR_PROTOCOL, svc->protocol) ||
nla_put(skb, IPVS_SVC_ATTR_ADDR, sizeof(svc->addr), &svc->addr) ||
nla_put_u16(skb, IPVS_SVC_ATTR_PORT, svc->port))
goto nla_put_failure;
}
if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, svc->scheduler->name) ||
(svc->pe &&
nla_put_string(skb, IPVS_SVC_ATTR_PE_NAME, svc->pe->name)) ||
nla_put(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags) ||
nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) ||
nla_put_u32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask))
goto nla_put_failure;
if (ip_vs_genl_fill_stats(skb, IPVS_SVC_ATTR_STATS, &svc->stats))
goto nla_put_failure;
nla_nest_end(skb, nl_service);
return 0;
nla_put_failure:
nla_nest_cancel(skb, nl_service);
return -EMSGSIZE;
}
static int ip_vs_genl_dump_service(struct sk_buff *skb,
struct ip_vs_service *svc,
struct netlink_callback *cb)
{
void *hdr;
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
&ip_vs_genl_family, NLM_F_MULTI,
IPVS_CMD_NEW_SERVICE);
if (!hdr)
return -EMSGSIZE;
if (ip_vs_genl_fill_service(skb, svc) < 0)
goto nla_put_failure;
return genlmsg_end(skb, hdr);
nla_put_failure:
genlmsg_cancel(skb, hdr);
return -EMSGSIZE;
}
static int ip_vs_genl_dump_services(struct sk_buff *skb,
struct netlink_callback *cb)
{
int idx = 0, i;
int start = cb->args[0];
struct ip_vs_service *svc;
struct net *net = skb_sknet(skb);
mutex_lock(&__ip_vs_mutex);
for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
list_for_each_entry(svc, &ip_vs_svc_table[i], s_list) {
if (++idx <= start || !net_eq(svc->net, net))
continue;
if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
idx--;
goto nla_put_failure;
}
}
}
for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
list_for_each_entry(svc, &ip_vs_svc_fwm_table[i], f_list) {
if (++idx <= start || !net_eq(svc->net, net))
continue;
if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
idx--;
goto nla_put_failure;
}
}
}
nla_put_failure:
mutex_unlock(&__ip_vs_mutex);
cb->args[0] = idx;
return skb->len;
}
static int ip_vs_genl_parse_service(struct net *net,
struct ip_vs_service_user_kern *usvc,
struct nlattr *nla, int full_entry,
struct ip_vs_service **ret_svc)
{
struct nlattr *attrs[IPVS_SVC_ATTR_MAX + 1];
struct nlattr *nla_af, *nla_port, *nla_fwmark, *nla_protocol, *nla_addr;
struct ip_vs_service *svc;
/* Parse mandatory identifying service fields first */
if (nla == NULL ||
nla_parse_nested(attrs, IPVS_SVC_ATTR_MAX, nla, ip_vs_svc_policy))
return -EINVAL;
nla_af = attrs[IPVS_SVC_ATTR_AF];
nla_protocol = attrs[IPVS_SVC_ATTR_PROTOCOL];
nla_addr = attrs[IPVS_SVC_ATTR_ADDR];
nla_port = attrs[IPVS_SVC_ATTR_PORT];
nla_fwmark = attrs[IPVS_SVC_ATTR_FWMARK];
if (!(nla_af && (nla_fwmark || (nla_port && nla_protocol && nla_addr))))
return -EINVAL;
memset(usvc, 0, sizeof(*usvc));
usvc->af = nla_get_u16(nla_af);
#ifdef CONFIG_IP_VS_IPV6
if (usvc->af != AF_INET && usvc->af != AF_INET6)
#else
if (usvc->af != AF_INET)
#endif
return -EAFNOSUPPORT;
if (nla_fwmark) {
usvc->protocol = IPPROTO_TCP;
usvc->fwmark = nla_get_u32(nla_fwmark);
} else {
usvc->protocol = nla_get_u16(nla_protocol);
nla_memcpy(&usvc->addr, nla_addr, sizeof(usvc->addr));
usvc->port = nla_get_u16(nla_port);
usvc->fwmark = 0;
}
if (usvc->fwmark)
svc = __ip_vs_svc_fwm_find(net, usvc->af, usvc->fwmark);
else
svc = __ip_vs_service_find(net, usvc->af, usvc->protocol,
&usvc->addr, usvc->port);
*ret_svc = svc;
/* If a full entry was requested, check for the additional fields */
if (full_entry) {
struct nlattr *nla_sched, *nla_flags, *nla_pe, *nla_timeout,
*nla_netmask;
struct ip_vs_flags flags;
nla_sched = attrs[IPVS_SVC_ATTR_SCHED_NAME];
nla_pe = attrs[IPVS_SVC_ATTR_PE_NAME];
nla_flags = attrs[IPVS_SVC_ATTR_FLAGS];
nla_timeout = attrs[IPVS_SVC_ATTR_TIMEOUT];
nla_netmask = attrs[IPVS_SVC_ATTR_NETMASK];
if (!(nla_sched && nla_flags && nla_timeout && nla_netmask))
return -EINVAL;
nla_memcpy(&flags, nla_flags, sizeof(flags));
/* prefill flags from service if it already exists */
if (svc)
usvc->flags = svc->flags;
/* set new flags from userland */
usvc->flags = (usvc->flags & ~flags.mask) |
(flags.flags & flags.mask);
usvc->sched_name = nla_data(nla_sched);
usvc->pe_name = nla_pe ? nla_data(nla_pe) : NULL;
usvc->timeout = nla_get_u32(nla_timeout);
usvc->netmask = nla_get_u32(nla_netmask);
}
return 0;
}
static struct ip_vs_service *ip_vs_genl_find_service(struct net *net,
struct nlattr *nla)
{
struct ip_vs_service_user_kern usvc;
struct ip_vs_service *svc;
int ret;
ret = ip_vs_genl_parse_service(net, &usvc, nla, 0, &svc);
return ret ? ERR_PTR(ret) : svc;
}
static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
{
struct nlattr *nl_dest;
nl_dest = nla_nest_start(skb, IPVS_CMD_ATTR_DEST);
if (!nl_dest)
return -EMSGSIZE;
if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
nla_put_u16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
(atomic_read(&dest->conn_flags) &
IP_VS_CONN_F_FWD_MASK)) ||
nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
atomic_read(&dest->weight)) ||
nla_put_u32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold) ||
nla_put_u32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold) ||
nla_put_u32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS,
atomic_read(&dest->activeconns)) ||
nla_put_u32(skb, IPVS_DEST_ATTR_INACT_CONNS,
atomic_read(&dest->inactconns)) ||
nla_put_u32(skb, IPVS_DEST_ATTR_PERSIST_CONNS,
atomic_read(&dest->persistconns)))
goto nla_put_failure;
if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &dest->stats))
goto nla_put_failure;
nla_nest_end(skb, nl_dest);
return 0;
nla_put_failure:
nla_nest_cancel(skb, nl_dest);
return -EMSGSIZE;
}
static int ip_vs_genl_dump_dest(struct sk_buff *skb, struct ip_vs_dest *dest,
struct netlink_callback *cb)
{
void *hdr;
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
&ip_vs_genl_family, NLM_F_MULTI,
IPVS_CMD_NEW_DEST);
if (!hdr)
return -EMSGSIZE;
if (ip_vs_genl_fill_dest(skb, dest) < 0)
goto nla_put_failure;
return genlmsg_end(skb, hdr);
nla_put_failure:
genlmsg_cancel(skb, hdr);
return -EMSGSIZE;
}
static int ip_vs_genl_dump_dests(struct sk_buff *skb,
struct netlink_callback *cb)
{
int idx = 0;
int start = cb->args[0];
struct ip_vs_service *svc;
struct ip_vs_dest *dest;
struct nlattr *attrs[IPVS_CMD_ATTR_MAX + 1];
struct net *net = skb_sknet(skb);
mutex_lock(&__ip_vs_mutex);
/* Try to find the service for which to dump destinations */
if (nlmsg_parse(cb->nlh, GENL_HDRLEN, attrs,
IPVS_CMD_ATTR_MAX, ip_vs_cmd_policy))
goto out_err;
svc = ip_vs_genl_find_service(net, attrs[IPVS_CMD_ATTR_SERVICE]);
if (IS_ERR(svc) || svc == NULL)
goto out_err;
/* Dump the destinations */
list_for_each_entry(dest, &svc->destinations, n_list) {
if (++idx <= start)
continue;
if (ip_vs_genl_dump_dest(skb, dest, cb) < 0) {
idx--;
goto nla_put_failure;
}
}
nla_put_failure:
cb->args[0] = idx;
out_err:
mutex_unlock(&__ip_vs_mutex);
return skb->len;
}
static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest,
struct nlattr *nla, int full_entry)
{
struct nlattr *attrs[IPVS_DEST_ATTR_MAX + 1];
struct nlattr *nla_addr, *nla_port;
/* Parse mandatory identifying destination fields first */
if (nla == NULL ||
nla_parse_nested(attrs, IPVS_DEST_ATTR_MAX, nla, ip_vs_dest_policy))
return -EINVAL;
nla_addr = attrs[IPVS_DEST_ATTR_ADDR];
nla_port = attrs[IPVS_DEST_ATTR_PORT];
if (!(nla_addr && nla_port))
return -EINVAL;
memset(udest, 0, sizeof(*udest));
nla_memcpy(&udest->addr, nla_addr, sizeof(udest->addr));
udest->port = nla_get_u16(nla_port);
/* If a full entry was requested, check for the additional fields */
if (full_entry) {
struct nlattr *nla_fwd, *nla_weight, *nla_u_thresh,
*nla_l_thresh;
nla_fwd = attrs[IPVS_DEST_ATTR_FWD_METHOD];
nla_weight = attrs[IPVS_DEST_ATTR_WEIGHT];
nla_u_thresh = attrs[IPVS_DEST_ATTR_U_THRESH];
nla_l_thresh = attrs[IPVS_DEST_ATTR_L_THRESH];
if (!(nla_fwd && nla_weight && nla_u_thresh && nla_l_thresh))
return -EINVAL;
udest->conn_flags = nla_get_u32(nla_fwd)
& IP_VS_CONN_F_FWD_MASK;
udest->weight = nla_get_u32(nla_weight);
udest->u_threshold = nla_get_u32(nla_u_thresh);
udest->l_threshold = nla_get_u32(nla_l_thresh);
}
return 0;
}
static int ip_vs_genl_fill_daemon(struct sk_buff *skb, __be32 state,
const char *mcast_ifn, __be32 syncid)
{
struct nlattr *nl_daemon;
nl_daemon = nla_nest_start(skb, IPVS_CMD_ATTR_DAEMON);
if (!nl_daemon)
return -EMSGSIZE;
if (nla_put_u32(skb, IPVS_DAEMON_ATTR_STATE, state) ||
nla_put_string(skb, IPVS_DAEMON_ATTR_MCAST_IFN, mcast_ifn) ||
nla_put_u32(skb, IPVS_DAEMON_ATTR_SYNC_ID, syncid))
goto nla_put_failure;
nla_nest_end(skb, nl_daemon);
return 0;
nla_put_failure:
nla_nest_cancel(skb, nl_daemon);
return -EMSGSIZE;
}
static int ip_vs_genl_dump_daemon(struct sk_buff *skb, __be32 state,
const char *mcast_ifn, __be32 syncid,
struct netlink_callback *cb)
{
void *hdr;
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
&ip_vs_genl_family, NLM_F_MULTI,
IPVS_CMD_NEW_DAEMON);
if (!hdr)
return -EMSGSIZE;
if (ip_vs_genl_fill_daemon(skb, state, mcast_ifn, syncid))
goto nla_put_failure;
return genlmsg_end(skb, hdr);
nla_put_failure:
genlmsg_cancel(skb, hdr);
return -EMSGSIZE;
}
static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
struct netlink_callback *cb)
{
struct net *net = skb_sknet(skb);
struct netns_ipvs *ipvs = net_ipvs(net);
mutex_lock(&ipvs->sync_mutex);
if ((ipvs->sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) {
if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER,
ipvs->master_mcast_ifn,
ipvs->master_syncid, cb) < 0)
goto nla_put_failure;
cb->args[0] = 1;
}
if ((ipvs->sync_state & IP_VS_STATE_BACKUP) && !cb->args[1]) {
if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_BACKUP,
ipvs->backup_mcast_ifn,
ipvs->backup_syncid, cb) < 0)
goto nla_put_failure;
cb->args[1] = 1;
}
nla_put_failure:
mutex_unlock(&ipvs->sync_mutex);
return skb->len;
}
static int ip_vs_genl_new_daemon(struct net *net, struct nlattr **attrs)
{
if (!(attrs[IPVS_DAEMON_ATTR_STATE] &&
attrs[IPVS_DAEMON_ATTR_MCAST_IFN] &&
attrs[IPVS_DAEMON_ATTR_SYNC_ID]))
return -EINVAL;
return start_sync_thread(net,
nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]),
nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]),
nla_get_u32(attrs[IPVS_DAEMON_ATTR_SYNC_ID]));
}
static int ip_vs_genl_del_daemon(struct net *net, struct nlattr **attrs)
{
if (!attrs[IPVS_DAEMON_ATTR_STATE])
return -EINVAL;
return stop_sync_thread(net,
nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
}
static int ip_vs_genl_set_config(struct net *net, struct nlattr **attrs)
{
struct ip_vs_timeout_user t;
__ip_vs_get_timeouts(net, &t);
if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP])
t.tcp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP]);
if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP_FIN])
t.tcp_fin_timeout =
nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP_FIN]);
if (attrs[IPVS_CMD_ATTR_TIMEOUT_UDP])
t.udp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_UDP]);
return ip_vs_set_timeout(net, &t);
}
static int ip_vs_genl_set_daemon(struct sk_buff *skb, struct genl_info *info)
{
int ret = 0, cmd;
struct net *net;
struct netns_ipvs *ipvs;
net = skb_sknet(skb);
ipvs = net_ipvs(net);
cmd = info->genlhdr->cmd;
if (cmd == IPVS_CMD_NEW_DAEMON || cmd == IPVS_CMD_DEL_DAEMON) {
struct nlattr *daemon_attrs[IPVS_DAEMON_ATTR_MAX + 1];
mutex_lock(&ipvs->sync_mutex);
if (!info->attrs[IPVS_CMD_ATTR_DAEMON] ||
nla_parse_nested(daemon_attrs, IPVS_DAEMON_ATTR_MAX,
info->attrs[IPVS_CMD_ATTR_DAEMON],
ip_vs_daemon_policy)) {
ret = -EINVAL;
goto out;
}
if (cmd == IPVS_CMD_NEW_DAEMON)
ret = ip_vs_genl_new_daemon(net, daemon_attrs);
else
ret = ip_vs_genl_del_daemon(net, daemon_attrs);
out:
mutex_unlock(&ipvs->sync_mutex);
}
return ret;
}
static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
{
struct ip_vs_service *svc = NULL;
struct ip_vs_service_user_kern usvc;
struct ip_vs_dest_user_kern udest;
int ret = 0, cmd;
int need_full_svc = 0, need_full_dest = 0;
struct net *net;
net = skb_sknet(skb);
cmd = info->genlhdr->cmd;
mutex_lock(&__ip_vs_mutex);
if (cmd == IPVS_CMD_FLUSH) {
ret = ip_vs_flush(net);
goto out;
} else if (cmd == IPVS_CMD_SET_CONFIG) {
ret = ip_vs_genl_set_config(net, info->attrs);
goto out;
} else if (cmd == IPVS_CMD_ZERO &&
!info->attrs[IPVS_CMD_ATTR_SERVICE]) {
ret = ip_vs_zero_all(net);
goto out;
}
/* All following commands require a service argument, so check if we
* received a valid one. We need a full service specification when
* adding / editing a service. Only identifying members otherwise. */
if (cmd == IPVS_CMD_NEW_SERVICE || cmd == IPVS_CMD_SET_SERVICE)
need_full_svc = 1;
ret = ip_vs_genl_parse_service(net, &usvc,
info->attrs[IPVS_CMD_ATTR_SERVICE],
need_full_svc, &svc);
if (ret)
goto out;
/* Unless we're adding a new service, the service must already exist */
if ((cmd != IPVS_CMD_NEW_SERVICE) && (svc == NULL)) {
ret = -ESRCH;
goto out;
}
/* Destination commands require a valid destination argument. For
* adding / editing a destination, we need a full destination
* specification. */
if (cmd == IPVS_CMD_NEW_DEST || cmd == IPVS_CMD_SET_DEST ||
cmd == IPVS_CMD_DEL_DEST) {
if (cmd != IPVS_CMD_DEL_DEST)
need_full_dest = 1;
ret = ip_vs_genl_parse_dest(&udest,
info->attrs[IPVS_CMD_ATTR_DEST],
need_full_dest);
if (ret)
goto out;
}
switch (cmd) {
case IPVS_CMD_NEW_SERVICE:
if (svc == NULL)
ret = ip_vs_add_service(net, &usvc, &svc);
else
ret = -EEXIST;
break;
case IPVS_CMD_SET_SERVICE:
ret = ip_vs_edit_service(svc, &usvc);
break;
case IPVS_CMD_DEL_SERVICE:
ret = ip_vs_del_service(svc);
/* do not use svc, it can be freed */
break;
case IPVS_CMD_NEW_DEST:
ret = ip_vs_add_dest(svc, &udest);
break;
case IPVS_CMD_SET_DEST:
ret = ip_vs_edit_dest(svc, &udest);
break;
case IPVS_CMD_DEL_DEST:
ret = ip_vs_del_dest(svc, &udest);
break;
case IPVS_CMD_ZERO:
ret = ip_vs_zero_service(svc);
break;
default:
ret = -EINVAL;
}
out:
mutex_unlock(&__ip_vs_mutex);
return ret;
}
static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
{
struct sk_buff *msg;
void *reply;
int ret, cmd, reply_cmd;
struct net *net;
net = skb_sknet(skb);
cmd = info->genlhdr->cmd;
if (cmd == IPVS_CMD_GET_SERVICE)
reply_cmd = IPVS_CMD_NEW_SERVICE;
else if (cmd == IPVS_CMD_GET_INFO)
reply_cmd = IPVS_CMD_SET_INFO;
else if (cmd == IPVS_CMD_GET_CONFIG)
reply_cmd = IPVS_CMD_SET_CONFIG;
else {
pr_err("unknown Generic Netlink command\n");
return -EINVAL;
}
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
mutex_lock(&__ip_vs_mutex);
reply = genlmsg_put_reply(msg, info, &ip_vs_genl_family, 0, reply_cmd);
if (reply == NULL)
goto nla_put_failure;
switch (cmd) {
case IPVS_CMD_GET_SERVICE:
{
struct ip_vs_service *svc;
svc = ip_vs_genl_find_service(net,
info->attrs[IPVS_CMD_ATTR_SERVICE]);
if (IS_ERR(svc)) {
ret = PTR_ERR(svc);
goto out_err;
} else if (svc) {
ret = ip_vs_genl_fill_service(msg, svc);
if (ret)
goto nla_put_failure;
} else {
ret = -ESRCH;
goto out_err;
}
break;
}
case IPVS_CMD_GET_CONFIG:
{
struct ip_vs_timeout_user t;
__ip_vs_get_timeouts(net, &t);
#ifdef CONFIG_IP_VS_PROTO_TCP
if (nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP,
t.tcp_timeout) ||
nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN,
t.tcp_fin_timeout))
goto nla_put_failure;
#endif
#ifdef CONFIG_IP_VS_PROTO_UDP
if (nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_UDP, t.udp_timeout))
goto nla_put_failure;
#endif
break;
}
case IPVS_CMD_GET_INFO:
if (nla_put_u32(msg, IPVS_INFO_ATTR_VERSION,
IP_VS_VERSION_CODE) ||
nla_put_u32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE,
ip_vs_conn_tab_size))
goto nla_put_failure;
break;
}
genlmsg_end(msg, reply);
ret = genlmsg_reply(msg, info);
goto out;
nla_put_failure:
pr_err("not enough space in Netlink message\n");
ret = -EMSGSIZE;
out_err:
nlmsg_free(msg);
out:
mutex_unlock(&__ip_vs_mutex);
return ret;
}
static struct genl_ops ip_vs_genl_ops[] __read_mostly = {
{
.cmd = IPVS_CMD_NEW_SERVICE,
.flags = GENL_ADMIN_PERM,
.policy = ip_vs_cmd_policy,
.doit = ip_vs_genl_set_cmd,
},
{
.cmd = IPVS_CMD_SET_SERVICE,
.flags = GENL_ADMIN_PERM,
.policy = ip_vs_cmd_policy,
.doit = ip_vs_genl_set_cmd,
},
{
.cmd = IPVS_CMD_DEL_SERVICE,
.flags = GENL_ADMIN_PERM,
.policy = ip_vs_cmd_policy,
.doit = ip_vs_genl_set_cmd,
},
{
.cmd = IPVS_CMD_GET_SERVICE,
.flags = GENL_ADMIN_PERM,
.doit = ip_vs_genl_get_cmd,
.dumpit = ip_vs_genl_dump_services,
.policy = ip_vs_cmd_policy,
},
{
.cmd = IPVS_CMD_NEW_DEST,
.flags = GENL_ADMIN_PERM,
.policy = ip_vs_cmd_policy,
.doit = ip_vs_genl_set_cmd,
},
{
.cmd = IPVS_CMD_SET_DEST,
.flags = GENL_ADMIN_PERM,
.policy = ip_vs_cmd_policy,
.doit = ip_vs_genl_set_cmd,
},
{
.cmd = IPVS_CMD_DEL_DEST,
.flags = GENL_ADMIN_PERM,
.policy = ip_vs_cmd_policy,
.doit = ip_vs_genl_set_cmd,
},
{
.cmd = IPVS_CMD_GET_DEST,
.flags = GENL_ADMIN_PERM,
.policy = ip_vs_cmd_policy,
.dumpit = ip_vs_genl_dump_dests,
},
{
.cmd = IPVS_CMD_NEW_DAEMON,
.flags = GENL_ADMIN_PERM,
.policy = ip_vs_cmd_policy,
.doit = ip_vs_genl_set_daemon,
},
{
.cmd = IPVS_CMD_DEL_DAEMON,
.flags = GENL_ADMIN_PERM,
.policy = ip_vs_cmd_policy,
.doit = ip_vs_genl_set_daemon,
},
{
.cmd = IPVS_CMD_GET_DAEMON,
.flags = GENL_ADMIN_PERM,
.dumpit = ip_vs_genl_dump_daemons,
},
{
.cmd = IPVS_CMD_SET_CONFIG,
.flags = GENL_ADMIN_PERM,
.policy = ip_vs_cmd_policy,
.doit = ip_vs_genl_set_cmd,
},
{
.cmd = IPVS_CMD_GET_CONFIG,
.flags = GENL_ADMIN_PERM,
.doit = ip_vs_genl_get_cmd,
},
{
.cmd = IPVS_CMD_GET_INFO,
.flags = GENL_ADMIN_PERM,
.doit = ip_vs_genl_get_cmd,
},
{
.cmd = IPVS_CMD_ZERO,
.flags = GENL_ADMIN_PERM,
.policy = ip_vs_cmd_policy,
.doit = ip_vs_genl_set_cmd,
},
{
.cmd = IPVS_CMD_FLUSH,
.flags = GENL_ADMIN_PERM,
.doit = ip_vs_genl_set_cmd,
},
};
static int __init ip_vs_genl_register(void)
{
return genl_register_family_with_ops(&ip_vs_genl_family,
ip_vs_genl_ops, ARRAY_SIZE(ip_vs_genl_ops));
}
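/* Illustrative only, not part of this file: userspace reaches the ops
* table above through generic netlink. A minimal libnl-3 sketch of an
* IPVS_CMD_GET_INFO request (error handling omitted) might look like:
*
*	struct nl_sock *sk = nl_socket_alloc();
*	genl_connect(sk);
*	int fam = genl_ctrl_resolve(sk, IPVS_GENL_NAME);
*	struct nl_msg *msg = nlmsg_alloc();
*	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, fam, 0, 0,
*		    IPVS_CMD_GET_INFO, IPVS_GENL_VERSION);
*	nl_send_auto(sk, msg);
*
* The GENL_ADMIN_PERM flag on every op means such a request only
* succeeds for a CAP_NET_ADMIN caller.
*/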
static void ip_vs_genl_unregister(void)
{
genl_unregister_family(&ip_vs_genl_family);
}
/* End of Generic Netlink interface definitions */
/*
* per netns init/exit functions.
*/
#ifdef CONFIG_SYSCTL
int __net_init ip_vs_control_net_init_sysctl(struct net *net)
{
int idx;
struct netns_ipvs *ipvs = net_ipvs(net);
struct ctl_table *tbl;
atomic_set(&ipvs->dropentry, 0);
spin_lock_init(&ipvs->dropentry_lock);
spin_lock_init(&ipvs->droppacket_lock);
spin_lock_init(&ipvs->securetcp_lock);
if (!net_eq(net, &init_net)) {
tbl = kmemdup(vs_vars, sizeof(vs_vars), GFP_KERNEL);
if (tbl == NULL)
return -ENOMEM;
} else
tbl = vs_vars;
/* Initialize sysctl defaults */
idx = 0;
ipvs->sysctl_amemthresh = 1024;
tbl[idx++].data = &ipvs->sysctl_amemthresh;
ipvs->sysctl_am_droprate = 10;
tbl[idx++].data = &ipvs->sysctl_am_droprate;
tbl[idx++].data = &ipvs->sysctl_drop_entry;
tbl[idx++].data = &ipvs->sysctl_drop_packet;
#ifdef CONFIG_IP_VS_NFCT
tbl[idx++].data = &ipvs->sysctl_conntrack;
#endif
tbl[idx++].data = &ipvs->sysctl_secure_tcp;
ipvs->sysctl_snat_reroute = 1;
tbl[idx++].data = &ipvs->sysctl_snat_reroute;
ipvs->sysctl_sync_ver = 1;
tbl[idx++].data = &ipvs->sysctl_sync_ver;
ipvs->sysctl_sync_ports = 1;
tbl[idx++].data = &ipvs->sysctl_sync_ports;
ipvs->sysctl_sync_qlen_max = nr_free_buffer_pages() / 32;
tbl[idx++].data = &ipvs->sysctl_sync_qlen_max;
ipvs->sysctl_sync_sock_size = 0;
tbl[idx++].data = &ipvs->sysctl_sync_sock_size;
tbl[idx++].data = &ipvs->sysctl_cache_bypass;
tbl[idx++].data = &ipvs->sysctl_expire_nodest_conn;
tbl[idx++].data = &ipvs->sysctl_expire_quiescent_template;
ipvs->sysctl_sync_threshold[0] = DEFAULT_SYNC_THRESHOLD;
ipvs->sysctl_sync_threshold[1] = DEFAULT_SYNC_PERIOD;
tbl[idx].data = &ipvs->sysctl_sync_threshold;
tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold);
ipvs->sysctl_sync_refresh_period = DEFAULT_SYNC_REFRESH_PERIOD;
tbl[idx++].data = &ipvs->sysctl_sync_refresh_period;
ipvs->sysctl_sync_retries = clamp_t(int, DEFAULT_SYNC_RETRIES, 0, 3);
tbl[idx++].data = &ipvs->sysctl_sync_retries;
tbl[idx++].data = &ipvs->sysctl_nat_icmp_send;
ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
if (ipvs->sysctl_hdr == NULL) {
if (!net_eq(net, &init_net))
kfree(tbl);
return -ENOMEM;
}
ip_vs_start_estimator(net, &ipvs->tot_stats);
ipvs->sysctl_tbl = tbl;
/* Schedule defense work */
INIT_DELAYED_WORK(&ipvs->defense_work, defense_work_handler);
schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD);
return 0;
}
void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
cancel_delayed_work_sync(&ipvs->defense_work);
cancel_work_sync(&ipvs->defense_work.work);
unregister_net_sysctl_table(ipvs->sysctl_hdr);
}
#else
int __net_init ip_vs_control_net_init_sysctl(struct net *net) { return 0; }
void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net) { }
#endif
static struct notifier_block ip_vs_dst_notifier = {
.notifier_call = ip_vs_dst_event,
};
int __net_init ip_vs_control_net_init(struct net *net)
{
int idx;
struct netns_ipvs *ipvs = net_ipvs(net);
rwlock_init(&ipvs->rs_lock);
/* Initialize rs_table */
for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++)
INIT_LIST_HEAD(&ipvs->rs_table[idx]);
INIT_LIST_HEAD(&ipvs->dest_trash);
atomic_set(&ipvs->ftpsvc_counter, 0);
atomic_set(&ipvs->nullsvc_counter, 0);
/* procfs stats */
ipvs->tot_stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
if (!ipvs->tot_stats.cpustats)
return -ENOMEM;
spin_lock_init(&ipvs->tot_stats.lock);
proc_net_fops_create(net, "ip_vs", 0, &ip_vs_info_fops);
proc_net_fops_create(net, "ip_vs_stats", 0, &ip_vs_stats_fops);
proc_net_fops_create(net, "ip_vs_stats_percpu", 0,
&ip_vs_stats_percpu_fops);
if (ip_vs_control_net_init_sysctl(net))
goto err;
return 0;
err:
free_percpu(ipvs->tot_stats.cpustats);
return -ENOMEM;
}
void __net_exit ip_vs_control_net_cleanup(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
ip_vs_trash_cleanup(net);
ip_vs_stop_estimator(net, &ipvs->tot_stats);
ip_vs_control_net_cleanup_sysctl(net);
proc_net_remove(net, "ip_vs_stats_percpu");
proc_net_remove(net, "ip_vs_stats");
proc_net_remove(net, "ip_vs");
free_percpu(ipvs->tot_stats.cpustats);
}
int __init ip_vs_register_nl_ioctl(void)
{
int ret;
ret = nf_register_sockopt(&ip_vs_sockopts);
if (ret) {
pr_err("cannot register sockopt.\n");
goto err_sock;
}
ret = ip_vs_genl_register();
if (ret) {
pr_err("cannot register Generic Netlink interface.\n");
goto err_genl;
}
return 0;
err_genl:
nf_unregister_sockopt(&ip_vs_sockopts);
err_sock:
return ret;
}
void ip_vs_unregister_nl_ioctl(void)
{
ip_vs_genl_unregister();
nf_unregister_sockopt(&ip_vs_sockopts);
}
int __init ip_vs_control_init(void)
{
int idx;
int ret;
EnterFunction(2);
/* Initialize svc_table, ip_vs_svc_fwm_table, rs_table */
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
INIT_LIST_HEAD(&ip_vs_svc_table[idx]);
INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]);
}
smp_wmb(); /* Do we really need it now ? */
ret = register_netdevice_notifier(&ip_vs_dst_notifier);
if (ret < 0)
return ret;
LeaveFunction(2);
return 0;
}
void ip_vs_control_cleanup(void)
{
EnterFunction(2);
unregister_netdevice_notifier(&ip_vs_dst_notifier);
LeaveFunction(2);
}
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_3828_0 |
crossvul-cpp_data_bad_758_2 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <linux/sched/task.h>
#include <linux/uidgid.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
/*
* Our network namespace constructor/destructor lists
*/
static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);
/* Protects net_namespace_list. Nests inside rtnl_lock() */
DECLARE_RWSEM(net_rwsem);
EXPORT_SYMBOL_GPL(net_rwsem);
struct net init_net = {
.count = REFCOUNT_INIT(1),
.dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);
static bool init_net_initialized;
/*
* pernet_ops_rwsem: protects: pernet_list, net_generic_ids,
* init_net_initialized and first_device pointer.
* This is an internal net namespace object. Please don't use it
* outside.
*/
DECLARE_RWSEM(pernet_ops_rwsem);
EXPORT_SYMBOL_GPL(pernet_ops_rwsem);
#define MIN_PERNET_OPS_ID \
((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))
#define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */
static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;
static struct net_generic *net_alloc_generic(void)
{
struct net_generic *ng;
unsigned int generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);
ng = kzalloc(generic_size, GFP_KERNEL);
if (ng)
ng->s.len = max_gen_ptrs;
return ng;
}
static int net_assign_generic(struct net *net, unsigned int id, void *data)
{
struct net_generic *ng, *old_ng;
BUG_ON(id < MIN_PERNET_OPS_ID);
old_ng = rcu_dereference_protected(net->gen,
lockdep_is_held(&pernet_ops_rwsem));
if (old_ng->s.len > id) {
old_ng->ptr[id] = data;
return 0;
}
ng = net_alloc_generic();
if (ng == NULL)
return -ENOMEM;
/*
* Some synchronisation notes:
*
* net_generic() walks the net->gen array inside an RCU
* read-side section. Moreover, once set, a net->gen->ptr[x]
* pointer never changes (see the rules in netns/generic.h).
*
* That said, we simply duplicate this array and schedule
* the old copy for kfree after a grace period.
*/
memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
(old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
ng->ptr[id] = data;
rcu_assign_pointer(net->gen, ng);
kfree_rcu(old_ng, s.rcu);
return 0;
}
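/* Illustrative only (the "foo" identifiers are hypothetical): the usual
* pattern pairing net_assign_generic() with net_generic() lookups is
* driven by pernet_operations with .id and .size set:
*
*	static unsigned int foo_net_id __read_mostly;
*	static struct pernet_operations foo_net_ops = {
*		.id   = &foo_net_id,
*		.size = sizeof(struct foo_net),
*	};
*	...
*	struct foo_net *fn = net_generic(net, foo_net_id);
*
* ops_init() below then allocates the per-net area and stores it via
* net_assign_generic() automatically.
*/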
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
int err = -ENOMEM;
void *data = NULL;
if (ops->id && ops->size) {
data = kzalloc(ops->size, GFP_KERNEL);
if (!data)
goto out;
err = net_assign_generic(net, *ops->id, data);
if (err)
goto cleanup;
}
err = 0;
if (ops->init)
err = ops->init(net);
if (!err)
return 0;
cleanup:
kfree(data);
out:
return err;
}
static void ops_free(const struct pernet_operations *ops, struct net *net)
{
if (ops->id && ops->size) {
kfree(net_generic(net, *ops->id));
}
}
static void ops_exit_list(const struct pernet_operations *ops,
struct list_head *net_exit_list)
{
struct net *net;
if (ops->exit) {
list_for_each_entry(net, net_exit_list, exit_list)
ops->exit(net);
}
if (ops->exit_batch)
ops->exit_batch(net_exit_list);
}
static void ops_free_list(const struct pernet_operations *ops,
struct list_head *net_exit_list)
{
struct net *net;
if (ops->size && ops->id) {
list_for_each_entry(net, net_exit_list, exit_list)
ops_free(ops, net);
}
}
/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
int min = 0, max = 0;
if (reqid >= 0) {
min = reqid;
max = reqid + 1;
}
return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}
/* This function is used by idr_for_each(). If net is equal to peer, the
* function returns the id so that idr_for_each() stops. Because we cannot
* return the id 0 (idr_for_each() would not stop), we return the magic value
* NET_ID_ZERO (-1) for it.
*/
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
if (net_eq(net, peer))
return id ? : NET_ID_ZERO;
return 0;
}
/* Should be called with nsid_lock held. If a new id is assigned, *alloc
* is set to true, so the caller knows that the new id must be notified via
* rtnl.
*/
static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
{
int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
bool alloc_it = *alloc;
*alloc = false;
/* Magic value for id 0. */
if (id == NET_ID_ZERO)
return 0;
if (id > 0)
return id;
if (alloc_it) {
id = alloc_netid(net, peer, -1);
*alloc = true;
return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
}
return NETNSA_NSID_NOT_ASSIGNED;
}
/* should be called with nsid_lock held */
static int __peernet2id(struct net *net, struct net *peer)
{
bool no = false;
return __peernet2id_alloc(net, peer, &no);
}
static void rtnl_net_notifyid(struct net *net, int cmd, int id);
/* This function returns the id of a peer netns. If no id is assigned, one will
* be allocated and returned.
*/
int peernet2id_alloc(struct net *net, struct net *peer)
{
bool alloc = false, alive = false;
int id;
if (refcount_read(&net->count) == 0)
return NETNSA_NSID_NOT_ASSIGNED;
spin_lock_bh(&net->nsid_lock);
/*
* When peer is obtained from RCU lists, we may race with
* its cleanup. Check whether it's alive, and this guarantees
* we never hash a peer back to net->netns_ids after it has
* just been idr_remove()'d from there in cleanup_net().
*/
if (maybe_get_net(peer))
alive = alloc = true;
id = __peernet2id_alloc(net, peer, &alloc);
spin_unlock_bh(&net->nsid_lock);
if (alloc && id >= 0)
rtnl_net_notifyid(net, RTM_NEWNSID, id);
if (alive)
put_net(peer);
return id;
}
EXPORT_SYMBOL_GPL(peernet2id_alloc);
/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
int id;
spin_lock_bh(&net->nsid_lock);
id = __peernet2id(net, peer);
spin_unlock_bh(&net->nsid_lock);
return id;
}
EXPORT_SYMBOL(peernet2id);
/* This function returns true if the peer netns has an id assigned in the
* current netns.
*/
bool peernet_has_id(struct net *net, struct net *peer)
{
return peernet2id(net, peer) >= 0;
}
struct net *get_net_ns_by_id(struct net *net, int id)
{
struct net *peer;
if (id < 0)
return NULL;
rcu_read_lock();
peer = idr_find(&net->netns_ids, id);
if (peer)
peer = maybe_get_net(peer);
rcu_read_unlock();
return peer;
}
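/* Note: maybe_get_net() only takes a reference while net->count is
* non-zero, so get_net_ns_by_id() never returns a namespace whose
* teardown has already begun. */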
/*
* setup_net runs the initializers for the network namespace object.
*/
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
/* Must be called with pernet_ops_rwsem held */
const struct pernet_operations *ops, *saved_ops;
int error = 0;
LIST_HEAD(net_exit_list);
refcount_set(&net->count, 1);
refcount_set(&net->passive, 1);
net->dev_base_seq = 1;
net->user_ns = user_ns;
idr_init(&net->netns_ids);
spin_lock_init(&net->nsid_lock);
mutex_init(&net->ipv4.ra_mutex);
list_for_each_entry(ops, &pernet_list, list) {
error = ops_init(ops, net);
if (error < 0)
goto out_undo;
}
down_write(&net_rwsem);
list_add_tail_rcu(&net->list, &net_namespace_list);
up_write(&net_rwsem);
out:
return error;
out_undo:
/* Walk through the list backwards calling the exit functions
* for the pernet modules whose init functions did not fail.
*/
list_add(&net->exit_list, &net_exit_list);
saved_ops = ops;
list_for_each_entry_continue_reverse(ops, &pernet_list, list)
ops_exit_list(ops, &net_exit_list);
ops = saved_ops;
list_for_each_entry_continue_reverse(ops, &pernet_list, list)
ops_free_list(ops, &net_exit_list);
rcu_barrier();
goto out;
}
static int __net_init net_defaults_init_net(struct net *net)
{
net->core.sysctl_somaxconn = SOMAXCONN;
return 0;
}
static struct pernet_operations net_defaults_ops = {
.init = net_defaults_init_net,
};
static __init int net_defaults_init(void)
{
if (register_pernet_subsys(&net_defaults_ops))
panic("Cannot initialize net default settings");
return 0;
}
core_initcall(net_defaults_init);
#ifdef CONFIG_NET_NS
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}
static void dec_net_namespaces(struct ucounts *ucounts)
{
dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}
static struct kmem_cache *net_cachep __ro_after_init;
static struct workqueue_struct *netns_wq;
static struct net *net_alloc(void)
{
struct net *net = NULL;
struct net_generic *ng;
ng = net_alloc_generic();
if (!ng)
goto out;
net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
if (!net)
goto out_free;
rcu_assign_pointer(net->gen, ng);
out:
return net;
out_free:
kfree(ng);
goto out;
}
static void net_free(struct net *net)
{
kfree(rcu_access_pointer(net->gen));
kmem_cache_free(net_cachep, net);
}
void net_drop_ns(void *p)
{
struct net *ns = p;
if (ns && refcount_dec_and_test(&ns->passive))
net_free(ns);
}
struct net *copy_net_ns(unsigned long flags,
struct user_namespace *user_ns, struct net *old_net)
{
struct ucounts *ucounts;
struct net *net;
int rv;
if (!(flags & CLONE_NEWNET))
return get_net(old_net);
ucounts = inc_net_namespaces(user_ns);
if (!ucounts)
return ERR_PTR(-ENOSPC);
net = net_alloc();
if (!net) {
rv = -ENOMEM;
goto dec_ucounts;
}
refcount_set(&net->passive, 1);
net->ucounts = ucounts;
get_user_ns(user_ns);
rv = down_read_killable(&pernet_ops_rwsem);
if (rv < 0)
goto put_userns;
rv = setup_net(net, user_ns);
up_read(&pernet_ops_rwsem);
if (rv < 0) {
put_userns:
put_user_ns(user_ns);
net_drop_ns(net);
dec_ucounts:
dec_net_namespaces(ucounts);
return ERR_PTR(rv);
}
return net;
}
/**
* net_ns_get_ownership - get sysfs ownership data for @net
* @net: network namespace in question (can be NULL)
* @uid: kernel user ID for sysfs objects
* @gid: kernel group ID for sysfs objects
*
* Returns the uid/gid pair of root in the user namespace associated with the
* given network namespace.
*/
void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid)
{
if (net) {
kuid_t ns_root_uid = make_kuid(net->user_ns, 0);
kgid_t ns_root_gid = make_kgid(net->user_ns, 0);
if (uid_valid(ns_root_uid))
*uid = ns_root_uid;
if (gid_valid(ns_root_gid))
*gid = ns_root_gid;
} else {
*uid = GLOBAL_ROOT_UID;
*gid = GLOBAL_ROOT_GID;
}
}
EXPORT_SYMBOL_GPL(net_ns_get_ownership);
static void unhash_nsid(struct net *net, struct net *last)
{
struct net *tmp;
/* This function is only called from the cleanup_net() work,
* which is the only context that may delete a net from
* net_namespace_list. So, while the loop below executes, the
* list may only grow; thus we need neither for_each_net_rcu()
* nor net_rwsem here.
*/
for_each_net(tmp) {
int id;
spin_lock_bh(&tmp->nsid_lock);
id = __peernet2id(tmp, net);
if (id >= 0)
idr_remove(&tmp->netns_ids, id);
spin_unlock_bh(&tmp->nsid_lock);
if (id >= 0)
rtnl_net_notifyid(tmp, RTM_DELNSID, id);
if (tmp == last)
break;
}
spin_lock_bh(&net->nsid_lock);
idr_destroy(&net->netns_ids);
spin_unlock_bh(&net->nsid_lock);
}
static LLIST_HEAD(cleanup_list);
static void cleanup_net(struct work_struct *work)
{
const struct pernet_operations *ops;
struct net *net, *tmp, *last;
struct llist_node *net_kill_list;
LIST_HEAD(net_exit_list);
/* Atomically snapshot the list of namespaces to cleanup */
net_kill_list = llist_del_all(&cleanup_list);
down_read(&pernet_ops_rwsem);
/* Don't let anyone else find us. */
down_write(&net_rwsem);
llist_for_each_entry(net, net_kill_list, cleanup_list)
list_del_rcu(&net->list);
/* Cache the last net. After we unlock net_rwsem, no net newly
* added to net_namespace_list can assign an nsid to a net from
* net_kill_list (see peernet2id_alloc()), so we skip those
* in unhash_nsid().
*
* Note that unhash_nsid() does not delete nsid links between
* net_kill_list's nets, as they have already been deleted from
* net_namespace_list. That would be useless anyway, since their
* netns_ids are destroyed there.
*/
last = list_last_entry(&net_namespace_list, struct net, list);
up_write(&net_rwsem);
llist_for_each_entry(net, net_kill_list, cleanup_list) {
unhash_nsid(net, last);
list_add_tail(&net->exit_list, &net_exit_list);
}
/*
* Another CPU might be rcu-iterating the list, wait for it.
* This needs to be before calling the exit() notifiers, so
* the rcu_barrier() below isn't sufficient alone.
*/
synchronize_rcu();
/* Run all of the network namespace exit methods */
list_for_each_entry_reverse(ops, &pernet_list, list)
ops_exit_list(ops, &net_exit_list);
/* Free the net generic variables */
list_for_each_entry_reverse(ops, &pernet_list, list)
ops_free_list(ops, &net_exit_list);
up_read(&pernet_ops_rwsem);
/* Ensure there are no outstanding rcu callbacks using this
* network namespace.
*/
rcu_barrier();
/* Finally it is safe to free my network namespace structure */
list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
list_del_init(&net->exit_list);
dec_net_namespaces(net->ucounts);
put_user_ns(net->user_ns);
net_drop_ns(net);
}
}
/**
* net_ns_barrier - wait until concurrent net_cleanup_work is done
*
* cleanup_net runs from work queue and will first remove namespaces
* from the global list, then run net exit functions.
*
* Call this in module exit path to make sure that all netns
* ->exit ops have been invoked before the function is removed.
*/
void net_ns_barrier(void)
{
down_write(&pernet_ops_rwsem);
up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL(net_ns_barrier);
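/* Illustrative only (hypothetical "foo" module): per the comment above,
* a module exit path that must not return while any of its netns ->exit
* ops may still be running from the cleanup_net() work could do:
*
*	static void __exit foo_exit(void)
*	{
*		unregister_pernet_subsys(&foo_net_ops);
*		net_ns_barrier();	// wait for concurrent cleanup_net()
*	}
*/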
static DECLARE_WORK(net_cleanup_work, cleanup_net);
void __put_net(struct net *net)
{
/* Cleanup the network namespace in process context */
if (llist_add(&net->cleanup_list, &cleanup_list))
queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);
struct net *get_net_ns_by_fd(int fd)
{
struct file *file;
struct ns_common *ns;
struct net *net;
file = proc_ns_fget(fd);
if (IS_ERR(file))
return ERR_CAST(file);
ns = get_proc_ns(file_inode(file));
if (ns->ops == &netns_operations)
net = get_net(container_of(ns, struct net, ns));
else
net = ERR_PTR(-EINVAL);
fput(file);
return net;
}
#else
struct net *get_net_ns_by_fd(int fd)
{
return ERR_PTR(-EINVAL);
}
#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
struct net *get_net_ns_by_pid(pid_t pid)
{
struct task_struct *tsk;
struct net *net;
/* Lookup the network namespace */
net = ERR_PTR(-ESRCH);
rcu_read_lock();
tsk = find_task_by_vpid(pid);
if (tsk) {
struct nsproxy *nsproxy;
task_lock(tsk);
nsproxy = tsk->nsproxy;
if (nsproxy)
net = get_net(nsproxy->net_ns);
task_unlock(tsk);
}
rcu_read_unlock();
return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);
static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
net->ns.ops = &netns_operations;
#endif
return ns_alloc_inum(&net->ns);
}
static __net_exit void net_ns_net_exit(struct net *net)
{
ns_free_inum(&net->ns);
}
static struct pernet_operations __net_initdata net_ns_ops = {
.init = net_ns_net_init,
.exit = net_ns_net_exit,
};
static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
[NETNSA_NONE] = { .type = NLA_UNSPEC },
[NETNSA_NSID] = { .type = NLA_S32 },
[NETNSA_PID] = { .type = NLA_U32 },
[NETNSA_FD] = { .type = NLA_U32 },
[NETNSA_TARGET_NSID] = { .type = NLA_S32 },
};
static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct nlattr *tb[NETNSA_MAX + 1];
struct nlattr *nla;
struct net *peer;
int nsid, err;
err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
rtnl_net_policy, extack);
if (err < 0)
return err;
if (!tb[NETNSA_NSID]) {
NL_SET_ERR_MSG(extack, "nsid is missing");
return -EINVAL;
}
nsid = nla_get_s32(tb[NETNSA_NSID]);
if (tb[NETNSA_PID]) {
peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
nla = tb[NETNSA_PID];
} else if (tb[NETNSA_FD]) {
peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
nla = tb[NETNSA_FD];
} else {
NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
return -EINVAL;
}
if (IS_ERR(peer)) {
NL_SET_BAD_ATTR(extack, nla);
NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
return PTR_ERR(peer);
}
spin_lock_bh(&net->nsid_lock);
if (__peernet2id(net, peer) >= 0) {
spin_unlock_bh(&net->nsid_lock);
err = -EEXIST;
NL_SET_BAD_ATTR(extack, nla);
NL_SET_ERR_MSG(extack,
"Peer netns already has a nsid assigned");
goto out;
}
err = alloc_netid(net, peer, nsid);
spin_unlock_bh(&net->nsid_lock);
if (err >= 0) {
rtnl_net_notifyid(net, RTM_NEWNSID, err);
err = 0;
} else if (err == -ENOSPC && nsid >= 0) {
err = -EEXIST;
NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]);
NL_SET_ERR_MSG(extack, "The specified nsid is already used");
}
out:
put_net(peer);
return err;
}
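/* For reference: this is the handler behind RTM_NEWNSID requests, e.g.
* the ones iproute2 issues for "ip netns set <name> <nsid>". */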
static int rtnl_net_get_size(void)
{
return NLMSG_ALIGN(sizeof(struct rtgenmsg))
+ nla_total_size(sizeof(s32)) /* NETNSA_NSID */
+ nla_total_size(sizeof(s32)) /* NETNSA_CURRENT_NSID */
;
}
struct net_fill_args {
u32 portid;
u32 seq;
int flags;
int cmd;
int nsid;
bool add_ref;
int ref_nsid;
};
static int rtnl_net_fill(struct sk_buff *skb, struct net_fill_args *args)
{
struct nlmsghdr *nlh;
struct rtgenmsg *rth;
nlh = nlmsg_put(skb, args->portid, args->seq, args->cmd, sizeof(*rth),
args->flags);
if (!nlh)
return -EMSGSIZE;
rth = nlmsg_data(nlh);
rth->rtgen_family = AF_UNSPEC;
if (nla_put_s32(skb, NETNSA_NSID, args->nsid))
goto nla_put_failure;
if (args->add_ref &&
nla_put_s32(skb, NETNSA_CURRENT_NSID, args->ref_nsid))
goto nla_put_failure;
nlmsg_end(skb, nlh);
return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
static int rtnl_net_valid_getid_req(struct sk_buff *skb,
const struct nlmsghdr *nlh,
struct nlattr **tb,
struct netlink_ext_ack *extack)
{
int i, err;
if (!netlink_strict_get_check(skb))
return nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
rtnl_net_policy, extack);
err = nlmsg_parse_strict(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
rtnl_net_policy, extack);
if (err)
return err;
for (i = 0; i <= NETNSA_MAX; i++) {
if (!tb[i])
continue;
switch (i) {
case NETNSA_PID:
case NETNSA_FD:
case NETNSA_NSID:
case NETNSA_TARGET_NSID:
break;
default:
NL_SET_ERR_MSG(extack, "Unsupported attribute in peer netns getid request");
return -EINVAL;
}
}
return 0;
}
static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct nlattr *tb[NETNSA_MAX + 1];
struct net_fill_args fillargs = {
.portid = NETLINK_CB(skb).portid,
.seq = nlh->nlmsg_seq,
.cmd = RTM_NEWNSID,
};
struct net *peer, *target = net;
struct nlattr *nla;
struct sk_buff *msg;
int err;
err = rtnl_net_valid_getid_req(skb, nlh, tb, extack);
if (err < 0)
return err;
if (tb[NETNSA_PID]) {
peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
nla = tb[NETNSA_PID];
} else if (tb[NETNSA_FD]) {
peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
nla = tb[NETNSA_FD];
} else if (tb[NETNSA_NSID]) {
peer = get_net_ns_by_id(net, nla_get_u32(tb[NETNSA_NSID]));
if (!peer)
peer = ERR_PTR(-ENOENT);
nla = tb[NETNSA_NSID];
} else {
NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
return -EINVAL;
}
if (IS_ERR(peer)) {
NL_SET_BAD_ATTR(extack, nla);
NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
return PTR_ERR(peer);
}
if (tb[NETNSA_TARGET_NSID]) {
int id = nla_get_s32(tb[NETNSA_TARGET_NSID]);
target = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, id);
if (IS_ERR(target)) {
NL_SET_BAD_ATTR(extack, tb[NETNSA_TARGET_NSID]);
NL_SET_ERR_MSG(extack,
"Target netns reference is invalid");
err = PTR_ERR(target);
goto out;
}
fillargs.add_ref = true;
fillargs.ref_nsid = peernet2id(net, peer);
}
msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
if (!msg) {
err = -ENOMEM;
goto out;
}
fillargs.nsid = peernet2id(target, peer);
err = rtnl_net_fill(msg, &fillargs);
if (err < 0)
goto err_out;
err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
goto out;
err_out:
nlmsg_free(msg);
out:
if (fillargs.add_ref)
put_net(target);
put_net(peer);
return err;
}
struct rtnl_net_dump_cb {
struct net *tgt_net;
struct net *ref_net;
struct sk_buff *skb;
struct net_fill_args fillargs;
int idx;
int s_idx;
};
static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
int ret;
if (net_cb->idx < net_cb->s_idx)
goto cont;
net_cb->fillargs.nsid = id;
if (net_cb->fillargs.add_ref)
net_cb->fillargs.ref_nsid = __peernet2id(net_cb->ref_net, peer);
ret = rtnl_net_fill(net_cb->skb, &net_cb->fillargs);
if (ret < 0)
return ret;
cont:
net_cb->idx++;
return 0;
}
static int rtnl_valid_dump_net_req(const struct nlmsghdr *nlh, struct sock *sk,
struct rtnl_net_dump_cb *net_cb,
struct netlink_callback *cb)
{
struct netlink_ext_ack *extack = cb->extack;
struct nlattr *tb[NETNSA_MAX + 1];
int err, i;
err = nlmsg_parse_strict(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
rtnl_net_policy, extack);
if (err < 0)
return err;
for (i = 0; i <= NETNSA_MAX; i++) {
if (!tb[i])
continue;
if (i == NETNSA_TARGET_NSID) {
struct net *net;
net = rtnl_get_net_ns_capable(sk, nla_get_s32(tb[i]));
if (IS_ERR(net)) {
NL_SET_BAD_ATTR(extack, tb[i]);
NL_SET_ERR_MSG(extack,
"Invalid target network namespace id");
return PTR_ERR(net);
}
net_cb->fillargs.add_ref = true;
net_cb->ref_net = net_cb->tgt_net;
net_cb->tgt_net = net;
} else {
NL_SET_BAD_ATTR(extack, tb[i]);
NL_SET_ERR_MSG(extack,
"Unsupported attribute in dump request");
return -EINVAL;
}
}
return 0;
}
static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
struct rtnl_net_dump_cb net_cb = {
.tgt_net = sock_net(skb->sk),
.skb = skb,
.fillargs = {
.portid = NETLINK_CB(cb->skb).portid,
.seq = cb->nlh->nlmsg_seq,
.flags = NLM_F_MULTI,
.cmd = RTM_NEWNSID,
},
.idx = 0,
.s_idx = cb->args[0],
};
int err = 0;
if (cb->strict_check) {
err = rtnl_valid_dump_net_req(cb->nlh, skb->sk, &net_cb, cb);
if (err < 0)
goto end;
}
spin_lock_bh(&net_cb.tgt_net->nsid_lock);
if (net_cb.fillargs.add_ref &&
!net_eq(net_cb.ref_net, net_cb.tgt_net) &&
!spin_trylock_bh(&net_cb.ref_net->nsid_lock)) {
spin_unlock_bh(&net_cb.tgt_net->nsid_lock);
err = -EAGAIN;
goto end;
}
idr_for_each(&net_cb.tgt_net->netns_ids, rtnl_net_dumpid_one, &net_cb);
if (net_cb.fillargs.add_ref &&
!net_eq(net_cb.ref_net, net_cb.tgt_net))
spin_unlock_bh(&net_cb.ref_net->nsid_lock);
spin_unlock_bh(&net_cb.tgt_net->nsid_lock);
cb->args[0] = net_cb.idx;
end:
if (net_cb.fillargs.add_ref)
put_net(net_cb.tgt_net);
return err < 0 ? err : skb->len;
}
static void rtnl_net_notifyid(struct net *net, int cmd, int id)
{
struct net_fill_args fillargs = {
.cmd = cmd,
.nsid = id,
};
struct sk_buff *msg;
int err = -ENOMEM;
msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
if (!msg)
goto out;
err = rtnl_net_fill(msg, &fillargs);
if (err < 0)
goto err_out;
rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
return;
err_out:
nlmsg_free(msg);
out:
rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}
static int __init net_ns_init(void)
{
struct net_generic *ng;
#ifdef CONFIG_NET_NS
net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
SMP_CACHE_BYTES,
SLAB_PANIC|SLAB_ACCOUNT, NULL);
/* Create workqueue for cleanup */
netns_wq = create_singlethread_workqueue("netns");
if (!netns_wq)
panic("Could not create netns workq");
#endif
ng = net_alloc_generic();
if (!ng)
panic("Could not allocate generic netns");
rcu_assign_pointer(init_net.gen, ng);
down_write(&pernet_ops_rwsem);
if (setup_net(&init_net, &init_user_ns))
panic("Could not setup the initial network namespace");
init_net_initialized = true;
up_write(&pernet_ops_rwsem);
if (register_pernet_subsys(&net_ns_ops))
panic("Could not register network namespace subsystems");
rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL,
RTNL_FLAG_DOIT_UNLOCKED);
rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
RTNL_FLAG_DOIT_UNLOCKED);
return 0;
}
pure_initcall(net_ns_init);
#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
struct pernet_operations *ops)
{
struct net *net;
int error;
LIST_HEAD(net_exit_list);
list_add_tail(&ops->list, list);
if (ops->init || (ops->id && ops->size)) {
/* We held write locked pernet_ops_rwsem, and parallel
* setup_net() and cleanup_net() are not possible.
*/
for_each_net(net) {
error = ops_init(ops, net);
if (error)
goto out_undo;
list_add_tail(&net->exit_list, &net_exit_list);
}
}
return 0;
out_undo:
/* On error, clean up all the namespaces we initialized */
list_del(&ops->list);
ops_exit_list(ops, &net_exit_list);
ops_free_list(ops, &net_exit_list);
return error;
}
static void __unregister_pernet_operations(struct pernet_operations *ops)
{
struct net *net;
LIST_HEAD(net_exit_list);
list_del(&ops->list);
/* See comment in __register_pernet_operations() */
for_each_net(net)
list_add_tail(&net->exit_list, &net_exit_list);
ops_exit_list(ops, &net_exit_list);
ops_free_list(ops, &net_exit_list);
}
#else
static int __register_pernet_operations(struct list_head *list,
struct pernet_operations *ops)
{
if (!init_net_initialized) {
list_add_tail(&ops->list, list);
return 0;
}
return ops_init(ops, &init_net);
}
static void __unregister_pernet_operations(struct pernet_operations *ops)
{
if (!init_net_initialized) {
list_del(&ops->list);
} else {
LIST_HEAD(net_exit_list);
list_add(&init_net.exit_list, &net_exit_list);
ops_exit_list(ops, &net_exit_list);
ops_free_list(ops, &net_exit_list);
}
}
#endif /* CONFIG_NET_NS */
static DEFINE_IDA(net_generic_ids);
static int register_pernet_operations(struct list_head *list,
struct pernet_operations *ops)
{
int error;
if (ops->id) {
error = ida_alloc_min(&net_generic_ids, MIN_PERNET_OPS_ID,
GFP_KERNEL);
if (error < 0)
return error;
*ops->id = error;
max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1);
}
error = __register_pernet_operations(list, ops);
if (error) {
rcu_barrier();
if (ops->id)
ida_free(&net_generic_ids, *ops->id);
}
return error;
}
static void unregister_pernet_operations(struct pernet_operations *ops)
{
__unregister_pernet_operations(ops);
rcu_barrier();
if (ops->id)
ida_free(&net_generic_ids, *ops->id);
}
/**
* register_pernet_subsys - register a network namespace subsystem
* @ops: pernet operations structure for the subsystem
*
* Register a subsystem which has init and exit functions
* that are called when network namespaces are created and
* destroyed respectively.
*
* When registered, all network namespace init functions are
* called for every existing network namespace, allowing kernel
* modules to have a race-free view of the set of network namespaces.
*
* When a new network namespace is created all of the init
* methods are called in the order in which they were registered.
*
* When a network namespace is destroyed all of the exit methods
* are called in the reverse of the order in which they were
* registered.
*/
int register_pernet_subsys(struct pernet_operations *ops)
{
int error;
down_write(&pernet_ops_rwsem);
error = register_pernet_operations(first_device, ops);
up_write(&pernet_ops_rwsem);
return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);
/**
* unregister_pernet_subsys - unregister a network namespace subsystem
* @ops: pernet operations structure to manipulate
*
* Remove the pernet operations structure from the list to be
* used when network namespaces are created or destroyed. In
* addition run the exit method for all existing network
* namespaces.
*/
void unregister_pernet_subsys(struct pernet_operations *ops)
{
down_write(&pernet_ops_rwsem);
unregister_pernet_operations(ops);
up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
/**
* register_pernet_device - register a network namespace device
* @ops: pernet operations structure for the subsystem
*
* Register a device which has init and exit functions
* that are called when network namespaces are created and
* destroyed respectively.
*
* When registered, all network namespace init functions are
* called for every existing network namespace, allowing kernel
* modules to have a race-free view of the set of network namespaces.
*
* When a new network namespace is created all of the init
* methods are called in the order in which they were registered.
*
* When a network namespace is destroyed all of the exit methods
* are called in the reverse of the order in which they were
* registered.
*/
int register_pernet_device(struct pernet_operations *ops)
{
int error;
down_write(&pernet_ops_rwsem);
error = register_pernet_operations(&pernet_list, ops);
if (!error && (first_device == &pernet_list))
first_device = &ops->list;
up_write(&pernet_ops_rwsem);
return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);
/**
* unregister_pernet_device - unregister a network namespace netdevice
* @ops: pernet operations structure to manipulate
*
* Remove the pernet operations structure from the list to be
* used when network namespaces are created or destroyed. In
* addition run the exit method for all existing network
* namespaces.
*/
void unregister_pernet_device(struct pernet_operations *ops)
{
down_write(&pernet_ops_rwsem);
if (&ops->list == first_device)
first_device = first_device->next;
unregister_pernet_operations(ops);
up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);
#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
struct net *net = NULL;
struct nsproxy *nsproxy;
task_lock(task);
nsproxy = task->nsproxy;
if (nsproxy)
net = get_net(nsproxy->net_ns);
task_unlock(task);
return net ? &net->ns : NULL;
}
static inline struct net *to_net_ns(struct ns_common *ns)
{
return container_of(ns, struct net, ns);
}
static void netns_put(struct ns_common *ns)
{
put_net(to_net_ns(ns));
}
static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
struct net *net = to_net_ns(ns);
if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
return -EPERM;
put_net(nsproxy->net_ns);
nsproxy->net_ns = get_net(net);
return 0;
}
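/* Illustrative only (userspace view, not part of this file): the
* ->install hook above is what ultimately runs when a process calls
* setns(2) on a netns fd, e.g.
*
*	int fd = open("/var/run/netns/blue", O_RDONLY);	// iproute2-style path
*	setns(fd, CLONE_NEWNET);
*
* Both CAP_SYS_ADMIN checks in netns_install() must pass for the call
* to succeed.
*/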
static struct user_namespace *netns_owner(struct ns_common *ns)
{
return to_net_ns(ns)->user_ns;
}
const struct proc_ns_operations netns_operations = {
.name = "net",
.type = CLONE_NEWNET,
.get = netns_get,
.put = netns_put,
.install = netns_install,
.owner = netns_owner,
};
#endif
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_758_2 |
crossvul-cpp_data_good_2828_0 | /*
* History:
* Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
* to allow user process control of SCSI devices.
* Development Sponsored by Killy Corp. NY NY
*
* Original driver (sg.c):
* Copyright (C) 1992 Lawrence Foard
* Version 2 and 3 extensions to driver:
* Copyright (C) 1998 - 2014 Douglas Gilbert
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
*/
static int sg_version_num = 30536; /* 2 digits for each component */
#define SG_VERSION_STR "3.5.36"
/*
* D. P. Gilbert (dgilbert@interlog.com), notes:
* - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
* the kernel/module needs to be built with CONFIG_SCSI_LOGGING
* (otherwise the macros compile to empty statements).
*
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/mtio.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/ratelimit.h>
#include <linux/uio.h>
#include "scsi.h"
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/sg.h>
#include "scsi_logging.h"
#ifdef CONFIG_SCSI_PROC_FS
#include <linux/proc_fs.h>
static char *sg_version_date = "20140603";
static int sg_proc_init(void);
static void sg_proc_cleanup(void);
#endif
#define SG_ALLOW_DIO_DEF 0
#define SG_MAX_DEVS 32768
/* SG_MAX_CDB_SIZE should be 260 (spc4r37 section 3.1.30) however the type
* of sg_io_hdr::cmd_len can only represent 255. All SCSI commands greater
* than 16 bytes are "variable length" whose length is a multiple of 4
*/
#define SG_MAX_CDB_SIZE 252
#define SG_DEFAULT_TIMEOUT mult_frac(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
int sg_big_buff = SG_DEF_RESERVED_SIZE;
/* N.B. This variable is readable and writeable via
/proc/scsi/sg/def_reserved_size. Each time sg_open() is called a buffer
of this size (or less if there is not enough memory) will be reserved
for use by this file descriptor. [Deprecated usage: this variable is also
readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
the kernel (i.e. it is not a module).] */
static int def_reserved_size = -1; /* picks up init parameter */
static int sg_allow_dio = SG_ALLOW_DIO_DEF;
static int scatter_elem_sz = SG_SCATTER_SZ;
static int scatter_elem_sz_prev = SG_SCATTER_SZ;
#define SG_SECTOR_SZ 512
static int sg_add_device(struct device *, struct class_interface *);
static void sg_remove_device(struct device *, struct class_interface *);
static DEFINE_IDR(sg_index_idr);
static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock
file descriptor list for device */
static struct class_interface sg_interface = {
.add_dev = sg_add_device,
.remove_dev = sg_remove_device,
};
typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */
unsigned bufflen; /* Size of (aggregate) data buffer */
struct page **pages;
int page_order;
char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */
unsigned char cmd_opcode; /* first byte of command */
} Sg_scatter_hold;
struct sg_device; /* forward declarations */
struct sg_fd;
typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
struct list_head entry; /* list entry */
struct sg_fd *parentfp; /* NULL -> not in use */
Sg_scatter_hold data; /* hold buffer, perhaps scatter list */
sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */
unsigned char sense_b[SCSI_SENSE_BUFFERSIZE];
char res_used; /* 1 -> using reserve buffer, 0 -> not ... */
char orphan; /* 1 -> drop on sight, 0 -> normal */
char sg_io_owned; /* 1 -> packet belongs to SG_IO */
/* done protected by rq_list_lock */
char done; /* 0->before bh, 1->before read, 2->read */
struct request *rq;
struct bio *bio;
struct execute_work ew;
} Sg_request;
typedef struct sg_fd { /* holds the state of a file descriptor */
struct list_head sfd_siblings; /* protected by device's sfd_lock */
struct sg_device *parentdp; /* owning device */
wait_queue_head_t read_wait; /* queue read until command done */
rwlock_t rq_list_lock; /* protect access to list in req_arr */
struct mutex f_mutex; /* protect against changes in this fd */
int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */
Sg_scatter_hold reserve; /* buffer held for this file descriptor */
struct list_head rq_list; /* head of request list */
struct fasync_struct *async_qp; /* used by asynchronous notification */
Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */
char cmd_q; /* 1 -> allow command queuing, 0 -> don't */
unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */
char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */
char mmap_called; /* 0 -> mmap() never called on this fd */
char res_in_use; /* 1 -> 'reserve' array in use */
struct kref f_ref;
struct execute_work ew;
} Sg_fd;
typedef struct sg_device { /* holds the state of each scsi generic device */
struct scsi_device *device;
wait_queue_head_t open_wait; /* queue open() when O_EXCL present */
struct mutex open_rel_lock; /* held when in open() or release() */
int sg_tablesize; /* adapter's max scatter-gather table size */
u32 index; /* device index number */
struct list_head sfds;
rwlock_t sfd_lock; /* protect access to sfd list */
atomic_t detaching; /* 0->device usable, 1->device detaching */
bool exclude; /* 1->open(O_EXCL) succeeded and is active */
int open_cnt; /* count of opens (perhaps < num(sfds) ) */
char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
struct gendisk *disk;
struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */
struct kref d_ref;
} Sg_device;
/* tasklet or soft irq callback */
static void sg_rq_end_io(struct request *rq, blk_status_t status);
static int sg_start_req(Sg_request *srp, unsigned char *cmd);
static int sg_finish_rem_req(Sg_request * srp);
static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
Sg_request * srp);
static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
const char __user *buf, size_t count, int blocking,
int read_only, int sg_io_owned, Sg_request **o_srp);
static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
unsigned char *cmnd, int timeout, int blocking);
static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
static void sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp);
static void sg_build_reserve(Sg_fd * sfp, int req_size);
static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
static Sg_fd *sg_add_sfp(Sg_device * sdp);
static void sg_remove_sfp(struct kref *);
static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
static Sg_device *sg_get_dev(int dev);
static void sg_device_destroy(struct kref *kref);
#define SZ_SG_HEADER sizeof(struct sg_header)
#define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
#define SZ_SG_IOVEC sizeof(sg_iovec_t)
#define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
#define sg_printk(prefix, sdp, fmt, a...) \
sdev_prefix_printk(prefix, (sdp)->device, \
(sdp)->disk->disk_name, fmt, ##a)
static int sg_allow_access(struct file *filp, unsigned char *cmd)
{
struct sg_fd *sfp = filp->private_data;
if (sfp->parentdp->device->type == TYPE_SCANNER)
return 0;
return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
}
static int
open_wait(Sg_device *sdp, int flags)
{
int retval = 0;
if (flags & O_EXCL) {
while (sdp->open_cnt > 0) {
mutex_unlock(&sdp->open_rel_lock);
retval = wait_event_interruptible(sdp->open_wait,
(atomic_read(&sdp->detaching) ||
!sdp->open_cnt));
mutex_lock(&sdp->open_rel_lock);
if (retval) /* -ERESTARTSYS */
return retval;
if (atomic_read(&sdp->detaching))
return -ENODEV;
}
} else {
while (sdp->exclude) {
mutex_unlock(&sdp->open_rel_lock);
retval = wait_event_interruptible(sdp->open_wait,
(atomic_read(&sdp->detaching) ||
!sdp->exclude));
mutex_lock(&sdp->open_rel_lock);
if (retval) /* -ERESTARTSYS */
return retval;
if (atomic_read(&sdp->detaching))
return -ENODEV;
}
}
return retval;
}
/* Returns 0 on success, else a negated errno value */
static int
sg_open(struct inode *inode, struct file *filp)
{
int dev = iminor(inode);
int flags = filp->f_flags;
struct request_queue *q;
Sg_device *sdp;
Sg_fd *sfp;
int retval;
nonseekable_open(inode, filp);
if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE)))
return -EPERM; /* Can't lock it with read only access */
sdp = sg_get_dev(dev);
if (IS_ERR(sdp))
return PTR_ERR(sdp);
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_open: flags=0x%x\n", flags));
/* This driver's module count bumped by fops_get in <linux/fs.h> */
/* Prevent the device driver from vanishing while we sleep */
retval = scsi_device_get(sdp->device);
if (retval)
goto sg_put;
retval = scsi_autopm_get_device(sdp->device);
if (retval)
goto sdp_put;
/* scsi_block_when_processing_errors() may block so bypass
* check if O_NONBLOCK. Permits SCSI commands to be issued
* during error recovery. Tread carefully. */
if (!((flags & O_NONBLOCK) ||
scsi_block_when_processing_errors(sdp->device))) {
retval = -ENXIO;
/* we are in error recovery for this device */
goto error_out;
}
mutex_lock(&sdp->open_rel_lock);
if (flags & O_NONBLOCK) {
if (flags & O_EXCL) {
if (sdp->open_cnt > 0) {
retval = -EBUSY;
goto error_mutex_locked;
}
} else {
if (sdp->exclude) {
retval = -EBUSY;
goto error_mutex_locked;
}
}
} else {
retval = open_wait(sdp, flags);
if (retval) /* -ERESTARTSYS or -ENODEV */
goto error_mutex_locked;
}
/* N.B. at this point we are holding the open_rel_lock */
if (flags & O_EXCL)
sdp->exclude = true;
if (sdp->open_cnt < 1) { /* no existing opens */
sdp->sgdebug = 0;
q = sdp->device->request_queue;
sdp->sg_tablesize = queue_max_segments(q);
}
sfp = sg_add_sfp(sdp);
if (IS_ERR(sfp)) {
retval = PTR_ERR(sfp);
goto out_undo;
}
filp->private_data = sfp;
sdp->open_cnt++;
mutex_unlock(&sdp->open_rel_lock);
retval = 0;
sg_put:
kref_put(&sdp->d_ref, sg_device_destroy);
return retval;
out_undo:
if (flags & O_EXCL) {
sdp->exclude = false; /* undo if error */
wake_up_interruptible(&sdp->open_wait);
}
error_mutex_locked:
mutex_unlock(&sdp->open_rel_lock);
error_out:
scsi_autopm_put_device(sdp->device);
sdp_put:
scsi_device_put(sdp->device);
goto sg_put;
}
/* Release resources associated with a successful sg_open()
* Returns 0 on success, else a negated errno value */
static int
sg_release(struct inode *inode, struct file *filp)
{
Sg_device *sdp;
Sg_fd *sfp;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_release\n"));
mutex_lock(&sdp->open_rel_lock);
scsi_autopm_put_device(sdp->device);
kref_put(&sfp->f_ref, sg_remove_sfp);
sdp->open_cnt--;
/* possibly many open()s waiting on exclude clearing, start many;
* only open(O_EXCL)s wait on 0==open_cnt so only start one */
if (sdp->exclude) {
sdp->exclude = false;
wake_up_interruptible_all(&sdp->open_wait);
} else if (0 == sdp->open_cnt) {
wake_up_interruptible(&sdp->open_wait);
}
mutex_unlock(&sdp->open_rel_lock);
return 0;
}
static ssize_t
sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
{
Sg_device *sdp;
Sg_fd *sfp;
Sg_request *srp;
int req_pack_id = -1;
sg_io_hdr_t *hp;
struct sg_header *old_hdr = NULL;
int retval = 0;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_read: count=%d\n", (int) count));
if (!access_ok(VERIFY_WRITE, buf, count))
return -EFAULT;
if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
if (!old_hdr)
return -ENOMEM;
if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) {
retval = -EFAULT;
goto free_old_hdr;
}
if (old_hdr->reply_len < 0) {
if (count >= SZ_SG_IO_HDR) {
sg_io_hdr_t *new_hdr;
new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
if (!new_hdr) {
retval = -ENOMEM;
goto free_old_hdr;
}
retval = __copy_from_user(new_hdr, buf, SZ_SG_IO_HDR);
req_pack_id = new_hdr->pack_id;
kfree(new_hdr);
if (retval) {
retval = -EFAULT;
goto free_old_hdr;
}
}
} else
req_pack_id = old_hdr->pack_id;
}
srp = sg_get_rq_mark(sfp, req_pack_id);
if (!srp) { /* now wait on packet to arrive */
if (atomic_read(&sdp->detaching)) {
retval = -ENODEV;
goto free_old_hdr;
}
if (filp->f_flags & O_NONBLOCK) {
retval = -EAGAIN;
goto free_old_hdr;
}
retval = wait_event_interruptible(sfp->read_wait,
(atomic_read(&sdp->detaching) ||
(srp = sg_get_rq_mark(sfp, req_pack_id))));
if (atomic_read(&sdp->detaching)) {
retval = -ENODEV;
goto free_old_hdr;
}
if (retval) {
/* -ERESTARTSYS as signal hit process */
goto free_old_hdr;
}
}
if (srp->header.interface_id != '\0') {
retval = sg_new_read(sfp, buf, count, srp);
goto free_old_hdr;
}
hp = &srp->header;
if (old_hdr == NULL) {
old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
if (!old_hdr) {
retval = -ENOMEM;
goto free_old_hdr;
}
}
memset(old_hdr, 0, SZ_SG_HEADER);
old_hdr->reply_len = (int) hp->timeout;
old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */
old_hdr->pack_id = hp->pack_id;
old_hdr->twelve_byte =
((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0;
old_hdr->target_status = hp->masked_status;
old_hdr->host_status = hp->host_status;
old_hdr->driver_status = hp->driver_status;
if ((CHECK_CONDITION & hp->masked_status) ||
(DRIVER_SENSE & hp->driver_status))
memcpy(old_hdr->sense_buffer, srp->sense_b,
sizeof (old_hdr->sense_buffer));
switch (hp->host_status) {
/* This setup of 'result' is for backward compatibility and is best
ignored by the user who should use target, host + driver status */
case DID_OK:
case DID_PASSTHROUGH:
case DID_SOFT_ERROR:
old_hdr->result = 0;
break;
case DID_NO_CONNECT:
case DID_BUS_BUSY:
case DID_TIME_OUT:
old_hdr->result = EBUSY;
break;
case DID_BAD_TARGET:
case DID_ABORT:
case DID_PARITY:
case DID_RESET:
case DID_BAD_INTR:
old_hdr->result = EIO;
break;
case DID_ERROR:
old_hdr->result = (srp->sense_b[0] == 0 &&
hp->masked_status == GOOD) ? 0 : EIO;
break;
default:
old_hdr->result = EIO;
break;
}
/* Now copy the result back to the user buffer. */
if (count >= SZ_SG_HEADER) {
if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
retval = -EFAULT;
goto free_old_hdr;
}
buf += SZ_SG_HEADER;
if (count > old_hdr->reply_len)
count = old_hdr->reply_len;
if (count > SZ_SG_HEADER) {
if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) {
retval = -EFAULT;
goto free_old_hdr;
}
}
} else
count = (old_hdr->result == 0) ? 0 : -EIO;
sg_finish_rem_req(srp);
sg_remove_request(sfp, srp);
retval = count;
free_old_hdr:
kfree(old_hdr);
return retval;
}
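/*
 * Complete a read() for a request issued via the v3 (sg_io_hdr) interface:
 * copy back any sense data, capped by both mx_sb_len and the kernel sense
 * buffer size, then copy the header itself to user space and retire the
 * request.
 */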
static ssize_t
sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
{
sg_io_hdr_t *hp = &srp->header;
int err = 0, err2;
int len;
if (count < SZ_SG_IO_HDR) {
err = -EINVAL;
goto err_out;
}
hp->sb_len_wr = 0;
if ((hp->mx_sb_len > 0) && hp->sbp) {
if ((CHECK_CONDITION & hp->masked_status) ||
(DRIVER_SENSE & hp->driver_status)) {
int sb_len = SCSI_SENSE_BUFFERSIZE;
sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len;
len = 8 + (int) srp->sense_b[7]; /* Additional sense length field */
len = (len > sb_len) ? sb_len : len;
if (copy_to_user(hp->sbp, srp->sense_b, len)) {
err = -EFAULT;
goto err_out;
}
hp->sb_len_wr = len;
}
}
if (hp->masked_status || hp->host_status || hp->driver_status)
hp->info |= SG_INFO_CHECK;
if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) {
err = -EFAULT;
goto err_out;
}
err_out:
err2 = sg_finish_rem_req(srp);
sg_remove_request(sfp, srp);
return err ? : err2 ? : count;
}
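/*
 * write() entry point for the legacy (struct sg_header) interface. If the
 * user buffer actually starts with a v3 sg_io_hdr (signalled by a negative
 * reply_len), the call is redirected to sg_new_write().
 */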
static ssize_t
sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
{
int mxsize, cmd_size, k;
int input_size, blocking;
unsigned char opcode;
Sg_device *sdp;
Sg_fd *sfp;
Sg_request *srp;
struct sg_header old_hdr;
sg_io_hdr_t *hp;
unsigned char cmnd[SG_MAX_CDB_SIZE];
if (unlikely(uaccess_kernel()))
return -EINVAL;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_write: count=%d\n", (int) count));
if (atomic_read(&sdp->detaching))
return -ENODEV;
if (!((filp->f_flags & O_NONBLOCK) ||
scsi_block_when_processing_errors(sdp->device)))
return -ENXIO;
if (!access_ok(VERIFY_READ, buf, count))
return -EFAULT; /* protects following copy_from_user()s + get_user()s */
if (count < SZ_SG_HEADER)
return -EIO;
if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
return -EFAULT;
blocking = !(filp->f_flags & O_NONBLOCK);
if (old_hdr.reply_len < 0)
return sg_new_write(sfp, filp, buf, count,
blocking, 0, 0, NULL);
if (count < (SZ_SG_HEADER + 6))
return -EIO; /* The minimum scsi command length is 6 bytes. */
if (!(srp = sg_add_request(sfp))) {
SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sdp,
"sg_write: queue full\n"));
return -EDOM;
}
buf += SZ_SG_HEADER;
__get_user(opcode, buf);
mutex_lock(&sfp->f_mutex);
if (sfp->next_cmd_len > 0) {
cmd_size = sfp->next_cmd_len;
sfp->next_cmd_len = 0; /* reset so only this write() affected */
} else {
cmd_size = COMMAND_SIZE(opcode); /* based on SCSI command group */
if ((opcode >= 0xc0) && old_hdr.twelve_byte)
cmd_size = 12;
}
mutex_unlock(&sfp->f_mutex);
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
"sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
/* Determine buffer size. */
input_size = count - cmd_size;
mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len;
mxsize -= SZ_SG_HEADER;
input_size -= SZ_SG_HEADER;
if (input_size < 0) {
sg_remove_request(sfp, srp);
return -EIO; /* User did not pass enough bytes for this command. */
}
hp = &srp->header;
hp->interface_id = '\0'; /* indicator of old interface tunnelled */
hp->cmd_len = (unsigned char) cmd_size;
hp->iovec_count = 0;
hp->mx_sb_len = 0;
if (input_size > 0)
hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV;
else
hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
hp->dxfer_len = mxsize;
if ((hp->dxfer_direction == SG_DXFER_TO_DEV) ||
(hp->dxfer_direction == SG_DXFER_TO_FROM_DEV))
hp->dxferp = (char __user *)buf + cmd_size;
else
hp->dxferp = NULL;
hp->sbp = NULL;
hp->timeout = old_hdr.reply_len; /* structure abuse ... */
hp->flags = input_size; /* structure abuse ... */
hp->pack_id = old_hdr.pack_id;
hp->usr_ptr = NULL;
if (__copy_from_user(cmnd, buf, cmd_size)) {
sg_remove_request(sfp, srp); /* don't leak the queued request on fault */
return -EFAULT;
}
/*
* SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
* but it is possible that the app intended SG_DXFER_TO_DEV, because there
* is a non-zero input_size, so emit a warning.
*/
if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) {
printk_ratelimited(KERN_WARNING
"sg_write: data in/out %d/%d bytes "
"for SCSI command 0x%x-- guessing "
"data in;\n program %s not setting "
"count and/or reply_len properly\n",
old_hdr.reply_len - (int)SZ_SG_HEADER,
input_size, (unsigned int) cmnd[0],
current->comm);
}
k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
return (k < 0) ? k : count;
}
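/*
 * write() path for the v3 (sg_io_hdr) interface, also used by the SG_IO
 * ioctl (with sg_io_owned set). Validates the header, copies in the CDB
 * and hands the request to sg_common_write().
 */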
static ssize_t
sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
size_t count, int blocking, int read_only, int sg_io_owned,
Sg_request **o_srp)
{
int k;
Sg_request *srp;
sg_io_hdr_t *hp;
unsigned char cmnd[SG_MAX_CDB_SIZE];
int timeout;
unsigned long ul_timeout;
if (count < SZ_SG_IO_HDR)
return -EINVAL;
if (!access_ok(VERIFY_READ, buf, count))
return -EFAULT; /* protects following copy_from_user()s + get_user()s */
sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */
if (!(srp = sg_add_request(sfp))) {
SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
"sg_new_write: queue full\n"));
return -EDOM;
}
srp->sg_io_owned = sg_io_owned;
hp = &srp->header;
if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
sg_remove_request(sfp, srp);
return -EFAULT;
}
if (hp->interface_id != 'S') {
sg_remove_request(sfp, srp);
return -ENOSYS;
}
if (hp->flags & SG_FLAG_MMAP_IO) {
if (hp->dxfer_len > sfp->reserve.bufflen) {
sg_remove_request(sfp, srp);
return -ENOMEM; /* MMAP_IO size must fit in reserve buffer */
}
if (hp->flags & SG_FLAG_DIRECT_IO) {
sg_remove_request(sfp, srp);
return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */
}
if (sfp->res_in_use) {
sg_remove_request(sfp, srp);
return -EBUSY; /* reserve buffer already being used */
}
}
ul_timeout = msecs_to_jiffies(srp->header.timeout);
timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX;
if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) {
sg_remove_request(sfp, srp);
return -EMSGSIZE;
}
if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) {
sg_remove_request(sfp, srp);
return -EFAULT; /* protects following copy_from_user()s + get_user()s */
}
if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
sg_remove_request(sfp, srp);
return -EFAULT;
}
if (read_only && sg_allow_access(file, cmnd)) {
sg_remove_request(sfp, srp);
return -EPERM;
}
k = sg_common_write(sfp, srp, cmnd, timeout, blocking);
if (k < 0)
return k;
if (o_srp)
*o_srp = srp;
return count;
}
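/*
 * Common tail of both write paths: initialize the status fields, build the
 * block layer request via sg_start_req(), then queue it asynchronously;
 * completion is reported through sg_rq_end_io().
 */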
static int
sg_common_write(Sg_fd * sfp, Sg_request * srp,
unsigned char *cmnd, int timeout, int blocking)
{
int k, at_head;
Sg_device *sdp = sfp->parentdp;
sg_io_hdr_t *hp = &srp->header;
srp->data.cmd_opcode = cmnd[0]; /* hold opcode of command */
hp->status = 0;
hp->masked_status = 0;
hp->msg_status = 0;
hp->info = 0;
hp->host_status = 0;
hp->driver_status = 0;
hp->resid = 0;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
(int) cmnd[0], (int) hp->cmd_len));
if (hp->dxfer_len >= SZ_256M)
return -EINVAL;
k = sg_start_req(srp, cmnd);
if (k) {
SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
"sg_common_write: start_req err=%d\n", k));
sg_finish_rem_req(srp);
sg_remove_request(sfp, srp);
return k; /* probably out of space --> ENOMEM */
}
if (atomic_read(&sdp->detaching)) {
if (srp->bio) {
scsi_req_free_cmd(scsi_req(srp->rq));
blk_end_request_all(srp->rq, BLK_STS_IOERR);
srp->rq = NULL;
}
sg_finish_rem_req(srp);
sg_remove_request(sfp, srp);
return -ENODEV;
}
hp->duration = jiffies_to_msecs(jiffies);
if (hp->interface_id != '\0' && /* v3 (or later) interface */
(SG_FLAG_Q_AT_TAIL & hp->flags))
at_head = 0;
else
at_head = 1;
srp->rq->timeout = timeout;
kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
srp->rq, at_head, sg_rq_end_io);
return 0;
}
static int srp_done(Sg_fd *sfp, Sg_request *srp)
{
unsigned long flags;
int ret;
read_lock_irqsave(&sfp->rq_list_lock, flags);
ret = srp->done;
read_unlock_irqrestore(&sfp->rq_list_lock, flags);
return ret;
}
static int max_sectors_bytes(struct request_queue *q)
{
unsigned int max_sectors = queue_max_sectors(q);
max_sectors = min_t(unsigned int, max_sectors, INT_MAX >> 9);
return max_sectors << 9;
}
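/*
 * Snapshot the state of each queued request into the fixed-size table
 * returned by SG_GET_REQUEST_TABLE. Caller holds rq_list_lock.
 */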
static void
sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
{
Sg_request *srp;
int val;
unsigned int ms;
val = 0;
list_for_each_entry(srp, &sfp->rq_list, entry) {
if (val >= SG_MAX_QUEUE) /* rinfo[] has exactly SG_MAX_QUEUE entries */
break;
rinfo[val].req_state = srp->done + 1;
rinfo[val].problem =
srp->header.masked_status &
srp->header.host_status &
srp->header.driver_status;
if (srp->done)
rinfo[val].duration =
srp->header.duration;
else {
ms = jiffies_to_msecs(jiffies);
rinfo[val].duration =
(ms > srp->header.duration) ?
(ms - srp->header.duration) : 0;
}
rinfo[val].orphan = srp->orphan;
rinfo[val].sg_io_owned = srp->sg_io_owned;
rinfo[val].pack_id = srp->header.pack_id;
rinfo[val].usr_ptr = srp->header.usr_ptr;
val++;
}
}
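/*
 * Illustrative user-space sketch (documentation only, not part of this
 * driver) of issuing a 6-byte TEST UNIT READY through the SG_IO ioctl
 * handled below. It assumes a userspace program with <fcntl.h>,
 * <string.h>, <sys/ioctl.h> and <scsi/sg.h> included; "/dev/sg0" is a
 * hypothetical device node and error handling is mostly omitted:
 *
 *	unsigned char cdb[6] = { 0 };		// TEST UNIT READY
 *	unsigned char sense[32];
 *	sg_io_hdr_t io_hdr;
 *	int fd = open("/dev/sg0", O_RDWR);	// assumed device path
 *
 *	memset(&io_hdr, 0, sizeof(io_hdr));
 *	io_hdr.interface_id = 'S';		// required, see sg_new_write()
 *	io_hdr.cmd_len = sizeof(cdb);
 *	io_hdr.cmdp = cdb;
 *	io_hdr.dxfer_direction = SG_DXFER_NONE;
 *	io_hdr.mx_sb_len = sizeof(sense);
 *	io_hdr.sbp = sense;
 *	io_hdr.timeout = 20000;			// milliseconds
 *	if (ioctl(fd, SG_IO, &io_hdr) < 0)
 *		perror("SG_IO");
 */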
static long
sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
void __user *p = (void __user *)arg;
int __user *ip = p;
int result, val, read_only;
Sg_device *sdp;
Sg_fd *sfp;
Sg_request *srp;
unsigned long iflags;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_ioctl: cmd=0x%x\n", (int) cmd_in));
read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));
switch (cmd_in) {
case SG_IO:
if (atomic_read(&sdp->detaching))
return -ENODEV;
if (!scsi_block_when_processing_errors(sdp->device))
return -ENXIO;
if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
return -EFAULT;
result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
1, read_only, 1, &srp);
if (result < 0)
return result;
result = wait_event_interruptible(sfp->read_wait,
(srp_done(sfp, srp) || atomic_read(&sdp->detaching)));
if (atomic_read(&sdp->detaching))
return -ENODEV;
write_lock_irq(&sfp->rq_list_lock);
if (srp->done) {
srp->done = 2;
write_unlock_irq(&sfp->rq_list_lock);
result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
return (result < 0) ? result : 0;
}
srp->orphan = 1;
write_unlock_irq(&sfp->rq_list_lock);
return result; /* -ERESTARTSYS because signal hit process */
case SG_SET_TIMEOUT:
result = get_user(val, ip);
if (result)
return result;
if (val < 0)
return -EIO;
if (val >= mult_frac((s64)INT_MAX, USER_HZ, HZ))
val = min_t(s64, mult_frac((s64)INT_MAX, USER_HZ, HZ),
INT_MAX);
sfp->timeout_user = val;
sfp->timeout = mult_frac(val, HZ, USER_HZ);
return 0;
case SG_GET_TIMEOUT: /* N.B. User receives timeout as return value */
/* strange ..., for backward compatibility */
return sfp->timeout_user;
case SG_SET_FORCE_LOW_DMA:
/*
* N.B. This ioctl never worked properly, but failed to
* return an error value. So return '0' to keep compatibility
* with legacy applications.
*/
return 0;
case SG_GET_LOW_DMA:
return put_user((int) sdp->device->host->unchecked_isa_dma, ip);
case SG_GET_SCSI_ID:
if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
return -EFAULT;
else {
sg_scsi_id_t __user *sg_idp = p;
if (atomic_read(&sdp->detaching))
return -ENODEV;
__put_user((int) sdp->device->host->host_no,
&sg_idp->host_no);
__put_user((int) sdp->device->channel,
&sg_idp->channel);
__put_user((int) sdp->device->id, &sg_idp->scsi_id);
__put_user((int) sdp->device->lun, &sg_idp->lun);
__put_user((int) sdp->device->type, &sg_idp->scsi_type);
__put_user((short) sdp->device->host->cmd_per_lun,
&sg_idp->h_cmd_per_lun);
__put_user((short) sdp->device->queue_depth,
&sg_idp->d_queue_depth);
__put_user(0, &sg_idp->unused[0]);
__put_user(0, &sg_idp->unused[1]);
return 0;
}
case SG_SET_FORCE_PACK_ID:
result = get_user(val, ip);
if (result)
return result;
sfp->force_packid = val ? 1 : 0;
return 0;
case SG_GET_PACK_ID:
if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
return -EFAULT;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
list_for_each_entry(srp, &sfp->rq_list, entry) {
if ((1 == srp->done) && (!srp->sg_io_owned)) {
read_unlock_irqrestore(&sfp->rq_list_lock,
iflags);
__put_user(srp->header.pack_id, ip);
return 0;
}
}
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
__put_user(-1, ip);
return 0;
case SG_GET_NUM_WAITING:
read_lock_irqsave(&sfp->rq_list_lock, iflags);
val = 0;
list_for_each_entry(srp, &sfp->rq_list, entry) {
if ((1 == srp->done) && (!srp->sg_io_owned))
++val;
}
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return put_user(val, ip);
case SG_GET_SG_TABLESIZE:
return put_user(sdp->sg_tablesize, ip);
case SG_SET_RESERVED_SIZE:
result = get_user(val, ip);
if (result)
return result;
if (val < 0)
return -EINVAL;
val = min_t(int, val,
max_sectors_bytes(sdp->device->request_queue));
mutex_lock(&sfp->f_mutex);
if (val != sfp->reserve.bufflen) {
if (sfp->mmap_called ||
sfp->res_in_use) {
mutex_unlock(&sfp->f_mutex);
return -EBUSY;
}
sg_remove_scat(sfp, &sfp->reserve);
sg_build_reserve(sfp, val);
}
mutex_unlock(&sfp->f_mutex);
return 0;
case SG_GET_RESERVED_SIZE:
val = min_t(int, sfp->reserve.bufflen,
max_sectors_bytes(sdp->device->request_queue));
return put_user(val, ip);
case SG_SET_COMMAND_Q:
result = get_user(val, ip);
if (result)
return result;
sfp->cmd_q = val ? 1 : 0;
return 0;
case SG_GET_COMMAND_Q:
return put_user((int) sfp->cmd_q, ip);
case SG_SET_KEEP_ORPHAN:
result = get_user(val, ip);
if (result)
return result;
sfp->keep_orphan = val;
return 0;
case SG_GET_KEEP_ORPHAN:
return put_user((int) sfp->keep_orphan, ip);
case SG_NEXT_CMD_LEN:
result = get_user(val, ip);
if (result)
return result;
if (val > SG_MAX_CDB_SIZE)
return -ENOMEM;
sfp->next_cmd_len = (val > 0) ? val : 0;
return 0;
case SG_GET_VERSION_NUM:
return put_user(sg_version_num, ip);
case SG_GET_ACCESS_COUNT:
/* faked - we don't have a real access count anymore */
val = (sdp->device ? 1 : 0);
return put_user(val, ip);
case SG_GET_REQUEST_TABLE:
if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
return -EFAULT;
else {
sg_req_info_t *rinfo;
rinfo = kzalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
GFP_KERNEL);
if (!rinfo)
return -ENOMEM;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
sg_fill_request_table(sfp, rinfo);
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
result = __copy_to_user(p, rinfo,
SZ_SG_REQ_INFO * SG_MAX_QUEUE);
result = result ? -EFAULT : 0;
kfree(rinfo);
return result;
}
case SG_EMULATED_HOST:
if (atomic_read(&sdp->detaching))
return -ENODEV;
return put_user(sdp->device->host->hostt->emulated, ip);
case SCSI_IOCTL_SEND_COMMAND:
if (atomic_read(&sdp->detaching))
return -ENODEV;
if (read_only) {
unsigned char opcode = WRITE_6;
Scsi_Ioctl_Command __user *siocp = p;
if (copy_from_user(&opcode, siocp->data, 1))
return -EFAULT;
if (sg_allow_access(filp, &opcode))
return -EPERM;
}
return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p);
case SG_SET_DEBUG:
result = get_user(val, ip);
if (result)
return result;
sdp->sgdebug = (char) val;
return 0;
case BLKSECTGET:
return put_user(max_sectors_bytes(sdp->device->request_queue),
ip);
case BLKTRACESETUP:
return blk_trace_setup(sdp->device->request_queue,
sdp->disk->disk_name,
MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
NULL, p);
case BLKTRACESTART:
return blk_trace_startstop(sdp->device->request_queue, 1);
case BLKTRACESTOP:
return blk_trace_startstop(sdp->device->request_queue, 0);
case BLKTRACETEARDOWN:
return blk_trace_remove(sdp->device->request_queue);
case SCSI_IOCTL_GET_IDLUN:
case SCSI_IOCTL_GET_BUS_NUMBER:
case SCSI_IOCTL_PROBE_HOST:
case SG_GET_TRANSFORM:
case SG_SCSI_RESET:
if (atomic_read(&sdp->detaching))
return -ENODEV;
break;
default:
if (read_only)
return -EPERM; /* don't know so take safe approach */
break;
}
result = scsi_ioctl_block_when_processing_errors(sdp->device,
cmd_in, filp->f_flags & O_NDELAY);
if (result)
return result;
return scsi_ioctl(sdp->device, cmd_in, p);
}
#ifdef CONFIG_COMPAT
static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
Sg_device *sdp;
Sg_fd *sfp;
struct scsi_device *sdev;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
sdev = sdp->device;
if (sdev->host->hostt->compat_ioctl) {
int ret;
ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
return ret;
}
return -ENOIOCTLCMD;
}
#endif
static unsigned int
sg_poll(struct file *filp, poll_table * wait)
{
unsigned int res = 0;
Sg_device *sdp;
Sg_fd *sfp;
Sg_request *srp;
int count = 0;
unsigned long iflags;
sfp = filp->private_data;
if (!sfp)
return POLLERR;
sdp = sfp->parentdp;
if (!sdp)
return POLLERR;
poll_wait(filp, &sfp->read_wait, wait);
read_lock_irqsave(&sfp->rq_list_lock, iflags);
list_for_each_entry(srp, &sfp->rq_list, entry) {
/* if any read waiting, flag it */
if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
res = POLLIN | POLLRDNORM;
++count;
}
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
if (atomic_read(&sdp->detaching))
res |= POLLHUP;
else if (!sfp->cmd_q) {
if (0 == count)
res |= POLLOUT | POLLWRNORM;
} else if (count < SG_MAX_QUEUE)
res |= POLLOUT | POLLWRNORM;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_poll: res=0x%x\n", (int) res));
return res;
}
static int
sg_fasync(int fd, struct file *filp, int mode)
{
Sg_device *sdp;
Sg_fd *sfp;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_fasync: mode=%d\n", mode));
return fasync_helper(fd, filp, mode, &sfp->async_qp);
}
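/*
 * Page fault handler for mappings of the per-fd reserve buffer: walk the
 * reserve scatter list in page-order-sized chunks to find the page that
 * backs the faulting offset, take a reference on it and return it.
 */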
static int
sg_vma_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
Sg_fd *sfp;
unsigned long offset, len, sa;
Sg_scatter_hold *rsv_schp;
int k, length;
if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
return VM_FAULT_SIGBUS;
rsv_schp = &sfp->reserve;
offset = vmf->pgoff << PAGE_SHIFT;
if (offset >= rsv_schp->bufflen)
return VM_FAULT_SIGBUS;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp,
"sg_vma_fault: offset=%lu, scatg=%d\n",
offset, rsv_schp->k_use_sg));
sa = vma->vm_start;
length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
len = vma->vm_end - sa;
len = (len < length) ? len : length;
if (offset < len) {
struct page *page = nth_page(rsv_schp->pages[k],
offset >> PAGE_SHIFT);
get_page(page); /* increment page count */
vmf->page = page;
return 0; /* success */
}
sa += len;
offset -= len;
}
return VM_FAULT_SIGBUS;
}
static const struct vm_operations_struct sg_mmap_vm_ops = {
.fault = sg_vma_fault,
};
static int
sg_mmap(struct file *filp, struct vm_area_struct *vma)
{
Sg_fd *sfp;
unsigned long req_sz, len, sa;
Sg_scatter_hold *rsv_schp;
int k, length;
int ret = 0;
if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
return -ENXIO;
req_sz = vma->vm_end - vma->vm_start;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp,
"sg_mmap starting, vm_start=%p, len=%d\n",
(void *) vma->vm_start, (int) req_sz));
if (vma->vm_pgoff)
return -EINVAL; /* want no offset */
rsv_schp = &sfp->reserve;
mutex_lock(&sfp->f_mutex);
if (req_sz > rsv_schp->bufflen) {
ret = -ENOMEM; /* cannot map more than reserved buffer */
goto out;
}
sa = vma->vm_start;
length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
len = vma->vm_end - sa;
len = (len < length) ? len : length;
sa += len;
}
sfp->mmap_called = 1;
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = sfp;
vma->vm_ops = &sg_mmap_vm_ops;
out:
mutex_unlock(&sfp->f_mutex);
return ret;
}
static void
sg_rq_end_io_usercontext(struct work_struct *work)
{
struct sg_request *srp = container_of(work, struct sg_request, ew.work);
struct sg_fd *sfp = srp->parentfp;
sg_finish_rem_req(srp);
sg_remove_request(sfp, srp);
kref_put(&sfp->f_ref, sg_remove_sfp);
}
/*
* This function is a "bottom half" handler that is called by the mid
* level when a command is completed (or has failed).
*/
static void
sg_rq_end_io(struct request *rq, blk_status_t status)
{
struct sg_request *srp = rq->end_io_data;
struct scsi_request *req = scsi_req(rq);
Sg_device *sdp;
Sg_fd *sfp;
unsigned long iflags;
unsigned int ms;
char *sense;
int result, resid, done = 1;
if (WARN_ON(srp->done != 0))
return;
sfp = srp->parentfp;
if (WARN_ON(sfp == NULL))
return;
sdp = sfp->parentdp;
if (unlikely(atomic_read(&sdp->detaching)))
pr_info("%s: device detaching\n", __func__);
sense = req->sense;
result = req->result;
resid = req->resid_len;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
"sg_cmd_done: pack_id=%d, res=0x%x\n",
srp->header.pack_id, result));
srp->header.resid = resid;
ms = jiffies_to_msecs(jiffies);
srp->header.duration = (ms > srp->header.duration) ?
(ms - srp->header.duration) : 0;
if (0 != result) {
struct scsi_sense_hdr sshdr;
srp->header.status = 0xff & result;
srp->header.masked_status = status_byte(result);
srp->header.msg_status = msg_byte(result);
srp->header.host_status = host_byte(result);
srp->header.driver_status = driver_byte(result);
if ((sdp->sgdebug > 0) &&
((CHECK_CONDITION == srp->header.masked_status) ||
(COMMAND_TERMINATED == srp->header.masked_status)))
__scsi_print_sense(sdp->device, __func__, sense,
SCSI_SENSE_BUFFERSIZE);
/* Following if statement is a patch supplied by Eric Youngdale */
if (driver_byte(result) != 0
&& scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)
&& !scsi_sense_is_deferred(&sshdr)
&& sshdr.sense_key == UNIT_ATTENTION
&& sdp->device->removable) {
/* Detected possible disc change. Set the bit - this */
/* may be used if there are filesystems using this device */
sdp->device->changed = 1;
}
}
if (req->sense_len)
memcpy(srp->sense_b, req->sense, SCSI_SENSE_BUFFERSIZE);
/* Rely on write phase to clean out srp status values, so no "else" */
/*
* Free the request as soon as it is complete so that its resources
* can be reused without waiting for userspace to read() the
* result. But keep the associated bio (if any) around until
* blk_rq_unmap_user() can be called from user context.
*/
srp->rq = NULL;
scsi_req_free_cmd(scsi_req(rq));
__blk_put_request(rq->q, rq);
write_lock_irqsave(&sfp->rq_list_lock, iflags);
if (unlikely(srp->orphan)) {
if (sfp->keep_orphan)
srp->sg_io_owned = 0;
else
done = 0;
}
srp->done = done;
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
if (likely(done)) {
/* Now wake up any sg_read() that is waiting for this
* packet.
*/
wake_up_interruptible(&sfp->read_wait);
kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
kref_put(&sfp->f_ref, sg_remove_sfp);
} else {
INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext);
schedule_work(&srp->ew.work);
}
}
static const struct file_operations sg_fops = {
.owner = THIS_MODULE,
.read = sg_read,
.write = sg_write,
.poll = sg_poll,
.unlocked_ioctl = sg_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = sg_compat_ioctl,
#endif
.open = sg_open,
.mmap = sg_mmap,
.release = sg_release,
.fasync = sg_fasync,
.llseek = no_llseek,
};
static struct class *sg_sysfs_class;
static int sg_sysfs_valid = 0;
static Sg_device *
sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
{
struct request_queue *q = scsidp->request_queue;
Sg_device *sdp;
unsigned long iflags;
int error;
u32 k;
sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL);
if (!sdp) {
sdev_printk(KERN_WARNING, scsidp, "%s: kzalloc Sg_device "
"failure\n", __func__);
return ERR_PTR(-ENOMEM);
}
idr_preload(GFP_KERNEL);
write_lock_irqsave(&sg_index_lock, iflags);
error = idr_alloc(&sg_index_idr, sdp, 0, SG_MAX_DEVS, GFP_NOWAIT);
if (error < 0) {
if (error == -ENOSPC) {
sdev_printk(KERN_WARNING, scsidp,
"Unable to attach sg device type=%d, minor number exceeds %d\n",
scsidp->type, SG_MAX_DEVS - 1);
error = -ENODEV;
} else {
sdev_printk(KERN_WARNING, scsidp, "%s: idr "
"allocation Sg_device failure: %d\n",
__func__, error);
}
goto out_unlock;
}
k = error;
SCSI_LOG_TIMEOUT(3, sdev_printk(KERN_INFO, scsidp,
"sg_alloc: dev=%d \n", k));
sprintf(disk->disk_name, "sg%d", k);
disk->first_minor = k;
sdp->disk = disk;
sdp->device = scsidp;
mutex_init(&sdp->open_rel_lock);
INIT_LIST_HEAD(&sdp->sfds);
init_waitqueue_head(&sdp->open_wait);
atomic_set(&sdp->detaching, 0);
rwlock_init(&sdp->sfd_lock);
sdp->sg_tablesize = queue_max_segments(q);
sdp->index = k;
kref_init(&sdp->d_ref);
error = 0;
out_unlock:
write_unlock_irqrestore(&sg_index_lock, iflags);
idr_preload_end();
if (error) {
kfree(sdp);
return ERR_PTR(error);
}
return sdp;
}
static int
sg_add_device(struct device *cl_dev, struct class_interface *cl_intf)
{
struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
struct gendisk *disk;
Sg_device *sdp = NULL;
struct cdev * cdev = NULL;
int error;
unsigned long iflags;
disk = alloc_disk(1);
if (!disk) {
pr_warn("%s: alloc_disk failed\n", __func__);
return -ENOMEM;
}
disk->major = SCSI_GENERIC_MAJOR;
error = -ENOMEM;
cdev = cdev_alloc();
if (!cdev) {
pr_warn("%s: cdev_alloc failed\n", __func__);
goto out;
}
cdev->owner = THIS_MODULE;
cdev->ops = &sg_fops;
sdp = sg_alloc(disk, scsidp);
if (IS_ERR(sdp)) {
pr_warn("%s: sg_alloc failed\n", __func__);
error = PTR_ERR(sdp);
goto out;
}
error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1);
if (error)
goto cdev_add_err;
sdp->cdev = cdev;
if (sg_sysfs_valid) {
struct device *sg_class_member;
sg_class_member = device_create(sg_sysfs_class, cl_dev->parent,
MKDEV(SCSI_GENERIC_MAJOR,
sdp->index),
sdp, "%s", disk->disk_name);
if (IS_ERR(sg_class_member)) {
pr_err("%s: device_create failed\n", __func__);
error = PTR_ERR(sg_class_member);
goto cdev_add_err;
}
error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
&sg_class_member->kobj, "generic");
if (error)
pr_err("%s: unable to make symlink 'generic' back "
"to sg%d\n", __func__, sdp->index);
} else
pr_warn("%s: sg_sys Invalid\n", __func__);
sdev_printk(KERN_NOTICE, scsidp, "Attached scsi generic sg%d "
"type %d\n", sdp->index, scsidp->type);
dev_set_drvdata(cl_dev, sdp);
return 0;
cdev_add_err:
write_lock_irqsave(&sg_index_lock, iflags);
idr_remove(&sg_index_idr, sdp->index);
write_unlock_irqrestore(&sg_index_lock, iflags);
kfree(sdp);
out:
put_disk(disk);
if (cdev)
cdev_del(cdev);
return error;
}
static void
sg_device_destroy(struct kref *kref)
{
struct sg_device *sdp = container_of(kref, struct sg_device, d_ref);
unsigned long flags;
/* CAUTION! Note that the device can still be found via idr_find()
* even though the refcount is 0. Therefore, do idr_remove() BEFORE
* any other cleanup.
*/
write_lock_irqsave(&sg_index_lock, flags);
idr_remove(&sg_index_idr, sdp->index);
write_unlock_irqrestore(&sg_index_lock, flags);
SCSI_LOG_TIMEOUT(3,
sg_printk(KERN_INFO, sdp, "sg_device_destroy\n"));
put_disk(sdp->disk);
kfree(sdp);
}
static void
sg_remove_device(struct device *cl_dev, struct class_interface *cl_intf)
{
struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
Sg_device *sdp = dev_get_drvdata(cl_dev);
unsigned long iflags;
Sg_fd *sfp;
int val;
if (!sdp)
return;
/* want sdp->detaching non-zero as soon as possible */
val = atomic_inc_return(&sdp->detaching);
if (val > 1)
return; /* only want to do following once per device */
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"%s\n", __func__));
read_lock_irqsave(&sdp->sfd_lock, iflags);
list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
wake_up_interruptible_all(&sfp->read_wait);
kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
}
wake_up_interruptible_all(&sdp->open_wait);
read_unlock_irqrestore(&sdp->sfd_lock, iflags);
sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
cdev_del(sdp->cdev);
sdp->cdev = NULL;
kref_put(&sdp->d_ref, sg_device_destroy);
}
module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR);
module_param_named(def_reserved_size, def_reserved_size, int,
S_IRUGO | S_IWUSR);
module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR);
MODULE_AUTHOR("Douglas Gilbert");
MODULE_DESCRIPTION("SCSI generic (sg) driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SG_VERSION_STR);
MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR);
MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element "
"size (default: max(SG_SCATTER_SZ, PAGE_SIZE))");
MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
static int __init
init_sg(void)
{
int rc;
if (scatter_elem_sz < PAGE_SIZE) {
scatter_elem_sz = PAGE_SIZE;
scatter_elem_sz_prev = scatter_elem_sz;
}
if (def_reserved_size >= 0)
sg_big_buff = def_reserved_size;
else
def_reserved_size = sg_big_buff;
rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
SG_MAX_DEVS, "sg");
if (rc)
return rc;
sg_sysfs_class = class_create(THIS_MODULE, "scsi_generic");
if (IS_ERR(sg_sysfs_class)) {
rc = PTR_ERR(sg_sysfs_class);
goto err_out;
}
sg_sysfs_valid = 1;
rc = scsi_register_interface(&sg_interface);
if (0 == rc) {
#ifdef CONFIG_SCSI_PROC_FS
sg_proc_init();
#endif /* CONFIG_SCSI_PROC_FS */
return 0;
}
class_destroy(sg_sysfs_class);
err_out:
unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
return rc;
}
static void __exit
exit_sg(void)
{
#ifdef CONFIG_SCSI_PROC_FS
sg_proc_cleanup();
#endif /* CONFIG_SCSI_PROC_FS */
scsi_unregister_interface(&sg_interface);
class_destroy(sg_sysfs_class);
sg_sysfs_valid = 0;
unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
SG_MAX_DEVS);
idr_destroy(&sg_index_idr);
}
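/*
 * Build the block layer request for srp: allocate the request and (for
 * long CDBs) the command buffer, then map the user data either directly
 * (direct I/O), through the per-fd reserve buffer, or through a freshly
 * built indirect scatter list.
 */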
static int
sg_start_req(Sg_request *srp, unsigned char *cmd)
{
int res;
struct request *rq;
struct scsi_request *req;
Sg_fd *sfp = srp->parentfp;
sg_io_hdr_t *hp = &srp->header;
int dxfer_len = (int) hp->dxfer_len;
int dxfer_dir = hp->dxfer_direction;
unsigned int iov_count = hp->iovec_count;
Sg_scatter_hold *req_schp = &srp->data;
Sg_scatter_hold *rsv_schp = &sfp->reserve;
struct request_queue *q = sfp->parentdp->device->request_queue;
struct rq_map_data *md, map_data;
int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;
unsigned char *long_cmdp = NULL;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_start_req: dxfer_len=%d\n",
dxfer_len));
if (hp->cmd_len > BLK_MAX_CDB) {
long_cmdp = kzalloc(hp->cmd_len, GFP_KERNEL);
if (!long_cmdp)
return -ENOMEM;
}
/*
* NOTE
*
* With scsi-mq enabled, there are a fixed number of preallocated
* requests equal in number to shost->can_queue. If all of the
* preallocated requests are already in use, then using GFP_ATOMIC with
* blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL
* will cause blk_get_request() to sleep until an active command
* completes, freeing up a request. Neither option is ideal, but
* GFP_KERNEL is the better choice to prevent userspace from getting an
* unexpected EWOULDBLOCK.
*
* With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
* does not sleep except under memory pressure.
*/
rq = blk_get_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, GFP_KERNEL);
if (IS_ERR(rq)) {
kfree(long_cmdp);
return PTR_ERR(rq);
}
req = scsi_req(rq);
if (hp->cmd_len > BLK_MAX_CDB)
req->cmd = long_cmdp;
memcpy(req->cmd, cmd, hp->cmd_len);
req->cmd_len = hp->cmd_len;
srp->rq = rq;
rq->end_io_data = srp;
req->retries = SG_DEFAULT_RETRIES;
if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
return 0;
if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO &&
dxfer_dir != SG_DXFER_UNKNOWN && !iov_count &&
!sfp->parentdp->device->host->unchecked_isa_dma &&
blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len))
md = NULL;
else
md = &map_data;
if (md) {
mutex_lock(&sfp->f_mutex);
if (dxfer_len <= rsv_schp->bufflen &&
!sfp->res_in_use) {
sfp->res_in_use = 1;
sg_link_reserve(sfp, srp, dxfer_len);
} else if (hp->flags & SG_FLAG_MMAP_IO) {
res = -EBUSY; /* sfp->res_in_use == 1 */
if (dxfer_len > rsv_schp->bufflen)
res = -ENOMEM;
mutex_unlock(&sfp->f_mutex);
return res;
} else {
res = sg_build_indirect(req_schp, sfp, dxfer_len);
if (res) {
mutex_unlock(&sfp->f_mutex);
return res;
}
}
mutex_unlock(&sfp->f_mutex);
md->pages = req_schp->pages;
md->page_order = req_schp->page_order;
md->nr_entries = req_schp->k_use_sg;
md->offset = 0;
md->null_mapped = hp->dxferp ? 0 : 1;
if (dxfer_dir == SG_DXFER_TO_FROM_DEV)
md->from_user = 1;
else
md->from_user = 0;
}
if (iov_count) {
struct iovec *iov = NULL;
struct iov_iter i;
res = import_iovec(rw, hp->dxferp, iov_count, 0, &iov, &i);
if (res < 0)
return res;
iov_iter_truncate(&i, hp->dxfer_len);
if (!iov_iter_count(&i)) {
kfree(iov);
return -EINVAL;
}
res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC);
kfree(iov);
} else
res = blk_rq_map_user(q, rq, md, hp->dxferp,
hp->dxfer_len, GFP_ATOMIC);
if (!res) {
srp->bio = rq->bio;
if (!md) {
req_schp->dio_in_use = 1;
hp->info |= SG_INFO_DIRECT_IO;
}
}
return res;
}
static int
sg_finish_rem_req(Sg_request *srp)
{
int ret = 0;
Sg_fd *sfp = srp->parentfp;
Sg_scatter_hold *req_schp = &srp->data;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_finish_rem_req: res_used=%d\n",
(int) srp->res_used));
if (srp->bio)
ret = blk_rq_unmap_user(srp->bio);
if (srp->rq) {
scsi_req_free_cmd(scsi_req(srp->rq));
blk_put_request(srp->rq);
}
if (srp->res_used)
sg_unlink_reserve(sfp, srp);
else
sg_remove_scat(sfp, req_schp);
return ret;
}
static int
sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
{
int sg_bufflen = tablesize * sizeof(struct page *);
gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
schp->pages = kzalloc(sg_bufflen, gfp_flags);
if (!schp->pages)
return -ENOMEM;
schp->sglist_len = sg_bufflen;
return tablesize; /* number of scat_gath elements allocated */
}
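/*
 * Allocate an indirect scatter list of buff_size bytes (rounded up to
 * SG_SECTOR_SZ) as page-order chunks of roughly scatter_elem_sz each. If a
 * chunk allocation fails, everything allocated so far is freed and the
 * whole allocation is retried at the next smaller page order.
 */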
static int
sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
{
int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
int sg_tablesize = sfp->parentdp->sg_tablesize;
int blk_size = buff_size, order;
gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
struct sg_device *sdp = sfp->parentdp;
if (blk_size < 0)
return -EFAULT;
if (0 == blk_size)
++blk_size; /* don't know why */
/* round request up to next highest SG_SECTOR_SZ byte boundary */
blk_size = ALIGN(blk_size, SG_SECTOR_SZ);
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_build_indirect: buff_size=%d, blk_size=%d\n",
buff_size, blk_size));
/* N.B. ret_sz carried into this block ... */
mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
if (mx_sc_elems < 0)
return mx_sc_elems; /* most likely -ENOMEM */
num = scatter_elem_sz;
if (unlikely(num != scatter_elem_sz_prev)) {
if (num < PAGE_SIZE) {
scatter_elem_sz = PAGE_SIZE;
scatter_elem_sz_prev = PAGE_SIZE;
} else
scatter_elem_sz_prev = num;
}
if (sdp->device->host->unchecked_isa_dma)
gfp_mask |= GFP_DMA;
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
gfp_mask |= __GFP_ZERO;
order = get_order(num);
retry:
ret_sz = 1 << (PAGE_SHIFT + order);
for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems;
k++, rem_sz -= ret_sz) {
num = (rem_sz > scatter_elem_sz_prev) ?
scatter_elem_sz_prev : rem_sz;
schp->pages[k] = alloc_pages(gfp_mask, order);
if (!schp->pages[k])
goto out;
if (num == scatter_elem_sz_prev) {
if (unlikely(ret_sz > scatter_elem_sz_prev)) {
scatter_elem_sz = ret_sz;
scatter_elem_sz_prev = ret_sz;
}
}
SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp,
"sg_build_indirect: k=%d, num=%d, ret_sz=%d\n",
k, num, ret_sz));
} /* end of for loop */
schp->page_order = order;
schp->k_use_sg = k;
SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp,
"sg_build_indirect: k_use_sg=%d, rem_sz=%d\n",
k, rem_sz));
schp->bufflen = blk_size;
if (rem_sz > 0) /* must have failed */
return -ENOMEM;
return 0;
out:
for (i = 0; i < k; i++)
__free_pages(schp->pages[i], order);
if (--order >= 0)
goto retry;
return -ENOMEM;
}
static void
sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp)
{
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
if (schp->pages && schp->sglist_len > 0) {
if (!schp->dio_in_use) {
int k;
for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
SCSI_LOG_TIMEOUT(5,
sg_printk(KERN_INFO, sfp->parentdp,
"sg_remove_scat: k=%d, pg=0x%p\n",
k, schp->pages[k]));
__free_pages(schp->pages[k], schp->page_order);
}
kfree(schp->pages);
}
}
memset(schp, 0, sizeof (*schp));
}
static int
sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
{
Sg_scatter_hold *schp = &srp->data;
int k, num;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp,
"sg_read_oxfer: num_read_xfer=%d\n",
num_read_xfer));
if ((!outp) || (num_read_xfer <= 0))
return 0;
num = 1 << (PAGE_SHIFT + schp->page_order);
for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
if (num > num_read_xfer) {
if (__copy_to_user(outp, page_address(schp->pages[k]),
num_read_xfer))
return -EFAULT;
break;
} else {
if (__copy_to_user(outp, page_address(schp->pages[k]),
num))
return -EFAULT;
num_read_xfer -= num;
if (num_read_xfer <= 0)
break;
outp += num;
}
}
return 0;
}
static void
sg_build_reserve(Sg_fd * sfp, int req_size)
{
Sg_scatter_hold *schp = &sfp->reserve;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_build_reserve: req_size=%d\n", req_size));
do {
if (req_size < PAGE_SIZE)
req_size = PAGE_SIZE;
if (0 == sg_build_indirect(schp, sfp, req_size))
return;
else
sg_remove_scat(sfp, schp);
req_size >>= 1; /* divide by 2 */
} while (req_size > (PAGE_SIZE / 2));
}
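/*
 * Satisfy a request from the per-fd reserve buffer: point the request's
 * scatter descriptors at the reserve pages instead of allocating new ones.
 * The caller must already have claimed the reserve (sfp->res_in_use).
 */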
static void
sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
{
Sg_scatter_hold *req_schp = &srp->data;
Sg_scatter_hold *rsv_schp = &sfp->reserve;
int k, num, rem;
srp->res_used = 1;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_link_reserve: size=%d\n", size));
rem = size;
num = 1 << (PAGE_SHIFT + rsv_schp->page_order);
for (k = 0; k < rsv_schp->k_use_sg; k++) {
if (rem <= num) {
req_schp->k_use_sg = k + 1;
req_schp->sglist_len = rsv_schp->sglist_len;
req_schp->pages = rsv_schp->pages;
req_schp->bufflen = size;
req_schp->page_order = rsv_schp->page_order;
break;
} else
rem -= num;
}
if (k >= rsv_schp->k_use_sg)
SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
"sg_link_reserve: BAD size\n"));
}
static void
sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
{
Sg_scatter_hold *req_schp = &srp->data;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp,
"sg_unlink_reserve: req->k_use_sg=%d\n",
(int) req_schp->k_use_sg));
req_schp->k_use_sg = 0;
req_schp->bufflen = 0;
req_schp->pages = NULL;
req_schp->page_order = 0;
req_schp->sglist_len = 0;
srp->res_used = 0;
/* Called without mutex lock to avoid deadlock */
sfp->res_in_use = 0;
}
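/*
 * Find the first completed request that is not owned by SG_IO and matches
 * pack_id (-1 matches any), and mark it done==2 under rq_list_lock so that
 * no other reader can claim it.
 */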
static Sg_request *
sg_get_rq_mark(Sg_fd * sfp, int pack_id)
{
Sg_request *resp;
unsigned long iflags;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
list_for_each_entry(resp, &sfp->rq_list, entry) {
/* look for requests that are ready + not SG_IO owned */
if ((1 == resp->done) && (!resp->sg_io_owned) &&
((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
resp->done = 2; /* guard against other readers */
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return resp;
}
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return NULL;
}
/* always adds to end of list */
static Sg_request *
sg_add_request(Sg_fd * sfp)
{
int k;
unsigned long iflags;
Sg_request *rp = sfp->req_arr;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
if (!list_empty(&sfp->rq_list)) {
if (!sfp->cmd_q)
goto out_unlock;
for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
if (!rp->parentfp)
break;
}
if (k >= SG_MAX_QUEUE)
goto out_unlock;
}
memset(rp, 0, sizeof (Sg_request));
rp->parentfp = sfp;
rp->header.duration = jiffies_to_msecs(jiffies);
list_add_tail(&rp->entry, &sfp->rq_list);
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return rp;
out_unlock:
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return NULL;
}
/* Return of 1 for found; 0 for not found */
static int
sg_remove_request(Sg_fd * sfp, Sg_request * srp)
{
unsigned long iflags;
int res = 0;
if (!sfp || !srp || list_empty(&sfp->rq_list))
return res;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
if (!list_empty(&srp->entry)) {
list_del(&srp->entry);
srp->parentfp = NULL;
res = 1;
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return res;
}
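/*
 * Allocate and initialize the per-file-descriptor state for a new open(),
 * link it onto the device's sfd list (failing if the device is detaching)
 * and size its reserve buffer.
 */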
static Sg_fd *
sg_add_sfp(Sg_device * sdp)
{
Sg_fd *sfp;
unsigned long iflags;
int bufflen;
sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
if (!sfp)
return ERR_PTR(-ENOMEM);
init_waitqueue_head(&sfp->read_wait);
rwlock_init(&sfp->rq_list_lock);
INIT_LIST_HEAD(&sfp->rq_list);
kref_init(&sfp->f_ref);
mutex_init(&sfp->f_mutex);
sfp->timeout = SG_DEFAULT_TIMEOUT;
sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
sfp->force_packid = SG_DEF_FORCE_PACK_ID;
sfp->cmd_q = SG_DEF_COMMAND_Q;
sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
sfp->parentdp = sdp;
write_lock_irqsave(&sdp->sfd_lock, iflags);
if (atomic_read(&sdp->detaching)) {
write_unlock_irqrestore(&sdp->sfd_lock, iflags);
return ERR_PTR(-ENODEV);
}
list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
write_unlock_irqrestore(&sdp->sfd_lock, iflags);
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_add_sfp: sfp=0x%p\n", sfp));
if (unlikely(sg_big_buff != def_reserved_size))
sg_big_buff = def_reserved_size;
bufflen = min_t(int, sg_big_buff,
max_sectors_bytes(sdp->device->request_queue));
sg_build_reserve(sfp, bufflen);
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
sfp->reserve.bufflen,
sfp->reserve.k_use_sg));
kref_get(&sdp->d_ref);
__module_get(THIS_MODULE);
return sfp;
}
static void
sg_remove_sfp_usercontext(struct work_struct *work)
{
struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
struct sg_device *sdp = sfp->parentdp;
Sg_request *srp;
unsigned long iflags;
/* Cleanup any responses which were never read(). */
write_lock_irqsave(&sfp->rq_list_lock, iflags);
while (!list_empty(&sfp->rq_list)) {
srp = list_first_entry(&sfp->rq_list, Sg_request, entry);
sg_finish_rem_req(srp);
list_del(&srp->entry);
srp->parentfp = NULL;
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
if (sfp->reserve.bufflen > 0) {
SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
"sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
(int) sfp->reserve.bufflen,
(int) sfp->reserve.k_use_sg));
sg_remove_scat(sfp, &sfp->reserve);
}
SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
"sg_remove_sfp: sfp=0x%p\n", sfp));
kfree(sfp);
scsi_device_put(sdp->device);
kref_put(&sdp->d_ref, sg_device_destroy);
module_put(THIS_MODULE);
}
static void
sg_remove_sfp(struct kref *kref)
{
struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref);
struct sg_device *sdp = sfp->parentdp;
unsigned long iflags;
write_lock_irqsave(&sdp->sfd_lock, iflags);
list_del(&sfp->sfd_siblings);
write_unlock_irqrestore(&sdp->sfd_lock, iflags);
INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
schedule_work(&sfp->ew.work);
}
#ifdef CONFIG_SCSI_PROC_FS
static int
sg_idr_max_id(int id, void *p, void *data)
{
int *k = data;
if (*k < id)
*k = id;
return 0;
}
static int
sg_last_dev(void)
{
int k = -1;
unsigned long iflags;
read_lock_irqsave(&sg_index_lock, iflags);
idr_for_each(&sg_index_idr, sg_idr_max_id, &k);
read_unlock_irqrestore(&sg_index_lock, iflags);
return k + 1; /* origin 1 */
}
#endif
/* must be called with sg_index_lock held */
static Sg_device *sg_lookup_dev(int dev)
{
return idr_find(&sg_index_idr, dev);
}
static Sg_device *
sg_get_dev(int dev)
{
struct sg_device *sdp;
unsigned long flags;
read_lock_irqsave(&sg_index_lock, flags);
sdp = sg_lookup_dev(dev);
if (!sdp)
sdp = ERR_PTR(-ENXIO);
else if (atomic_read(&sdp->detaching)) {
/* If sdp->detaching, then the refcount may already be 0, in
* which case it would be a bug to do kref_get().
*/
sdp = ERR_PTR(-ENODEV);
} else
kref_get(&sdp->d_ref);
read_unlock_irqrestore(&sg_index_lock, flags);
return sdp;
}
#ifdef CONFIG_SCSI_PROC_FS
static struct proc_dir_entry *sg_proc_sgp = NULL;
static char sg_proc_sg_dirname[] = "scsi/sg";
static int sg_proc_seq_show_int(struct seq_file *s, void *v);
static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
size_t count, loff_t *off);
static const struct file_operations adio_fops = {
.owner = THIS_MODULE,
.open = sg_proc_single_open_adio,
.read = seq_read,
.llseek = seq_lseek,
.write = sg_proc_write_adio,
.release = single_release,
};
static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_dressz(struct file *filp,
const char __user *buffer, size_t count, loff_t *off);
static const struct file_operations dressz_fops = {
.owner = THIS_MODULE,
.open = sg_proc_single_open_dressz,
.read = seq_read,
.llseek = seq_lseek,
.write = sg_proc_write_dressz,
.release = single_release,
};
static int sg_proc_seq_show_version(struct seq_file *s, void *v);
static int sg_proc_single_open_version(struct inode *inode, struct file *file);
static const struct file_operations version_fops = {
.owner = THIS_MODULE,
.open = sg_proc_single_open_version,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
static const struct file_operations devhdr_fops = {
.owner = THIS_MODULE,
.open = sg_proc_single_open_devhdr,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int sg_proc_seq_show_dev(struct seq_file *s, void *v);
static int sg_proc_open_dev(struct inode *inode, struct file *file);
static void * dev_seq_start(struct seq_file *s, loff_t *pos);
static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
static void dev_seq_stop(struct seq_file *s, void *v);
static const struct file_operations dev_fops = {
.owner = THIS_MODULE,
.open = sg_proc_open_dev,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static const struct seq_operations dev_seq_ops = {
.start = dev_seq_start,
.next = dev_seq_next,
.stop = dev_seq_stop,
.show = sg_proc_seq_show_dev,
};
static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
static const struct file_operations devstrs_fops = {
.owner = THIS_MODULE,
.open = sg_proc_open_devstrs,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static const struct seq_operations devstrs_seq_ops = {
.start = dev_seq_start,
.next = dev_seq_next,
.stop = dev_seq_stop,
.show = sg_proc_seq_show_devstrs,
};
static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
static int sg_proc_open_debug(struct inode *inode, struct file *file);
static const struct file_operations debug_fops = {
.owner = THIS_MODULE,
.open = sg_proc_open_debug,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static const struct seq_operations debug_seq_ops = {
.start = dev_seq_start,
.next = dev_seq_next,
.stop = dev_seq_stop,
.show = sg_proc_seq_show_debug,
};
struct sg_proc_leaf {
const char * name;
const struct file_operations * fops;
};
static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
{"allow_dio", &adio_fops},
{"debug", &debug_fops},
{"def_reserved_size", &dressz_fops},
{"device_hdr", &devhdr_fops},
{"devices", &dev_fops},
{"device_strs", &devstrs_fops},
{"version", &version_fops}
};
static int
sg_proc_init(void)
{
int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
int k;
sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
if (!sg_proc_sgp)
return 1;
for (k = 0; k < num_leaves; ++k) {
const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
}
return 0;
}
static void
sg_proc_cleanup(void)
{
int k;
int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
if (!sg_proc_sgp)
return;
for (k = 0; k < num_leaves; ++k)
remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp);
remove_proc_entry(sg_proc_sg_dirname, NULL);
}
static int sg_proc_seq_show_int(struct seq_file *s, void *v)
{
seq_printf(s, "%d\n", *((int *)s->private));
return 0;
}
static int sg_proc_single_open_adio(struct inode *inode, struct file *file)
{
return single_open(file, sg_proc_seq_show_int, &sg_allow_dio);
}
static ssize_t
sg_proc_write_adio(struct file *filp, const char __user *buffer,
size_t count, loff_t *off)
{
int err;
unsigned long num;
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
err = kstrtoul_from_user(buffer, count, 0, &num);
if (err)
return err;
sg_allow_dio = num ? 1 : 0;
return count;
}
static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
{
return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
}
static ssize_t
sg_proc_write_dressz(struct file *filp, const char __user *buffer,
size_t count, loff_t *off)
{
int err;
unsigned long k = ULONG_MAX;
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
err = kstrtoul_from_user(buffer, count, 0, &k);
if (err)
return err;
if (k <= 1048576) { /* limit "big buff" to 1 MB */
sg_big_buff = k;
return count;
}
return -ERANGE;
}
static int sg_proc_seq_show_version(struct seq_file *s, void *v)
{
seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR,
sg_version_date);
return 0;
}
static int sg_proc_single_open_version(struct inode *inode, struct file *file)
{
return single_open(file, sg_proc_seq_show_version, NULL);
}
static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v)
{
seq_puts(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\tonline\n");
return 0;
}
static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file)
{
return single_open(file, sg_proc_seq_show_devhdr, NULL);
}
struct sg_proc_deviter {
loff_t index;
size_t max;
};
static void * dev_seq_start(struct seq_file *s, loff_t *pos)
{
struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);
s->private = it;
if (!it)
return NULL;
it->index = *pos;
it->max = sg_last_dev();
if (it->index >= it->max)
return NULL;
return it;
}
static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
struct sg_proc_deviter * it = s->private;
*pos = ++it->index;
return (it->index < it->max) ? it : NULL;
}
static void dev_seq_stop(struct seq_file *s, void *v)
{
kfree(s->private);
}
static int sg_proc_open_dev(struct inode *inode, struct file *file)
{
return seq_open(file, &dev_seq_ops);
}
static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
{
struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
Sg_device *sdp;
struct scsi_device *scsidp;
unsigned long iflags;
read_lock_irqsave(&sg_index_lock, iflags);
sdp = it ? sg_lookup_dev(it->index) : NULL;
if ((NULL == sdp) || (NULL == sdp->device) ||
(atomic_read(&sdp->detaching)))
seq_puts(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
else {
scsidp = sdp->device;
seq_printf(s, "%d\t%d\t%d\t%llu\t%d\t%d\t%d\t%d\t%d\n",
scsidp->host->host_no, scsidp->channel,
scsidp->id, scsidp->lun, (int) scsidp->type,
1,
(int) scsidp->queue_depth,
(int) atomic_read(&scsidp->device_busy),
(int) scsi_device_online(scsidp));
}
read_unlock_irqrestore(&sg_index_lock, iflags);
return 0;
}
static int sg_proc_open_devstrs(struct inode *inode, struct file *file)
{
return seq_open(file, &devstrs_seq_ops);
}
static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
{
struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
Sg_device *sdp;
struct scsi_device *scsidp;
unsigned long iflags;
read_lock_irqsave(&sg_index_lock, iflags);
sdp = it ? sg_lookup_dev(it->index) : NULL;
scsidp = sdp ? sdp->device : NULL;
if (sdp && scsidp && (!atomic_read(&sdp->detaching)))
seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
scsidp->vendor, scsidp->model, scsidp->rev);
else
seq_puts(s, "<no active device>\n");
read_unlock_irqrestore(&sg_index_lock, iflags);
return 0;
}
/* must be called while holding sg_index_lock */
static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
{
int k, new_interface, blen, usg;
Sg_request *srp;
Sg_fd *fp;
const sg_io_hdr_t *hp;
const char * cp;
unsigned int ms;
k = 0;
list_for_each_entry(fp, &sdp->sfds, sfd_siblings) {
k++;
read_lock(&fp->rq_list_lock); /* irqs already disabled */
seq_printf(s, " FD(%d): timeout=%dms bufflen=%d "
"(res)sgat=%d low_dma=%d\n", k,
jiffies_to_msecs(fp->timeout),
fp->reserve.bufflen,
(int) fp->reserve.k_use_sg,
(int) sdp->device->host->unchecked_isa_dma);
seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
(int) fp->cmd_q, (int) fp->force_packid,
(int) fp->keep_orphan);
list_for_each_entry(srp, &fp->rq_list, entry) {
hp = &srp->header;
new_interface = (hp->interface_id == '\0') ? 0 : 1;
if (srp->res_used) {
if (new_interface &&
(SG_FLAG_MMAP_IO & hp->flags))
cp = " mmap>> ";
else
cp = " rb>> ";
} else {
if (SG_INFO_DIRECT_IO_MASK & hp->info)
cp = " dio>> ";
else
cp = " ";
}
seq_puts(s, cp);
blen = srp->data.bufflen;
usg = srp->data.k_use_sg;
seq_puts(s, srp->done ?
((1 == srp->done) ? "rcv:" : "fin:")
: "act:");
seq_printf(s, " id=%d blen=%d",
srp->header.pack_id, blen);
if (srp->done)
seq_printf(s, " dur=%d", hp->duration);
else {
ms = jiffies_to_msecs(jiffies);
seq_printf(s, " t_o/elap=%d/%d",
(new_interface ? hp->timeout :
jiffies_to_msecs(fp->timeout)),
(ms > hp->duration ? ms - hp->duration : 0));
}
seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
(int) srp->data.cmd_opcode);
}
if (list_empty(&fp->rq_list))
seq_puts(s, " No requests active\n");
read_unlock(&fp->rq_list_lock);
}
}
static int sg_proc_open_debug(struct inode *inode, struct file *file)
{
return seq_open(file, &debug_seq_ops);
}
static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
{
struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
Sg_device *sdp;
unsigned long iflags;
if (it && (0 == it->index))
seq_printf(s, "max_active_device=%d def_reserved_size=%d\n",
(int)it->max, sg_big_buff);
read_lock_irqsave(&sg_index_lock, iflags);
sdp = it ? sg_lookup_dev(it->index) : NULL;
if (NULL == sdp)
goto skip;
read_lock(&sdp->sfd_lock);
if (!list_empty(&sdp->sfds)) {
seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
if (atomic_read(&sdp->detaching))
seq_puts(s, "detaching pending close ");
else if (sdp->device) {
struct scsi_device *scsidp = sdp->device;
seq_printf(s, "%d:%d:%d:%llu em=%d",
scsidp->host->host_no,
scsidp->channel, scsidp->id,
scsidp->lun,
scsidp->host->hostt->emulated);
}
seq_printf(s, " sg_tablesize=%d excl=%d open_cnt=%d\n",
sdp->sg_tablesize, sdp->exclude, sdp->open_cnt);
sg_proc_debug_helper(s, sdp);
}
read_unlock(&sdp->sfd_lock);
skip:
read_unlock_irqrestore(&sg_index_lock, iflags);
return 0;
}
#endif /* CONFIG_SCSI_PROC_FS */
module_init(init_sg);
module_exit(exit_sg);
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_2828_0 |
crossvul-cpp_data_bad_866_5 | /*
* Copyright (C) 2014-2019 Yubico AB - See COPYING
*/
/* Define which PAM interfaces we provide */
#define PAM_SM_AUTH
/* Include PAM headers */
#include <security/pam_appl.h>
#include <security/pam_modules.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <stdlib.h>
#include <syslog.h>
#include <pwd.h>
#include <string.h>
#include <errno.h>
#include "util.h"
/* If secure_getenv is not defined, define it here */
#ifndef HAVE_SECURE_GETENV
char *secure_getenv(const char *);
char *secure_getenv(const char *name) {
(void)name;
return NULL;
}
#endif
static void parse_cfg(int flags, int argc, const char **argv, cfg_t *cfg) {
struct stat st;
FILE *file = NULL;
int fd = -1;
int i;
memset(cfg, 0, sizeof(cfg_t));
cfg->debug_file = stderr;
for (i = 0; i < argc; i++) {
if (strncmp(argv[i], "max_devices=", 12) == 0)
sscanf(argv[i], "max_devices=%u", &cfg->max_devs);
if (strcmp(argv[i], "manual") == 0)
cfg->manual = 1;
if (strcmp(argv[i], "debug") == 0)
cfg->debug = 1;
if (strcmp(argv[i], "nouserok") == 0)
cfg->nouserok = 1;
if (strcmp(argv[i], "openasuser") == 0)
cfg->openasuser = 1;
if (strcmp(argv[i], "alwaysok") == 0)
cfg->alwaysok = 1;
if (strcmp(argv[i], "interactive") == 0)
cfg->interactive = 1;
if (strcmp(argv[i], "cue") == 0)
cfg->cue = 1;
if (strcmp(argv[i], "nodetect") == 0)
cfg->nodetect = 1;
if (strncmp(argv[i], "authfile=", 9) == 0)
cfg->auth_file = argv[i] + 9;
if (strncmp(argv[i], "authpending_file=", 17) == 0)
cfg->authpending_file = argv[i] + 17;
if (strncmp(argv[i], "origin=", 7) == 0)
cfg->origin = argv[i] + 7;
if (strncmp(argv[i], "appid=", 6) == 0)
cfg->appid = argv[i] + 6;
if (strncmp(argv[i], "prompt=", 7) == 0)
cfg->prompt = argv[i] + 7;
if (strncmp (argv[i], "debug_file=", 11) == 0) {
const char *filename = argv[i] + 11;
if(strncmp (filename, "stdout", 6) == 0) {
cfg->debug_file = stdout;
}
else if(strncmp (filename, "stderr", 6) == 0) {
cfg->debug_file = stderr;
}
else if( strncmp (filename, "syslog", 6) == 0) {
cfg->debug_file = (FILE *)-1;
}
else {
fd = open(filename, O_WRONLY | O_APPEND | O_CLOEXEC | O_NOFOLLOW | O_NOCTTY);
if (fd >= 0 && (fstat(fd, &st) == 0) && S_ISREG(st.st_mode)) {
file = fdopen(fd, "a");
if(file != NULL) {
cfg->debug_file = file;
cfg->is_custom_debug_file = 1;
file = NULL;
fd = -1;
}
}
}
}
}
if (cfg->debug) {
D(cfg->debug_file, "called.");
D(cfg->debug_file, "flags %d argc %d", flags, argc);
for (i = 0; i < argc; i++) {
D(cfg->debug_file, "argv[%d]=%s", i, argv[i]);
}
D(cfg->debug_file, "max_devices=%d", cfg->max_devs);
D(cfg->debug_file, "debug=%d", cfg->debug);
D(cfg->debug_file, "interactive=%d", cfg->interactive);
D(cfg->debug_file, "cue=%d", cfg->cue);
D(cfg->debug_file, "nodetect=%d", cfg->nodetect);
D(cfg->debug_file, "manual=%d", cfg->manual);
D(cfg->debug_file, "nouserok=%d", cfg->nouserok);
D(cfg->debug_file, "openasuser=%d", cfg->openasuser);
D(cfg->debug_file, "alwaysok=%d", cfg->alwaysok);
D(cfg->debug_file, "authfile=%s", cfg->auth_file ? cfg->auth_file : "(null)");
D(cfg->debug_file, "authpending_file=%s", cfg->authpending_file ? cfg->authpending_file : "(null)");
D(cfg->debug_file, "origin=%s", cfg->origin ? cfg->origin : "(null)");
D(cfg->debug_file, "appid=%s", cfg->appid ? cfg->appid : "(null)");
D(cfg->debug_file, "prompt=%s", cfg->prompt ? cfg->prompt : "(null)");
}
if (fd != -1)
close(fd);
if (file != NULL)
fclose(file);
}
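/*
 * parse_cfg() consumes module arguments exactly as they appear on a PAM
 * configuration line.  A hypothetical /etc/pam.d entry exercising
 * several of the options handled above (the path and host name are
 * placeholders, not defaults shipped by the module):
 *
 *   auth required pam_u2f.so debug cue origin=pam://myhost
 *        authfile=/etc/u2f_mappings max_devices=4
 */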
#ifdef DBG
#undef DBG
#endif
#define DBG(...) \
if (cfg->debug) { \
D(cfg->debug_file, __VA_ARGS__); \
}
/* PAM entry point for authentication verification */
int pam_sm_authenticate(pam_handle_t *pamh, int flags, int argc,
const char **argv) {
struct passwd *pw = NULL, pw_s;
const char *user = NULL;
cfg_t cfg_st;
cfg_t *cfg = &cfg_st;
char buffer[BUFSIZE];
char *buf = NULL;
char *authfile_dir;
size_t authfile_dir_len;
int pgu_ret, gpn_ret;
int retval = PAM_IGNORE;
device_t *devices = NULL;
unsigned n_devices = 0;
int openasuser;
int should_free_origin = 0;
int should_free_appid = 0;
int should_free_auth_file = 0;
int should_free_authpending_file = 0;
parse_cfg(flags, argc, argv, cfg);
if (!cfg->origin) {
strcpy(buffer, DEFAULT_ORIGIN_PREFIX);
if (gethostname(buffer + strlen(DEFAULT_ORIGIN_PREFIX),
BUFSIZE - strlen(DEFAULT_ORIGIN_PREFIX)) == -1) {
DBG("Unable to get host name");
goto done;
}
DBG("Origin not specified, using \"%s\"", buffer);
cfg->origin = strdup(buffer);
if (!cfg->origin) {
DBG("Unable to allocate memory");
goto done;
} else {
should_free_origin = 1;
}
}
if (!cfg->appid) {
DBG("Appid not specified, using the same value of origin (%s)",
cfg->origin);
cfg->appid = strdup(cfg->origin);
if (!cfg->appid) {
DBG("Unable to allocate memory")
goto done;
} else {
should_free_appid = 1;
}
}
if (cfg->max_devs == 0) {
DBG("Maximum devices number not set. Using default (%d)", MAX_DEVS);
cfg->max_devs = MAX_DEVS;
}
devices = malloc(sizeof(device_t) * cfg->max_devs);
if (!devices) {
DBG("Unable to allocate memory");
retval = PAM_IGNORE;
goto done;
}
pgu_ret = pam_get_user(pamh, &user, NULL);
if (pgu_ret != PAM_SUCCESS || user == NULL) {
DBG("Unable to access user %s", user);
retval = PAM_CONV_ERR;
goto done;
}
DBG("Requesting authentication for user %s", user);
gpn_ret = getpwnam_r(user, &pw_s, buffer, sizeof(buffer), &pw);
if (gpn_ret != 0 || pw == NULL || pw->pw_dir == NULL ||
pw->pw_dir[0] != '/') {
DBG("Unable to retrieve credentials for user %s, (%s)", user,
strerror(errno));
retval = PAM_USER_UNKNOWN;
goto done;
}
DBG("Found user %s", user);
DBG("Home directory for %s is %s", user, pw->pw_dir);
if (!cfg->auth_file) {
buf = NULL;
authfile_dir = secure_getenv(DEFAULT_AUTHFILE_DIR_VAR);
if (!authfile_dir) {
DBG("Variable %s is not set. Using default value ($HOME/.config/)",
DEFAULT_AUTHFILE_DIR_VAR);
authfile_dir_len =
strlen(pw->pw_dir) + strlen("/.config") + strlen(DEFAULT_AUTHFILE) + 1;
buf = malloc(sizeof(char) * (authfile_dir_len));
if (!buf) {
DBG("Unable to allocate memory");
retval = PAM_IGNORE;
goto done;
}
snprintf(buf, authfile_dir_len,
"%s/.config%s", pw->pw_dir, DEFAULT_AUTHFILE);
} else {
DBG("Variable %s set to %s", DEFAULT_AUTHFILE_DIR_VAR, authfile_dir);
authfile_dir_len = strlen(authfile_dir) + strlen(DEFAULT_AUTHFILE) + 1;
buf = malloc(sizeof(char) * (authfile_dir_len));
if (!buf) {
DBG("Unable to allocate memory");
retval = PAM_IGNORE;
goto done;
}
snprintf(buf, authfile_dir_len,
"%s%s", authfile_dir, DEFAULT_AUTHFILE);
}
DBG("Using default authentication file %s", buf);
cfg->auth_file = buf; /* cfg takes ownership */
should_free_auth_file = 1;
buf = NULL;
} else {
DBG("Using authentication file %s", cfg->auth_file);
}
openasuser = geteuid() == 0 && cfg->openasuser;
if (openasuser) {
if (seteuid(pw_s.pw_uid)) {
DBG("Unable to switch user to uid %i", pw_s.pw_uid);
retval = PAM_IGNORE;
goto done;
}
DBG("Switched to uid %i", pw_s.pw_uid);
}
retval = get_devices_from_authfile(cfg->auth_file, user, cfg->max_devs,
cfg->debug, cfg->debug_file,
devices, &n_devices);
if (openasuser) {
if (seteuid(0)) {
DBG("Unable to switch back to uid 0");
retval = PAM_IGNORE;
goto done;
}
DBG("Switched back to uid 0");
}
if (retval != 1) {
// for nouserok; make sure errors in get_devices_from_authfile don't
// result in valid devices
n_devices = 0;
}
if (n_devices == 0) {
if (cfg->nouserok) {
DBG("Found no devices but nouserok specified. Skipping authentication");
retval = PAM_SUCCESS;
goto done;
} else if (retval != 1) {
DBG("Unable to get devices from file %s", cfg->auth_file);
retval = PAM_AUTHINFO_UNAVAIL;
goto done;
} else {
DBG("Found no devices. Aborting.");
retval = PAM_AUTHINFO_UNAVAIL;
goto done;
}
}
// Determine the full path for authpending_file in order to emit touch request notifications
if (!cfg->authpending_file) {
int actual_size = snprintf(buffer, BUFSIZE, DEFAULT_AUTHPENDING_FILE_PATH, getuid());
if (actual_size >= 0 && actual_size < BUFSIZE) {
cfg->authpending_file = strdup(buffer);
}
if (!cfg->authpending_file) {
DBG("Unable to allocate memory for the authpending_file, touch request notifications will not be emitted");
} else {
should_free_authpending_file = 1;
}
} else {
if (strlen(cfg->authpending_file) == 0) {
DBG("authpending_file is set to an empty value, touch request notifications will be disabled");
cfg->authpending_file = NULL;
}
}
int authpending_file_descriptor = -1;
if (cfg->authpending_file) {
DBG("Using file '%s' for emitting touch request notifications", cfg->authpending_file);
// Open (or create) the authpending_file to indicate that we start waiting for a touch
authpending_file_descriptor =
open(cfg->authpending_file, O_RDONLY | O_CREAT | O_CLOEXEC | O_NOFOLLOW | O_NOCTTY, 0664);
if (authpending_file_descriptor < 0) {
DBG("Unable to emit 'authentication started' notification by opening the file '%s', (%s)",
cfg->authpending_file, strerror(errno));
}
}
if (cfg->manual == 0) {
if (cfg->interactive) {
converse(pamh, PAM_PROMPT_ECHO_ON,
cfg->prompt != NULL ? cfg->prompt : DEFAULT_PROMPT);
}
retval = do_authentication(cfg, devices, n_devices, pamh);
} else {
retval = do_manual_authentication(cfg, devices, n_devices, pamh);
}
// Close the authpending_file to indicate that we stop waiting for a touch
if (authpending_file_descriptor >= 0) {
if (close(authpending_file_descriptor) < 0) {
DBG("Unable to emit 'authentication stopped' notification by closing the file '%s', (%s)",
cfg->authpending_file, strerror(errno));
}
}
if (retval != 1) {
DBG("do_authentication returned %d", retval);
retval = PAM_AUTH_ERR;
goto done;
}
retval = PAM_SUCCESS;
done:
free_devices(devices, n_devices);
if (buf) {
free(buf);
buf = NULL;
}
if (should_free_origin) {
free((char *) cfg->origin);
cfg->origin = NULL;
}
if (should_free_appid) {
free((char *) cfg->appid);
cfg->appid = NULL;
}
if (should_free_auth_file) {
free((char *) cfg->auth_file);
cfg->auth_file = NULL;
}
if (should_free_authpending_file) {
free((char *) cfg->authpending_file);
cfg->authpending_file = NULL;
}
if (cfg->alwaysok && retval != PAM_SUCCESS) {
DBG("alwaysok needed (otherwise return with %d)", retval);
retval = PAM_SUCCESS;
}
DBG("done. [%s]", pam_strerror(pamh, retval));
if (cfg->is_custom_debug_file) {
fclose(cfg->debug_file);
}
return retval;
}
PAM_EXTERN int pam_sm_setcred(pam_handle_t *pamh, int flags, int argc,
const char **argv) {
(void)pamh;
(void)flags;
(void)argc;
(void)argv;
return PAM_SUCCESS;
}
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_866_5 |
crossvul-cpp_data_bad_5609_0 | /*
* ioctl32.c: Conversion between 32bit and 64bit native ioctls.
*
* Copyright (C) 1997-2000 Jakub Jelinek (jakub@redhat.com)
* Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 2001,2002 Andi Kleen, SuSE Labs
* Copyright (C) 2003 Pavel Machek (pavel@ucw.cz)
*
* These routines maintain argument size conversion between 32bit and 64bit
* ioctls.
*/
#include <linux/joystick.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/capability.h>
#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/ioctl.h>
#include <linux/if.h>
#include <linux/if_bridge.h>
#include <linux/raid/md_u.h>
#include <linux/kd.h>
#include <linux/route.h>
#include <linux/in6.h>
#include <linux/ipv6_route.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/vt.h>
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/ppp_defs.h>
#include <linux/ppp-ioctl.h>
#include <linux/if_pppox.h>
#include <linux/mtio.h>
#include <linux/auto_fs.h>
#include <linux/auto_fs4.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>
#include <linux/fb.h>
#include <linux/videodev2.h>
#include <linux/netdevice.h>
#include <linux/raw.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/rtc.h>
#include <linux/pci.h>
#include <linux/serial.h>
#include <linux/if_tun.h>
#include <linux/ctype.h>
#include <linux/syscalls.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>
#include <linux/atalk.h>
#include <linux/gfp.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci.h>
#include <net/bluetooth/rfcomm.h>
#include <linux/capi.h>
#include <linux/gigaset_dev.h>
#ifdef CONFIG_BLOCK
#include <linux/loop.h>
#include <linux/cdrom.h>
#include <linux/fd.h>
#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/sg.h>
#endif
#include <asm/uaccess.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_bonding.h>
#include <linux/watchdog.h>
#include <linux/soundcard.h>
#include <linux/lp.h>
#include <linux/ppdev.h>
#include <linux/atm.h>
#include <linux/atmarp.h>
#include <linux/atmclip.h>
#include <linux/atmdev.h>
#include <linux/atmioc.h>
#include <linux/atmlec.h>
#include <linux/atmmpc.h>
#include <linux/atmsvc.h>
#include <linux/atm_tcp.h>
#include <linux/sonet.h>
#include <linux/atm_suni.h>
#include <linux/usb.h>
#include <linux/usbdevice_fs.h>
#include <linux/nbd.h>
#include <linux/random.h>
#include <linux/filter.h>
#include <linux/hiddev.h>
#define __DVB_CORE__
#include <linux/dvb/audio.h>
#include <linux/dvb/dmx.h>
#include <linux/dvb/frontend.h>
#include <linux/dvb/video.h>
#include <linux/sort.h>
#ifdef CONFIG_SPARC
#include <asm/fbio.h>
#endif
static int w_long(unsigned int fd, unsigned int cmd,
compat_ulong_t __user *argp)
{
mm_segment_t old_fs = get_fs();
int err;
unsigned long val;
set_fs (KERNEL_DS);
err = sys_ioctl(fd, cmd, (unsigned long)&val);
set_fs (old_fs);
if (!err && put_user(val, argp))
return -EFAULT;
return err;
}
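/*
 * w_long() is the simplest instance of the set_fs(KERNEL_DS) thunking
 * pattern used throughout this file: temporarily widen the
 * address-space limit so sys_ioctl(), which expects a __user pointer,
 * can be aimed at a kernel stack variable, then copy the value out to
 * the real 32-bit user buffer.  For example, the translation switch
 * near the end of this file routes I2C_FUNCS here, because the native
 * ioctl fills in an unsigned long that is only 32 bits wide for compat
 * callers.
 */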
struct compat_video_event {
int32_t type;
compat_time_t timestamp;
union {
video_size_t size;
unsigned int frame_rate;
} u;
};
static int do_video_get_event(unsigned int fd, unsigned int cmd,
struct compat_video_event __user *up)
{
struct video_event kevent;
mm_segment_t old_fs = get_fs();
int err;
set_fs(KERNEL_DS);
err = sys_ioctl(fd, cmd, (unsigned long) &kevent);
set_fs(old_fs);
if (!err) {
err = put_user(kevent.type, &up->type);
err |= put_user(kevent.timestamp, &up->timestamp);
err |= put_user(kevent.u.size.w, &up->u.size.w);
err |= put_user(kevent.u.size.h, &up->u.size.h);
err |= put_user(kevent.u.size.aspect_ratio,
&up->u.size.aspect_ratio);
if (err)
err = -EFAULT;
}
return err;
}
struct compat_video_still_picture {
compat_uptr_t iFrame;
int32_t size;
};
static int do_video_stillpicture(unsigned int fd, unsigned int cmd,
struct compat_video_still_picture __user *up)
{
struct video_still_picture __user *up_native;
compat_uptr_t fp;
int32_t size;
int err;
err = get_user(fp, &up->iFrame);
err |= get_user(size, &up->size);
if (err)
return -EFAULT;
up_native =
compat_alloc_user_space(sizeof(struct video_still_picture));
err = put_user(compat_ptr(fp), &up_native->iFrame);
err |= put_user(size, &up_native->size);
if (err)
return -EFAULT;
err = sys_ioctl(fd, cmd, (unsigned long) up_native);
return err;
}
struct compat_video_spu_palette {
int length;
compat_uptr_t palette;
};
static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
struct compat_video_spu_palette __user *up)
{
struct video_spu_palette __user *up_native;
compat_uptr_t palp;
int length, err;
err = get_user(palp, &up->palette);
err |= get_user(length, &up->length);
up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
err = put_user(compat_ptr(palp), &up_native->palette);
err |= put_user(length, &up_native->length);
if (err)
return -EFAULT;
err = sys_ioctl(fd, cmd, (unsigned long) up_native);
return err;
}
#ifdef CONFIG_BLOCK
typedef struct sg_io_hdr32 {
compat_int_t interface_id; /* [i] 'S' for SCSI generic (required) */
compat_int_t dxfer_direction; /* [i] data transfer direction */
unsigned char cmd_len; /* [i] SCSI command length ( <= 16 bytes) */
unsigned char mx_sb_len; /* [i] max length to write to sbp */
unsigned short iovec_count; /* [i] 0 implies no scatter gather */
compat_uint_t dxfer_len; /* [i] byte count of data transfer */
compat_uint_t dxferp; /* [i], [*io] points to data transfer memory
or scatter gather list */
compat_uptr_t cmdp; /* [i], [*i] points to command to perform */
compat_uptr_t sbp; /* [i], [*o] points to sense_buffer memory */
compat_uint_t timeout; /* [i] MAX_UINT->no timeout (unit: millisec) */
compat_uint_t flags; /* [i] 0 -> default, see SG_FLAG... */
compat_int_t pack_id; /* [i->o] unused internally (normally) */
compat_uptr_t usr_ptr; /* [i->o] unused internally */
unsigned char status; /* [o] scsi status */
unsigned char masked_status; /* [o] shifted, masked scsi status */
unsigned char msg_status; /* [o] messaging level data (optional) */
unsigned char sb_len_wr; /* [o] byte count actually written to sbp */
unsigned short host_status; /* [o] errors from host adapter */
unsigned short driver_status; /* [o] errors from software driver */
compat_int_t resid; /* [o] dxfer_len - actual_transferred */
compat_uint_t duration; /* [o] time taken by cmd (unit: millisec) */
compat_uint_t info; /* [o] auxiliary information */
} sg_io_hdr32_t; /* 64 bytes long (on sparc32) */
typedef struct sg_iovec32 {
compat_uint_t iov_base;
compat_uint_t iov_len;
} sg_iovec32_t;
static int sg_build_iovec(sg_io_hdr_t __user *sgio, void __user *dxferp, u16 iovec_count)
{
sg_iovec_t __user *iov = (sg_iovec_t __user *) (sgio + 1);
sg_iovec32_t __user *iov32 = dxferp;
int i;
for (i = 0; i < iovec_count; i++) {
u32 base, len;
if (get_user(base, &iov32[i].iov_base) ||
get_user(len, &iov32[i].iov_len) ||
put_user(compat_ptr(base), &iov[i].iov_base) ||
put_user(len, &iov[i].iov_len))
return -EFAULT;
}
if (put_user(iov, &sgio->dxferp))
return -EFAULT;
return 0;
}
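/*
 * sg_build_iovec() widens the 32-bit iovec array in place after the
 * 64-bit header: sg_ioctl_trans() below allocates the sg_io_hdr_t and
 * its sg_iovec_t array as one contiguous compat userspace block, which
 * is why (sgio + 1) is the first iovec slot.  Layout sketch:
 *
 *   [ sg_io_hdr_t ][ sg_iovec_t[0] ... sg_iovec_t[iovec_count - 1] ]
 *     ^ sgio         ^ iov == (sg_iovec_t __user *)(sgio + 1)
 */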
static int sg_ioctl_trans(unsigned int fd, unsigned int cmd,
sg_io_hdr32_t __user *sgio32)
{
sg_io_hdr_t __user *sgio;
u16 iovec_count;
u32 data;
void __user *dxferp;
int err;
int interface_id;
if (get_user(interface_id, &sgio32->interface_id))
return -EFAULT;
if (interface_id != 'S')
return sys_ioctl(fd, cmd, (unsigned long)sgio32);
if (get_user(iovec_count, &sgio32->iovec_count))
return -EFAULT;
{
void __user *top = compat_alloc_user_space(0);
void __user *new = compat_alloc_user_space(sizeof(sg_io_hdr_t) +
(iovec_count * sizeof(sg_iovec_t)));
if (new > top)
return -EINVAL;
sgio = new;
}
/* Ok, now construct. */
if (copy_in_user(&sgio->interface_id, &sgio32->interface_id,
(2 * sizeof(int)) +
(2 * sizeof(unsigned char)) +
(1 * sizeof(unsigned short)) +
(1 * sizeof(unsigned int))))
return -EFAULT;
if (get_user(data, &sgio32->dxferp))
return -EFAULT;
dxferp = compat_ptr(data);
if (iovec_count) {
if (sg_build_iovec(sgio, dxferp, iovec_count))
return -EFAULT;
} else {
if (put_user(dxferp, &sgio->dxferp))
return -EFAULT;
}
{
unsigned char __user *cmdp;
unsigned char __user *sbp;
if (get_user(data, &sgio32->cmdp))
return -EFAULT;
cmdp = compat_ptr(data);
if (get_user(data, &sgio32->sbp))
return -EFAULT;
sbp = compat_ptr(data);
if (put_user(cmdp, &sgio->cmdp) ||
put_user(sbp, &sgio->sbp))
return -EFAULT;
}
if (copy_in_user(&sgio->timeout, &sgio32->timeout,
3 * sizeof(int)))
return -EFAULT;
if (get_user(data, &sgio32->usr_ptr))
return -EFAULT;
if (put_user(compat_ptr(data), &sgio->usr_ptr))
return -EFAULT;
err = sys_ioctl(fd, cmd, (unsigned long) sgio);
if (err >= 0) {
void __user *datap;
if (copy_in_user(&sgio32->pack_id, &sgio->pack_id,
sizeof(int)) ||
get_user(datap, &sgio->usr_ptr) ||
put_user((u32)(unsigned long)datap,
&sgio32->usr_ptr) ||
copy_in_user(&sgio32->status, &sgio->status,
(4 * sizeof(unsigned char)) +
(2 * sizeof(unsigned short)) +
(3 * sizeof(int))))
err = -EFAULT;
}
return err;
}
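/*
 * Note the allocation guard inside sg_ioctl_trans() above:
 * compat_alloc_user_space(0) yields the current top of the compat
 * scratch area, so "new > top" rejects the case where the requested
 * size computation would wrap and make the block appear above the
 * current top, instead of silently handing back an out-of-range
 * pointer.
 */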
struct compat_sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */
char req_state;
char orphan;
char sg_io_owned;
char problem;
int pack_id;
compat_uptr_t usr_ptr;
unsigned int duration;
int unused;
};
static int sg_grt_trans(unsigned int fd, unsigned int cmd, struct
compat_sg_req_info __user *o)
{
int err, i;
sg_req_info_t __user *r;
r = compat_alloc_user_space(sizeof(sg_req_info_t)*SG_MAX_QUEUE);
err = sys_ioctl(fd,cmd,(unsigned long)r);
if (err < 0)
return err;
for (i = 0; i < SG_MAX_QUEUE; i++) {
void __user *ptr;
int d;
if (copy_in_user(o + i, r + i, offsetof(sg_req_info_t, usr_ptr)) ||
get_user(ptr, &r[i].usr_ptr) ||
get_user(d, &r[i].duration) ||
put_user((u32)(unsigned long)(ptr), &o[i].usr_ptr) ||
put_user(d, &o[i].duration))
return -EFAULT;
}
return err;
}
#endif /* CONFIG_BLOCK */
struct sock_fprog32 {
unsigned short len;
compat_caddr_t filter;
};
#define PPPIOCSPASS32 _IOW('t', 71, struct sock_fprog32)
#define PPPIOCSACTIVE32 _IOW('t', 70, struct sock_fprog32)
static int ppp_sock_fprog_ioctl_trans(unsigned int fd, unsigned int cmd,
struct sock_fprog32 __user *u_fprog32)
{
struct sock_fprog __user *u_fprog64 = compat_alloc_user_space(sizeof(struct sock_fprog));
void __user *fptr64;
u32 fptr32;
u16 flen;
if (get_user(flen, &u_fprog32->len) ||
get_user(fptr32, &u_fprog32->filter))
return -EFAULT;
fptr64 = compat_ptr(fptr32);
if (put_user(flen, &u_fprog64->len) ||
put_user(fptr64, &u_fprog64->filter))
return -EFAULT;
if (cmd == PPPIOCSPASS32)
cmd = PPPIOCSPASS;
else
cmd = PPPIOCSACTIVE;
return sys_ioctl(fd, cmd, (unsigned long) u_fprog64);
}
struct ppp_option_data32 {
compat_caddr_t ptr;
u32 length;
compat_int_t transmit;
};
#define PPPIOCSCOMPRESS32 _IOW('t', 77, struct ppp_option_data32)
struct ppp_idle32 {
compat_time_t xmit_idle;
compat_time_t recv_idle;
};
#define PPPIOCGIDLE32 _IOR('t', 63, struct ppp_idle32)
static int ppp_gidle(unsigned int fd, unsigned int cmd,
struct ppp_idle32 __user *idle32)
{
struct ppp_idle __user *idle;
__kernel_time_t xmit, recv;
int err;
idle = compat_alloc_user_space(sizeof(*idle));
err = sys_ioctl(fd, PPPIOCGIDLE, (unsigned long) idle);
if (!err) {
if (get_user(xmit, &idle->xmit_idle) ||
get_user(recv, &idle->recv_idle) ||
put_user(xmit, &idle32->xmit_idle) ||
put_user(recv, &idle32->recv_idle))
err = -EFAULT;
}
return err;
}
static int ppp_scompress(unsigned int fd, unsigned int cmd,
struct ppp_option_data32 __user *odata32)
{
struct ppp_option_data __user *odata;
__u32 data;
void __user *datap;
odata = compat_alloc_user_space(sizeof(*odata));
if (get_user(data, &odata32->ptr))
return -EFAULT;
datap = compat_ptr(data);
if (put_user(datap, &odata->ptr))
return -EFAULT;
if (copy_in_user(&odata->length, &odata32->length,
sizeof(__u32) + sizeof(int)))
return -EFAULT;
return sys_ioctl(fd, PPPIOCSCOMPRESS, (unsigned long) odata);
}
#ifdef CONFIG_BLOCK
struct mtget32 {
compat_long_t mt_type;
compat_long_t mt_resid;
compat_long_t mt_dsreg;
compat_long_t mt_gstat;
compat_long_t mt_erreg;
compat_daddr_t mt_fileno;
compat_daddr_t mt_blkno;
};
#define MTIOCGET32 _IOR('m', 2, struct mtget32)
struct mtpos32 {
compat_long_t mt_blkno;
};
#define MTIOCPOS32 _IOR('m', 3, struct mtpos32)
static int mt_ioctl_trans(unsigned int fd, unsigned int cmd, void __user *argp)
{
mm_segment_t old_fs = get_fs();
struct mtget get;
struct mtget32 __user *umget32;
struct mtpos pos;
struct mtpos32 __user *upos32;
unsigned long kcmd;
void *karg;
int err = 0;
switch(cmd) {
case MTIOCPOS32:
kcmd = MTIOCPOS;
karg = &pos;
break;
default: /* MTIOCGET32 */
kcmd = MTIOCGET;
karg = &get;
break;
}
set_fs (KERNEL_DS);
err = sys_ioctl (fd, kcmd, (unsigned long)karg);
set_fs (old_fs);
if (err)
return err;
switch (cmd) {
case MTIOCPOS32:
upos32 = argp;
err = __put_user(pos.mt_blkno, &upos32->mt_blkno);
break;
case MTIOCGET32:
umget32 = argp;
err = __put_user(get.mt_type, &umget32->mt_type);
err |= __put_user(get.mt_resid, &umget32->mt_resid);
err |= __put_user(get.mt_dsreg, &umget32->mt_dsreg);
err |= __put_user(get.mt_gstat, &umget32->mt_gstat);
err |= __put_user(get.mt_erreg, &umget32->mt_erreg);
err |= __put_user(get.mt_fileno, &umget32->mt_fileno);
err |= __put_user(get.mt_blkno, &umget32->mt_blkno);
break;
}
return err ? -EFAULT: 0;
}
#endif /* CONFIG_BLOCK */
/* Bluetooth ioctls */
#define HCIUARTSETPROTO _IOW('U', 200, int)
#define HCIUARTGETPROTO _IOR('U', 201, int)
#define HCIUARTGETDEVICE _IOR('U', 202, int)
#define HCIUARTSETFLAGS _IOW('U', 203, int)
#define HCIUARTGETFLAGS _IOR('U', 204, int)
#define BNEPCONNADD _IOW('B', 200, int)
#define BNEPCONNDEL _IOW('B', 201, int)
#define BNEPGETCONNLIST _IOR('B', 210, int)
#define BNEPGETCONNINFO _IOR('B', 211, int)
#define CMTPCONNADD _IOW('C', 200, int)
#define CMTPCONNDEL _IOW('C', 201, int)
#define CMTPGETCONNLIST _IOR('C', 210, int)
#define CMTPGETCONNINFO _IOR('C', 211, int)
#define HIDPCONNADD _IOW('H', 200, int)
#define HIDPCONNDEL _IOW('H', 201, int)
#define HIDPGETCONNLIST _IOR('H', 210, int)
#define HIDPGETCONNINFO _IOR('H', 211, int)
struct serial_struct32 {
compat_int_t type;
compat_int_t line;
compat_uint_t port;
compat_int_t irq;
compat_int_t flags;
compat_int_t xmit_fifo_size;
compat_int_t custom_divisor;
compat_int_t baud_base;
unsigned short close_delay;
char io_type;
char reserved_char[1];
compat_int_t hub6;
unsigned short closing_wait; /* time to wait before closing */
unsigned short closing_wait2; /* no longer used... */
compat_uint_t iomem_base;
unsigned short iomem_reg_shift;
unsigned int port_high;
/* compat_ulong_t iomap_base FIXME */
compat_int_t reserved[1];
};
static int serial_struct_ioctl(unsigned fd, unsigned cmd,
struct serial_struct32 __user *ss32)
{
typedef struct serial_struct SS;
typedef struct serial_struct32 SS32;
int err;
struct serial_struct ss;
mm_segment_t oldseg = get_fs();
__u32 udata;
unsigned int base;
if (cmd == TIOCSSERIAL) {
if (!access_ok(VERIFY_READ, ss32, sizeof(SS32)))
return -EFAULT;
if (__copy_from_user(&ss, ss32, offsetof(SS32, iomem_base)))
return -EFAULT;
if (__get_user(udata, &ss32->iomem_base))
return -EFAULT;
ss.iomem_base = compat_ptr(udata);
if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
__get_user(ss.port_high, &ss32->port_high))
return -EFAULT;
ss.iomap_base = 0UL;
}
set_fs(KERNEL_DS);
err = sys_ioctl(fd,cmd,(unsigned long)(&ss));
set_fs(oldseg);
if (cmd == TIOCGSERIAL && err >= 0) {
if (!access_ok(VERIFY_WRITE, ss32, sizeof(SS32)))
return -EFAULT;
if (__copy_to_user(ss32,&ss,offsetof(SS32,iomem_base)))
return -EFAULT;
base = (unsigned long)ss.iomem_base >> 32 ?
0xffffffff : (unsigned)(unsigned long)ss.iomem_base;
if (__put_user(base, &ss32->iomem_base) ||
__put_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
__put_user(ss.port_high, &ss32->port_high))
return -EFAULT;
}
return err;
}
/*
* I2C layer ioctls
*/
struct i2c_msg32 {
u16 addr;
u16 flags;
u16 len;
compat_caddr_t buf;
};
struct i2c_rdwr_ioctl_data32 {
compat_caddr_t msgs; /* struct i2c_msg __user *msgs */
u32 nmsgs;
};
struct i2c_smbus_ioctl_data32 {
u8 read_write;
u8 command;
u32 size;
compat_caddr_t data; /* union i2c_smbus_data *data */
};
struct i2c_rdwr_aligned {
struct i2c_rdwr_ioctl_data cmd;
struct i2c_msg msgs[0];
};
static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
struct i2c_rdwr_ioctl_data32 __user *udata)
{
struct i2c_rdwr_aligned __user *tdata;
struct i2c_msg __user *tmsgs;
struct i2c_msg32 __user *umsgs;
compat_caddr_t datap;
int nmsgs, i;
if (get_user(nmsgs, &udata->nmsgs))
return -EFAULT;
if (nmsgs > I2C_RDRW_IOCTL_MAX_MSGS)
return -EINVAL;
if (get_user(datap, &udata->msgs))
return -EFAULT;
umsgs = compat_ptr(datap);
tdata = compat_alloc_user_space(sizeof(*tdata) +
nmsgs * sizeof(struct i2c_msg));
tmsgs = &tdata->msgs[0];
if (put_user(nmsgs, &tdata->cmd.nmsgs) ||
put_user(tmsgs, &tdata->cmd.msgs))
return -EFAULT;
for (i = 0; i < nmsgs; i++) {
if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
return -EFAULT;
if (get_user(datap, &umsgs[i].buf) ||
put_user(compat_ptr(datap), &tmsgs[i].buf))
return -EFAULT;
}
return sys_ioctl(fd, cmd, (unsigned long)tdata);
}
static int do_i2c_smbus_ioctl(unsigned int fd, unsigned int cmd,
struct i2c_smbus_ioctl_data32 __user *udata)
{
struct i2c_smbus_ioctl_data __user *tdata;
compat_caddr_t datap;
tdata = compat_alloc_user_space(sizeof(*tdata));
if (tdata == NULL)
return -ENOMEM;
if (!access_ok(VERIFY_WRITE, tdata, sizeof(*tdata)))
return -EFAULT;
if (!access_ok(VERIFY_READ, udata, sizeof(*udata)))
return -EFAULT;
if (__copy_in_user(&tdata->read_write, &udata->read_write, 2 * sizeof(u8)))
return -EFAULT;
if (__copy_in_user(&tdata->size, &udata->size, 2 * sizeof(u32)))
return -EFAULT;
if (__get_user(datap, &udata->data) ||
__put_user(compat_ptr(datap), &tdata->data))
return -EFAULT;
return sys_ioctl(fd, cmd, (unsigned long)tdata);
}
#define RTC_IRQP_READ32 _IOR('p', 0x0b, compat_ulong_t)
#define RTC_IRQP_SET32 _IOW('p', 0x0c, compat_ulong_t)
#define RTC_EPOCH_READ32 _IOR('p', 0x0d, compat_ulong_t)
#define RTC_EPOCH_SET32 _IOW('p', 0x0e, compat_ulong_t)
static int rtc_ioctl(unsigned fd, unsigned cmd, void __user *argp)
{
mm_segment_t oldfs = get_fs();
compat_ulong_t val32;
unsigned long kval;
int ret;
switch (cmd) {
case RTC_IRQP_READ32:
case RTC_EPOCH_READ32:
set_fs(KERNEL_DS);
ret = sys_ioctl(fd, (cmd == RTC_IRQP_READ32) ?
RTC_IRQP_READ : RTC_EPOCH_READ,
(unsigned long)&kval);
set_fs(oldfs);
if (ret)
return ret;
val32 = kval;
return put_user(val32, (unsigned int __user *)argp);
case RTC_IRQP_SET32:
return sys_ioctl(fd, RTC_IRQP_SET, (unsigned long)argp);
case RTC_EPOCH_SET32:
return sys_ioctl(fd, RTC_EPOCH_SET, (unsigned long)argp);
}
return -ENOIOCTLCMD;
}
/* on ia32 l_start is on a 32-bit boundary */
#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
struct space_resv_32 {
__s16 l_type;
__s16 l_whence;
__s64 l_start __attribute__((packed));
/* len == 0 means until end of file */
__s64 l_len __attribute__((packed));
__s32 l_sysid;
__u32 l_pid;
__s32 l_pad[4]; /* reserve area */
};
#define FS_IOC_RESVSP_32 _IOW ('X', 40, struct space_resv_32)
#define FS_IOC_RESVSP64_32 _IOW ('X', 42, struct space_resv_32)
/* just account for different alignment */
static int compat_ioctl_preallocate(struct file *file,
struct space_resv_32 __user *p32)
{
struct space_resv __user *p = compat_alloc_user_space(sizeof(*p));
if (copy_in_user(&p->l_type, &p32->l_type, sizeof(s16)) ||
copy_in_user(&p->l_whence, &p32->l_whence, sizeof(s16)) ||
copy_in_user(&p->l_start, &p32->l_start, sizeof(s64)) ||
copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
return -EFAULT;
return ioctl_preallocate(file, p);
}
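/*
 * space_resv_32 exists because i386 aligns __s64 members to 4 bytes
 * while x86-64 aligns them to 8, so the native struct space_resv has
 * different padding in the two ABIs.  A hedged build-time check of the
 * layout assumption (BUILD_BUG_ON is the kernel's static assert; the
 * offset follows from the two leading __s16 fields plus the packed
 * attribute):
 */
#if 0	/* illustrative sketch only */
static inline void space_resv_32_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct space_resv_32, l_start) != 4);
}
#endif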
#endif
/*
* simple reversible transform to make our table more evenly
* distributed after sorting.
*/
#define XFORM(i) (((i) ^ ((i) << 27) ^ ((i) << 17)) & 0xffffffff)
#define COMPATIBLE_IOCTL(cmd) XFORM(cmd),
/* ioctl should not be warned about even if it's not implemented.
Valid reasons to use this:
- It is implemented with ->compat_ioctl on some device, but programs
call it on others too.
- The ioctl is not implemented in the native kernel, but programs
call it commonly anyway.
Most other reasons are not valid. */
#define IGNORE_IOCTL(cmd) COMPATIBLE_IOCTL(cmd)
static unsigned int ioctl_pointer[] = {
/* compatible ioctls first */
COMPATIBLE_IOCTL(0x4B50) /* KDGHWCLK - not in the kernel, but don't complain */
COMPATIBLE_IOCTL(0x4B51) /* KDSHWCLK - not in the kernel, but don't complain */
/* Big T */
COMPATIBLE_IOCTL(TCGETA)
COMPATIBLE_IOCTL(TCSETA)
COMPATIBLE_IOCTL(TCSETAW)
COMPATIBLE_IOCTL(TCSETAF)
COMPATIBLE_IOCTL(TCSBRK)
COMPATIBLE_IOCTL(TCXONC)
COMPATIBLE_IOCTL(TCFLSH)
COMPATIBLE_IOCTL(TCGETS)
COMPATIBLE_IOCTL(TCSETS)
COMPATIBLE_IOCTL(TCSETSW)
COMPATIBLE_IOCTL(TCSETSF)
COMPATIBLE_IOCTL(TIOCLINUX)
COMPATIBLE_IOCTL(TIOCSBRK)
COMPATIBLE_IOCTL(TIOCGDEV)
COMPATIBLE_IOCTL(TIOCCBRK)
COMPATIBLE_IOCTL(TIOCGSID)
COMPATIBLE_IOCTL(TIOCGICOUNT)
/* Little t */
COMPATIBLE_IOCTL(TIOCGETD)
COMPATIBLE_IOCTL(TIOCSETD)
COMPATIBLE_IOCTL(TIOCEXCL)
COMPATIBLE_IOCTL(TIOCNXCL)
COMPATIBLE_IOCTL(TIOCCONS)
COMPATIBLE_IOCTL(TIOCGSOFTCAR)
COMPATIBLE_IOCTL(TIOCSSOFTCAR)
COMPATIBLE_IOCTL(TIOCSWINSZ)
COMPATIBLE_IOCTL(TIOCGWINSZ)
COMPATIBLE_IOCTL(TIOCMGET)
COMPATIBLE_IOCTL(TIOCMBIC)
COMPATIBLE_IOCTL(TIOCMBIS)
COMPATIBLE_IOCTL(TIOCMSET)
COMPATIBLE_IOCTL(TIOCPKT)
COMPATIBLE_IOCTL(TIOCNOTTY)
COMPATIBLE_IOCTL(TIOCSTI)
COMPATIBLE_IOCTL(TIOCOUTQ)
COMPATIBLE_IOCTL(TIOCSPGRP)
COMPATIBLE_IOCTL(TIOCGPGRP)
COMPATIBLE_IOCTL(TIOCGPTN)
COMPATIBLE_IOCTL(TIOCSPTLCK)
COMPATIBLE_IOCTL(TIOCSERGETLSR)
COMPATIBLE_IOCTL(TIOCSIG)
#ifdef TIOCSRS485
COMPATIBLE_IOCTL(TIOCSRS485)
#endif
#ifdef TIOCGRS485
COMPATIBLE_IOCTL(TIOCGRS485)
#endif
#ifdef TCGETS2
COMPATIBLE_IOCTL(TCGETS2)
COMPATIBLE_IOCTL(TCSETS2)
COMPATIBLE_IOCTL(TCSETSW2)
COMPATIBLE_IOCTL(TCSETSF2)
#endif
/* Little f */
COMPATIBLE_IOCTL(FIOCLEX)
COMPATIBLE_IOCTL(FIONCLEX)
COMPATIBLE_IOCTL(FIOASYNC)
COMPATIBLE_IOCTL(FIONBIO)
COMPATIBLE_IOCTL(FIONREAD) /* This is also TIOCINQ */
COMPATIBLE_IOCTL(FS_IOC_FIEMAP)
/* 0x00 */
COMPATIBLE_IOCTL(FIBMAP)
COMPATIBLE_IOCTL(FIGETBSZ)
/* 'X' - originally XFS but some now in the VFS */
COMPATIBLE_IOCTL(FIFREEZE)
COMPATIBLE_IOCTL(FITHAW)
COMPATIBLE_IOCTL(KDGETKEYCODE)
COMPATIBLE_IOCTL(KDSETKEYCODE)
COMPATIBLE_IOCTL(KDGKBTYPE)
COMPATIBLE_IOCTL(KDGETMODE)
COMPATIBLE_IOCTL(KDGKBMODE)
COMPATIBLE_IOCTL(KDGKBMETA)
COMPATIBLE_IOCTL(KDGKBENT)
COMPATIBLE_IOCTL(KDSKBENT)
COMPATIBLE_IOCTL(KDGKBSENT)
COMPATIBLE_IOCTL(KDSKBSENT)
COMPATIBLE_IOCTL(KDGKBDIACR)
COMPATIBLE_IOCTL(KDSKBDIACR)
COMPATIBLE_IOCTL(KDGKBDIACRUC)
COMPATIBLE_IOCTL(KDSKBDIACRUC)
COMPATIBLE_IOCTL(KDKBDREP)
COMPATIBLE_IOCTL(KDGKBLED)
COMPATIBLE_IOCTL(KDGETLED)
#ifdef CONFIG_BLOCK
/* Big S */
COMPATIBLE_IOCTL(SCSI_IOCTL_GET_IDLUN)
COMPATIBLE_IOCTL(SCSI_IOCTL_DOORLOCK)
COMPATIBLE_IOCTL(SCSI_IOCTL_DOORUNLOCK)
COMPATIBLE_IOCTL(SCSI_IOCTL_TEST_UNIT_READY)
COMPATIBLE_IOCTL(SCSI_IOCTL_GET_BUS_NUMBER)
COMPATIBLE_IOCTL(SCSI_IOCTL_SEND_COMMAND)
COMPATIBLE_IOCTL(SCSI_IOCTL_PROBE_HOST)
COMPATIBLE_IOCTL(SCSI_IOCTL_GET_PCI)
#endif
/* Big V (don't complain on serial console) */
IGNORE_IOCTL(VT_OPENQRY)
IGNORE_IOCTL(VT_GETMODE)
/* Little p (/dev/rtc, /dev/envctrl, etc.) */
COMPATIBLE_IOCTL(RTC_AIE_ON)
COMPATIBLE_IOCTL(RTC_AIE_OFF)
COMPATIBLE_IOCTL(RTC_UIE_ON)
COMPATIBLE_IOCTL(RTC_UIE_OFF)
COMPATIBLE_IOCTL(RTC_PIE_ON)
COMPATIBLE_IOCTL(RTC_PIE_OFF)
COMPATIBLE_IOCTL(RTC_WIE_ON)
COMPATIBLE_IOCTL(RTC_WIE_OFF)
COMPATIBLE_IOCTL(RTC_ALM_SET)
COMPATIBLE_IOCTL(RTC_ALM_READ)
COMPATIBLE_IOCTL(RTC_RD_TIME)
COMPATIBLE_IOCTL(RTC_SET_TIME)
COMPATIBLE_IOCTL(RTC_WKALM_SET)
COMPATIBLE_IOCTL(RTC_WKALM_RD)
/*
* These two are only for the sbus rtc driver, but
* hwclock tries them on every rtc device first when
* running on sparc. On other architectures the entries
* are useless but harmless.
*/
COMPATIBLE_IOCTL(_IOR('p', 20, int[7])) /* RTCGET */
COMPATIBLE_IOCTL(_IOW('p', 21, int[7])) /* RTCSET */
/* Little m */
COMPATIBLE_IOCTL(MTIOCTOP)
/* Socket level stuff */
COMPATIBLE_IOCTL(FIOQSIZE)
#ifdef CONFIG_BLOCK
/* loop */
IGNORE_IOCTL(LOOP_CLR_FD)
/* md calls this on random blockdevs */
IGNORE_IOCTL(RAID_VERSION)
/* qemu/qemu-img might call these two on plain files for probing */
IGNORE_IOCTL(CDROM_DRIVE_STATUS)
IGNORE_IOCTL(FDGETPRM32)
/* SG stuff */
COMPATIBLE_IOCTL(SG_SET_TIMEOUT)
COMPATIBLE_IOCTL(SG_GET_TIMEOUT)
COMPATIBLE_IOCTL(SG_EMULATED_HOST)
COMPATIBLE_IOCTL(SG_GET_TRANSFORM)
COMPATIBLE_IOCTL(SG_SET_RESERVED_SIZE)
COMPATIBLE_IOCTL(SG_GET_RESERVED_SIZE)
COMPATIBLE_IOCTL(SG_GET_SCSI_ID)
COMPATIBLE_IOCTL(SG_SET_FORCE_LOW_DMA)
COMPATIBLE_IOCTL(SG_GET_LOW_DMA)
COMPATIBLE_IOCTL(SG_SET_FORCE_PACK_ID)
COMPATIBLE_IOCTL(SG_GET_PACK_ID)
COMPATIBLE_IOCTL(SG_GET_NUM_WAITING)
COMPATIBLE_IOCTL(SG_SET_DEBUG)
COMPATIBLE_IOCTL(SG_GET_SG_TABLESIZE)
COMPATIBLE_IOCTL(SG_GET_COMMAND_Q)
COMPATIBLE_IOCTL(SG_SET_COMMAND_Q)
COMPATIBLE_IOCTL(SG_GET_VERSION_NUM)
COMPATIBLE_IOCTL(SG_NEXT_CMD_LEN)
COMPATIBLE_IOCTL(SG_SCSI_RESET)
COMPATIBLE_IOCTL(SG_GET_REQUEST_TABLE)
COMPATIBLE_IOCTL(SG_SET_KEEP_ORPHAN)
COMPATIBLE_IOCTL(SG_GET_KEEP_ORPHAN)
#endif
/* PPP stuff */
COMPATIBLE_IOCTL(PPPIOCGFLAGS)
COMPATIBLE_IOCTL(PPPIOCSFLAGS)
COMPATIBLE_IOCTL(PPPIOCGASYNCMAP)
COMPATIBLE_IOCTL(PPPIOCSASYNCMAP)
COMPATIBLE_IOCTL(PPPIOCGUNIT)
COMPATIBLE_IOCTL(PPPIOCGRASYNCMAP)
COMPATIBLE_IOCTL(PPPIOCSRASYNCMAP)
COMPATIBLE_IOCTL(PPPIOCGMRU)
COMPATIBLE_IOCTL(PPPIOCSMRU)
COMPATIBLE_IOCTL(PPPIOCSMAXCID)
COMPATIBLE_IOCTL(PPPIOCGXASYNCMAP)
COMPATIBLE_IOCTL(PPPIOCSXASYNCMAP)
COMPATIBLE_IOCTL(PPPIOCXFERUNIT)
/* PPPIOCSCOMPRESS is translated */
COMPATIBLE_IOCTL(PPPIOCGNPMODE)
COMPATIBLE_IOCTL(PPPIOCSNPMODE)
COMPATIBLE_IOCTL(PPPIOCGDEBUG)
COMPATIBLE_IOCTL(PPPIOCSDEBUG)
/* PPPIOCSPASS is translated */
/* PPPIOCSACTIVE is translated */
/* PPPIOCGIDLE is translated */
COMPATIBLE_IOCTL(PPPIOCNEWUNIT)
COMPATIBLE_IOCTL(PPPIOCATTACH)
COMPATIBLE_IOCTL(PPPIOCDETACH)
COMPATIBLE_IOCTL(PPPIOCSMRRU)
COMPATIBLE_IOCTL(PPPIOCCONNECT)
COMPATIBLE_IOCTL(PPPIOCDISCONN)
COMPATIBLE_IOCTL(PPPIOCATTCHAN)
COMPATIBLE_IOCTL(PPPIOCGCHAN)
COMPATIBLE_IOCTL(PPPIOCGL2TPSTATS)
/* PPPOX */
COMPATIBLE_IOCTL(PPPOEIOCSFWD)
COMPATIBLE_IOCTL(PPPOEIOCDFWD)
/* ppdev */
COMPATIBLE_IOCTL(PPSETMODE)
COMPATIBLE_IOCTL(PPRSTATUS)
COMPATIBLE_IOCTL(PPRCONTROL)
COMPATIBLE_IOCTL(PPWCONTROL)
COMPATIBLE_IOCTL(PPFCONTROL)
COMPATIBLE_IOCTL(PPRDATA)
COMPATIBLE_IOCTL(PPWDATA)
COMPATIBLE_IOCTL(PPCLAIM)
COMPATIBLE_IOCTL(PPRELEASE)
COMPATIBLE_IOCTL(PPYIELD)
COMPATIBLE_IOCTL(PPEXCL)
COMPATIBLE_IOCTL(PPDATADIR)
COMPATIBLE_IOCTL(PPNEGOT)
COMPATIBLE_IOCTL(PPWCTLONIRQ)
COMPATIBLE_IOCTL(PPCLRIRQ)
COMPATIBLE_IOCTL(PPSETPHASE)
COMPATIBLE_IOCTL(PPGETMODES)
COMPATIBLE_IOCTL(PPGETMODE)
COMPATIBLE_IOCTL(PPGETPHASE)
COMPATIBLE_IOCTL(PPGETFLAGS)
COMPATIBLE_IOCTL(PPSETFLAGS)
/* Big A */
/* sparc only */
/* Big Q for sound/OSS */
COMPATIBLE_IOCTL(SNDCTL_SEQ_RESET)
COMPATIBLE_IOCTL(SNDCTL_SEQ_SYNC)
COMPATIBLE_IOCTL(SNDCTL_SYNTH_INFO)
COMPATIBLE_IOCTL(SNDCTL_SEQ_CTRLRATE)
COMPATIBLE_IOCTL(SNDCTL_SEQ_GETOUTCOUNT)
COMPATIBLE_IOCTL(SNDCTL_SEQ_GETINCOUNT)
COMPATIBLE_IOCTL(SNDCTL_SEQ_PERCMODE)
COMPATIBLE_IOCTL(SNDCTL_FM_LOAD_INSTR)
COMPATIBLE_IOCTL(SNDCTL_SEQ_TESTMIDI)
COMPATIBLE_IOCTL(SNDCTL_SEQ_RESETSAMPLES)
COMPATIBLE_IOCTL(SNDCTL_SEQ_NRSYNTHS)
COMPATIBLE_IOCTL(SNDCTL_SEQ_NRMIDIS)
COMPATIBLE_IOCTL(SNDCTL_MIDI_INFO)
COMPATIBLE_IOCTL(SNDCTL_SEQ_THRESHOLD)
COMPATIBLE_IOCTL(SNDCTL_SYNTH_MEMAVL)
COMPATIBLE_IOCTL(SNDCTL_FM_4OP_ENABLE)
COMPATIBLE_IOCTL(SNDCTL_SEQ_PANIC)
COMPATIBLE_IOCTL(SNDCTL_SEQ_OUTOFBAND)
COMPATIBLE_IOCTL(SNDCTL_SEQ_GETTIME)
COMPATIBLE_IOCTL(SNDCTL_SYNTH_ID)
COMPATIBLE_IOCTL(SNDCTL_SYNTH_CONTROL)
COMPATIBLE_IOCTL(SNDCTL_SYNTH_REMOVESAMPLE)
/* Big T for sound/OSS */
COMPATIBLE_IOCTL(SNDCTL_TMR_TIMEBASE)
COMPATIBLE_IOCTL(SNDCTL_TMR_START)
COMPATIBLE_IOCTL(SNDCTL_TMR_STOP)
COMPATIBLE_IOCTL(SNDCTL_TMR_CONTINUE)
COMPATIBLE_IOCTL(SNDCTL_TMR_TEMPO)
COMPATIBLE_IOCTL(SNDCTL_TMR_SOURCE)
COMPATIBLE_IOCTL(SNDCTL_TMR_METRONOME)
COMPATIBLE_IOCTL(SNDCTL_TMR_SELECT)
/* Little m for sound/OSS */
COMPATIBLE_IOCTL(SNDCTL_MIDI_PRETIME)
COMPATIBLE_IOCTL(SNDCTL_MIDI_MPUMODE)
COMPATIBLE_IOCTL(SNDCTL_MIDI_MPUCMD)
/* Big P for sound/OSS */
COMPATIBLE_IOCTL(SNDCTL_DSP_RESET)
COMPATIBLE_IOCTL(SNDCTL_DSP_SYNC)
COMPATIBLE_IOCTL(SNDCTL_DSP_SPEED)
COMPATIBLE_IOCTL(SNDCTL_DSP_STEREO)
COMPATIBLE_IOCTL(SNDCTL_DSP_GETBLKSIZE)
COMPATIBLE_IOCTL(SNDCTL_DSP_CHANNELS)
COMPATIBLE_IOCTL(SOUND_PCM_WRITE_FILTER)
COMPATIBLE_IOCTL(SNDCTL_DSP_POST)
COMPATIBLE_IOCTL(SNDCTL_DSP_SUBDIVIDE)
COMPATIBLE_IOCTL(SNDCTL_DSP_SETFRAGMENT)
COMPATIBLE_IOCTL(SNDCTL_DSP_GETFMTS)
COMPATIBLE_IOCTL(SNDCTL_DSP_SETFMT)
COMPATIBLE_IOCTL(SNDCTL_DSP_GETOSPACE)
COMPATIBLE_IOCTL(SNDCTL_DSP_GETISPACE)
COMPATIBLE_IOCTL(SNDCTL_DSP_NONBLOCK)
COMPATIBLE_IOCTL(SNDCTL_DSP_GETCAPS)
COMPATIBLE_IOCTL(SNDCTL_DSP_GETTRIGGER)
COMPATIBLE_IOCTL(SNDCTL_DSP_SETTRIGGER)
COMPATIBLE_IOCTL(SNDCTL_DSP_GETIPTR)
COMPATIBLE_IOCTL(SNDCTL_DSP_GETOPTR)
/* SNDCTL_DSP_MAPINBUF, XXX needs translation */
/* SNDCTL_DSP_MAPOUTBUF, XXX needs translation */
COMPATIBLE_IOCTL(SNDCTL_DSP_SETSYNCRO)
COMPATIBLE_IOCTL(SNDCTL_DSP_SETDUPLEX)
COMPATIBLE_IOCTL(SNDCTL_DSP_GETODELAY)
COMPATIBLE_IOCTL(SNDCTL_DSP_PROFILE)
COMPATIBLE_IOCTL(SOUND_PCM_READ_RATE)
COMPATIBLE_IOCTL(SOUND_PCM_READ_CHANNELS)
COMPATIBLE_IOCTL(SOUND_PCM_READ_BITS)
COMPATIBLE_IOCTL(SOUND_PCM_READ_FILTER)
/* Big C for sound/OSS */
COMPATIBLE_IOCTL(SNDCTL_COPR_RESET)
COMPATIBLE_IOCTL(SNDCTL_COPR_LOAD)
COMPATIBLE_IOCTL(SNDCTL_COPR_RDATA)
COMPATIBLE_IOCTL(SNDCTL_COPR_RCODE)
COMPATIBLE_IOCTL(SNDCTL_COPR_WDATA)
COMPATIBLE_IOCTL(SNDCTL_COPR_WCODE)
COMPATIBLE_IOCTL(SNDCTL_COPR_RUN)
COMPATIBLE_IOCTL(SNDCTL_COPR_HALT)
COMPATIBLE_IOCTL(SNDCTL_COPR_SENDMSG)
COMPATIBLE_IOCTL(SNDCTL_COPR_RCVMSG)
/* Big M for sound/OSS */
COMPATIBLE_IOCTL(SOUND_MIXER_READ_VOLUME)
COMPATIBLE_IOCTL(SOUND_MIXER_READ_BASS)
COMPATIBLE_IOCTL(SOUND_MIXER_READ_TREBLE)
COMPATIBLE_IOCTL(SOUND_MIXER_READ_SYNTH)
COMPATIBLE_IOCTL(SOUND_MIXER_READ_PCM)
COMPATIBLE_IOCTL(SOUND_MIXER_READ_SPEAKER)
COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE)
COMPATIBLE_IOCTL(SOUND_MIXER_READ_MIC)
COMPATIBLE_IOCTL(SOUND_MIXER_READ_CD)
COMPATIBLE_IOCTL(SOUND_MIXER_READ_IMIX)
COMPATIBLE_IOCTL(SOUND_MIXER_READ_ALTPCM)
COMPATIBLE_IOCTL(SOUND_MIXER_READ_RECLEV)
COMPATIBLE_IOCTL(SOUND_MIXER_READ_IGAIN)
COMPATIBLE_IOCTL(SOUND_MIXER_READ_OGAIN)
COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE1)
COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE2)
COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE3)
COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_DIGITAL1))
COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_DIGITAL2))
COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_DIGITAL3))
COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_PHONEIN))
COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_PHONEOUT))
COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_VIDEO))
COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_RADIO))
COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_MONITOR))
COMPATIBLE_IOCTL(SOUND_MIXER_READ_MUTE)
/* SOUND_MIXER_READ_ENHANCE, same value as READ_MUTE */
/* SOUND_MIXER_READ_LOUD, same value as READ_MUTE */
COMPATIBLE_IOCTL(SOUND_MIXER_READ_RECSRC)
COMPATIBLE_IOCTL(SOUND_MIXER_READ_DEVMASK)
COMPATIBLE_IOCTL(SOUND_MIXER_READ_RECMASK)
COMPATIBLE_IOCTL(SOUND_MIXER_READ_STEREODEVS)
COMPATIBLE_IOCTL(SOUND_MIXER_READ_CAPS)
COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_VOLUME)
COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_BASS)
COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_TREBLE)
COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_SYNTH)
COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_PCM)
COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_SPEAKER)
COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE)
COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_MIC)
COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_CD)
COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_IMIX)
COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_ALTPCM)
COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_RECLEV)
COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_IGAIN)
COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_OGAIN)
COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE1)
COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE2)
COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE3)
COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_DIGITAL1))
COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_DIGITAL2))
COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_DIGITAL3))
COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_PHONEIN))
COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_PHONEOUT))
COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_VIDEO))
COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_RADIO))
COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_MONITOR))
COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_MUTE)
/* SOUND_MIXER_WRITE_ENHANCE, same value as WRITE_MUTE */
/* SOUND_MIXER_WRITE_LOUD, same value as WRITE_MUTE */
COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_RECSRC)
COMPATIBLE_IOCTL(SOUND_MIXER_INFO)
COMPATIBLE_IOCTL(SOUND_OLD_MIXER_INFO)
COMPATIBLE_IOCTL(SOUND_MIXER_ACCESS)
COMPATIBLE_IOCTL(SOUND_MIXER_AGC)
COMPATIBLE_IOCTL(SOUND_MIXER_3DSE)
COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE1)
COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE2)
COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE3)
COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE4)
COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE5)
COMPATIBLE_IOCTL(SOUND_MIXER_GETLEVELS)
COMPATIBLE_IOCTL(SOUND_MIXER_SETLEVELS)
COMPATIBLE_IOCTL(OSS_GETVERSION)
/* Raw devices */
COMPATIBLE_IOCTL(RAW_SETBIND)
COMPATIBLE_IOCTL(RAW_GETBIND)
/* Watchdog */
COMPATIBLE_IOCTL(WDIOC_GETSUPPORT)
COMPATIBLE_IOCTL(WDIOC_GETSTATUS)
COMPATIBLE_IOCTL(WDIOC_GETBOOTSTATUS)
COMPATIBLE_IOCTL(WDIOC_GETTEMP)
COMPATIBLE_IOCTL(WDIOC_SETOPTIONS)
COMPATIBLE_IOCTL(WDIOC_KEEPALIVE)
COMPATIBLE_IOCTL(WDIOC_SETTIMEOUT)
COMPATIBLE_IOCTL(WDIOC_GETTIMEOUT)
/* Big R */
COMPATIBLE_IOCTL(RNDGETENTCNT)
COMPATIBLE_IOCTL(RNDADDTOENTCNT)
COMPATIBLE_IOCTL(RNDGETPOOL)
COMPATIBLE_IOCTL(RNDADDENTROPY)
COMPATIBLE_IOCTL(RNDZAPENTCNT)
COMPATIBLE_IOCTL(RNDCLEARPOOL)
/* Bluetooth */
COMPATIBLE_IOCTL(HCIDEVUP)
COMPATIBLE_IOCTL(HCIDEVDOWN)
COMPATIBLE_IOCTL(HCIDEVRESET)
COMPATIBLE_IOCTL(HCIDEVRESTAT)
COMPATIBLE_IOCTL(HCIGETDEVLIST)
COMPATIBLE_IOCTL(HCIGETDEVINFO)
COMPATIBLE_IOCTL(HCIGETCONNLIST)
COMPATIBLE_IOCTL(HCIGETCONNINFO)
COMPATIBLE_IOCTL(HCIGETAUTHINFO)
COMPATIBLE_IOCTL(HCISETRAW)
COMPATIBLE_IOCTL(HCISETSCAN)
COMPATIBLE_IOCTL(HCISETAUTH)
COMPATIBLE_IOCTL(HCISETENCRYPT)
COMPATIBLE_IOCTL(HCISETPTYPE)
COMPATIBLE_IOCTL(HCISETLINKPOL)
COMPATIBLE_IOCTL(HCISETLINKMODE)
COMPATIBLE_IOCTL(HCISETACLMTU)
COMPATIBLE_IOCTL(HCISETSCOMTU)
COMPATIBLE_IOCTL(HCIBLOCKADDR)
COMPATIBLE_IOCTL(HCIUNBLOCKADDR)
COMPATIBLE_IOCTL(HCIINQUIRY)
COMPATIBLE_IOCTL(HCIUARTSETPROTO)
COMPATIBLE_IOCTL(HCIUARTGETPROTO)
COMPATIBLE_IOCTL(RFCOMMCREATEDEV)
COMPATIBLE_IOCTL(RFCOMMRELEASEDEV)
COMPATIBLE_IOCTL(RFCOMMGETDEVLIST)
COMPATIBLE_IOCTL(RFCOMMGETDEVINFO)
COMPATIBLE_IOCTL(RFCOMMSTEALDLC)
COMPATIBLE_IOCTL(BNEPCONNADD)
COMPATIBLE_IOCTL(BNEPCONNDEL)
COMPATIBLE_IOCTL(BNEPGETCONNLIST)
COMPATIBLE_IOCTL(BNEPGETCONNINFO)
COMPATIBLE_IOCTL(CMTPCONNADD)
COMPATIBLE_IOCTL(CMTPCONNDEL)
COMPATIBLE_IOCTL(CMTPGETCONNLIST)
COMPATIBLE_IOCTL(CMTPGETCONNINFO)
COMPATIBLE_IOCTL(HIDPCONNADD)
COMPATIBLE_IOCTL(HIDPCONNDEL)
COMPATIBLE_IOCTL(HIDPGETCONNLIST)
COMPATIBLE_IOCTL(HIDPGETCONNINFO)
/* CAPI */
COMPATIBLE_IOCTL(CAPI_REGISTER)
COMPATIBLE_IOCTL(CAPI_GET_MANUFACTURER)
COMPATIBLE_IOCTL(CAPI_GET_VERSION)
COMPATIBLE_IOCTL(CAPI_GET_SERIAL)
COMPATIBLE_IOCTL(CAPI_GET_PROFILE)
COMPATIBLE_IOCTL(CAPI_MANUFACTURER_CMD)
COMPATIBLE_IOCTL(CAPI_GET_ERRCODE)
COMPATIBLE_IOCTL(CAPI_INSTALLED)
COMPATIBLE_IOCTL(CAPI_GET_FLAGS)
COMPATIBLE_IOCTL(CAPI_SET_FLAGS)
COMPATIBLE_IOCTL(CAPI_CLR_FLAGS)
COMPATIBLE_IOCTL(CAPI_NCCI_OPENCOUNT)
COMPATIBLE_IOCTL(CAPI_NCCI_GETUNIT)
/* Siemens Gigaset */
COMPATIBLE_IOCTL(GIGASET_REDIR)
COMPATIBLE_IOCTL(GIGASET_CONFIG)
COMPATIBLE_IOCTL(GIGASET_BRKCHARS)
COMPATIBLE_IOCTL(GIGASET_VERSION)
/* Misc. */
COMPATIBLE_IOCTL(0x41545900) /* ATYIO_CLKR */
COMPATIBLE_IOCTL(0x41545901) /* ATYIO_CLKW */
COMPATIBLE_IOCTL(PCIIOC_CONTROLLER)
COMPATIBLE_IOCTL(PCIIOC_MMAP_IS_IO)
COMPATIBLE_IOCTL(PCIIOC_MMAP_IS_MEM)
COMPATIBLE_IOCTL(PCIIOC_WRITE_COMBINE)
/* NBD */
COMPATIBLE_IOCTL(NBD_DO_IT)
COMPATIBLE_IOCTL(NBD_CLEAR_SOCK)
COMPATIBLE_IOCTL(NBD_CLEAR_QUE)
COMPATIBLE_IOCTL(NBD_PRINT_DEBUG)
COMPATIBLE_IOCTL(NBD_DISCONNECT)
/* i2c */
COMPATIBLE_IOCTL(I2C_SLAVE)
COMPATIBLE_IOCTL(I2C_SLAVE_FORCE)
COMPATIBLE_IOCTL(I2C_TENBIT)
COMPATIBLE_IOCTL(I2C_PEC)
COMPATIBLE_IOCTL(I2C_RETRIES)
COMPATIBLE_IOCTL(I2C_TIMEOUT)
/* hiddev */
COMPATIBLE_IOCTL(HIDIOCGVERSION)
COMPATIBLE_IOCTL(HIDIOCAPPLICATION)
COMPATIBLE_IOCTL(HIDIOCGDEVINFO)
COMPATIBLE_IOCTL(HIDIOCGSTRING)
COMPATIBLE_IOCTL(HIDIOCINITREPORT)
COMPATIBLE_IOCTL(HIDIOCGREPORT)
COMPATIBLE_IOCTL(HIDIOCSREPORT)
COMPATIBLE_IOCTL(HIDIOCGREPORTINFO)
COMPATIBLE_IOCTL(HIDIOCGFIELDINFO)
COMPATIBLE_IOCTL(HIDIOCGUSAGE)
COMPATIBLE_IOCTL(HIDIOCSUSAGE)
COMPATIBLE_IOCTL(HIDIOCGUCODE)
COMPATIBLE_IOCTL(HIDIOCGFLAG)
COMPATIBLE_IOCTL(HIDIOCSFLAG)
COMPATIBLE_IOCTL(HIDIOCGCOLLECTIONINDEX)
COMPATIBLE_IOCTL(HIDIOCGCOLLECTIONINFO)
/* dvb */
COMPATIBLE_IOCTL(AUDIO_STOP)
COMPATIBLE_IOCTL(AUDIO_PLAY)
COMPATIBLE_IOCTL(AUDIO_PAUSE)
COMPATIBLE_IOCTL(AUDIO_CONTINUE)
COMPATIBLE_IOCTL(AUDIO_SELECT_SOURCE)
COMPATIBLE_IOCTL(AUDIO_SET_MUTE)
COMPATIBLE_IOCTL(AUDIO_SET_AV_SYNC)
COMPATIBLE_IOCTL(AUDIO_SET_BYPASS_MODE)
COMPATIBLE_IOCTL(AUDIO_CHANNEL_SELECT)
COMPATIBLE_IOCTL(AUDIO_GET_STATUS)
COMPATIBLE_IOCTL(AUDIO_GET_CAPABILITIES)
COMPATIBLE_IOCTL(AUDIO_CLEAR_BUFFER)
COMPATIBLE_IOCTL(AUDIO_SET_ID)
COMPATIBLE_IOCTL(AUDIO_SET_MIXER)
COMPATIBLE_IOCTL(AUDIO_SET_STREAMTYPE)
COMPATIBLE_IOCTL(AUDIO_SET_EXT_ID)
COMPATIBLE_IOCTL(AUDIO_SET_ATTRIBUTES)
COMPATIBLE_IOCTL(AUDIO_SET_KARAOKE)
COMPATIBLE_IOCTL(DMX_START)
COMPATIBLE_IOCTL(DMX_STOP)
COMPATIBLE_IOCTL(DMX_SET_FILTER)
COMPATIBLE_IOCTL(DMX_SET_PES_FILTER)
COMPATIBLE_IOCTL(DMX_SET_BUFFER_SIZE)
COMPATIBLE_IOCTL(DMX_GET_PES_PIDS)
COMPATIBLE_IOCTL(DMX_GET_CAPS)
COMPATIBLE_IOCTL(DMX_SET_SOURCE)
COMPATIBLE_IOCTL(DMX_GET_STC)
COMPATIBLE_IOCTL(FE_GET_INFO)
COMPATIBLE_IOCTL(FE_DISEQC_RESET_OVERLOAD)
COMPATIBLE_IOCTL(FE_DISEQC_SEND_MASTER_CMD)
COMPATIBLE_IOCTL(FE_DISEQC_RECV_SLAVE_REPLY)
COMPATIBLE_IOCTL(FE_DISEQC_SEND_BURST)
COMPATIBLE_IOCTL(FE_SET_TONE)
COMPATIBLE_IOCTL(FE_SET_VOLTAGE)
COMPATIBLE_IOCTL(FE_ENABLE_HIGH_LNB_VOLTAGE)
COMPATIBLE_IOCTL(FE_READ_STATUS)
COMPATIBLE_IOCTL(FE_READ_BER)
COMPATIBLE_IOCTL(FE_READ_SIGNAL_STRENGTH)
COMPATIBLE_IOCTL(FE_READ_SNR)
COMPATIBLE_IOCTL(FE_READ_UNCORRECTED_BLOCKS)
COMPATIBLE_IOCTL(FE_SET_FRONTEND)
COMPATIBLE_IOCTL(FE_GET_FRONTEND)
COMPATIBLE_IOCTL(FE_GET_EVENT)
COMPATIBLE_IOCTL(FE_DISHNETWORK_SEND_LEGACY_CMD)
COMPATIBLE_IOCTL(VIDEO_STOP)
COMPATIBLE_IOCTL(VIDEO_PLAY)
COMPATIBLE_IOCTL(VIDEO_FREEZE)
COMPATIBLE_IOCTL(VIDEO_CONTINUE)
COMPATIBLE_IOCTL(VIDEO_SELECT_SOURCE)
COMPATIBLE_IOCTL(VIDEO_SET_BLANK)
COMPATIBLE_IOCTL(VIDEO_GET_STATUS)
COMPATIBLE_IOCTL(VIDEO_SET_DISPLAY_FORMAT)
COMPATIBLE_IOCTL(VIDEO_FAST_FORWARD)
COMPATIBLE_IOCTL(VIDEO_SLOWMOTION)
COMPATIBLE_IOCTL(VIDEO_GET_CAPABILITIES)
COMPATIBLE_IOCTL(VIDEO_CLEAR_BUFFER)
COMPATIBLE_IOCTL(VIDEO_SET_ID)
COMPATIBLE_IOCTL(VIDEO_SET_STREAMTYPE)
COMPATIBLE_IOCTL(VIDEO_SET_FORMAT)
COMPATIBLE_IOCTL(VIDEO_SET_SYSTEM)
COMPATIBLE_IOCTL(VIDEO_SET_HIGHLIGHT)
COMPATIBLE_IOCTL(VIDEO_SET_SPU)
COMPATIBLE_IOCTL(VIDEO_GET_NAVI)
COMPATIBLE_IOCTL(VIDEO_SET_ATTRIBUTES)
COMPATIBLE_IOCTL(VIDEO_GET_SIZE)
COMPATIBLE_IOCTL(VIDEO_GET_FRAME_RATE)
/* joystick */
COMPATIBLE_IOCTL(JSIOCGVERSION)
COMPATIBLE_IOCTL(JSIOCGAXES)
COMPATIBLE_IOCTL(JSIOCGBUTTONS)
COMPATIBLE_IOCTL(JSIOCGNAME(0))
#ifdef TIOCGLTC
COMPATIBLE_IOCTL(TIOCGLTC)
COMPATIBLE_IOCTL(TIOCSLTC)
#endif
#ifdef TIOCSTART
/*
* For these two we have definitions in ioctls.h and/or termios.h on
* some architectures but no actual implementation. Some applications
* like bash call them if they are defined in the headers, so we provide
* entries here to avoid syslog message spew.
*/
COMPATIBLE_IOCTL(TIOCSTART)
COMPATIBLE_IOCTL(TIOCSTOP)
#endif
/* fat 'r' ioctls. These are handled by fat with ->compat_ioctl,
but we don't want warnings on other file systems. So declare
them as compatible here. */
#define VFAT_IOCTL_READDIR_BOTH32 _IOR('r', 1, struct compat_dirent[2])
#define VFAT_IOCTL_READDIR_SHORT32 _IOR('r', 2, struct compat_dirent[2])
IGNORE_IOCTL(VFAT_IOCTL_READDIR_BOTH32)
IGNORE_IOCTL(VFAT_IOCTL_READDIR_SHORT32)
#ifdef CONFIG_SPARC
/* Sparc framebuffers, handled in sbusfb_compat_ioctl() */
IGNORE_IOCTL(FBIOGTYPE)
IGNORE_IOCTL(FBIOSATTR)
IGNORE_IOCTL(FBIOGATTR)
IGNORE_IOCTL(FBIOSVIDEO)
IGNORE_IOCTL(FBIOGVIDEO)
IGNORE_IOCTL(FBIOSCURPOS)
IGNORE_IOCTL(FBIOGCURPOS)
IGNORE_IOCTL(FBIOGCURMAX)
IGNORE_IOCTL(FBIOPUTCMAP32)
IGNORE_IOCTL(FBIOGETCMAP32)
IGNORE_IOCTL(FBIOSCURSOR32)
IGNORE_IOCTL(FBIOGCURSOR32)
#endif
};
/*
* Convert common ioctl arguments based on their command number
*
* Please do not add any code in here. Instead, implement
* a compat_ioctl operation in the place that handles the
* ioctl for the native case.
*/
static long do_ioctl_trans(int fd, unsigned int cmd,
unsigned long arg, struct file *file)
{
void __user *argp = compat_ptr(arg);
switch (cmd) {
case PPPIOCGIDLE32:
return ppp_gidle(fd, cmd, argp);
case PPPIOCSCOMPRESS32:
return ppp_scompress(fd, cmd, argp);
case PPPIOCSPASS32:
case PPPIOCSACTIVE32:
return ppp_sock_fprog_ioctl_trans(fd, cmd, argp);
#ifdef CONFIG_BLOCK
case SG_IO:
return sg_ioctl_trans(fd, cmd, argp);
case SG_GET_REQUEST_TABLE:
return sg_grt_trans(fd, cmd, argp);
case MTIOCGET32:
case MTIOCPOS32:
return mt_ioctl_trans(fd, cmd, argp);
#endif
/* Serial */
case TIOCGSERIAL:
case TIOCSSERIAL:
return serial_struct_ioctl(fd, cmd, argp);
/* i2c */
case I2C_FUNCS:
return w_long(fd, cmd, argp);
case I2C_RDWR:
return do_i2c_rdwr_ioctl(fd, cmd, argp);
case I2C_SMBUS:
return do_i2c_smbus_ioctl(fd, cmd, argp);
/* Not implemented in the native kernel */
case RTC_IRQP_READ32:
case RTC_IRQP_SET32:
case RTC_EPOCH_READ32:
case RTC_EPOCH_SET32:
return rtc_ioctl(fd, cmd, argp);
/* dvb */
case VIDEO_GET_EVENT:
return do_video_get_event(fd, cmd, argp);
case VIDEO_STILLPICTURE:
return do_video_stillpicture(fd, cmd, argp);
case VIDEO_SET_SPU_PALETTE:
return do_video_set_spu_palette(fd, cmd, argp);
}
/*
* These take an integer instead of a pointer as 'arg',
* so we must not do a compat_ptr() translation.
*/
switch (cmd) {
/* Big T */
case TCSBRKP:
case TIOCMIWAIT:
case TIOCSCTTY:
/* RAID */
case HOT_REMOVE_DISK:
case HOT_ADD_DISK:
case SET_DISK_FAULTY:
case SET_BITMAP_FILE:
/* Big K */
case KDSIGACCEPT:
case KIOCSOUND:
case KDMKTONE:
case KDSETMODE:
case KDSKBMODE:
case KDSKBMETA:
case KDSKBLED:
case KDSETLED:
/* NBD */
case NBD_SET_SOCK:
case NBD_SET_BLKSIZE:
case NBD_SET_SIZE:
case NBD_SET_SIZE_BLOCKS:
return do_vfs_ioctl(file, fd, cmd, arg);
}
return -ENOIOCTLCMD;
}
static int compat_ioctl_check_table(unsigned int xcmd)
{
int i;
const int max = ARRAY_SIZE(ioctl_pointer) - 1;
BUILD_BUG_ON(max >= (1 << 16));
/* guess initial offset into table, assuming a
normalized distribution */
i = ((xcmd >> 16) * max) >> 16;
/* do linear search up first, until greater or equal */
while (ioctl_pointer[i] < xcmd && i < max)
i++;
/* then do linear search down */
while (ioctl_pointer[i] > xcmd && i > 0)
i--;
return ioctl_pointer[i] == xcmd;
}
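/*
 * compat_ioctl_check_table() is in effect a one-shot interpolation
 * search: the XFORM()ed table is sorted at init time (see
 * init_sys32_ioctl below) and roughly uniform over the 32-bit range,
 * so scaling the top 16 bits of xcmd by the table size lands near the
 * right slot, and the two linear scans correct the estimate in either
 * direction.  Worked example with made-up numbers: for max = 400 and
 * xcmd = 0x80000000, the initial guess is (0x8000 * 400) >> 16 = 200,
 * i.e. the middle of the table, as expected for the midpoint value.
 */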
asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
unsigned long arg)
{
struct fd f = fdget(fd);
int error = -EBADF;
if (!f.file)
goto out;
/* RED-PEN how should LSM module know it's handling 32bit? */
error = security_file_ioctl(f.file, cmd, arg);
if (error)
goto out_fput;
/*
* To allow the compat_ioctl handlers to be self-contained
* we need to check the common ioctls here first.
* Just handle them with the standard handlers below.
*/
switch (cmd) {
case FIOCLEX:
case FIONCLEX:
case FIONBIO:
case FIOASYNC:
case FIOQSIZE:
break;
#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
case FS_IOC_RESVSP_32:
case FS_IOC_RESVSP64_32:
error = compat_ioctl_preallocate(f.file, compat_ptr(arg));
goto out_fput;
#else
case FS_IOC_RESVSP:
case FS_IOC_RESVSP64:
error = ioctl_preallocate(f.file, compat_ptr(arg));
goto out_fput;
#endif
case FIBMAP:
case FIGETBSZ:
case FIONREAD:
if (S_ISREG(f.file->f_path.dentry->d_inode->i_mode))
break;
/*FALL THROUGH*/
default:
if (f.file->f_op && f.file->f_op->compat_ioctl) {
error = f.file->f_op->compat_ioctl(f.file, cmd, arg);
if (error != -ENOIOCTLCMD)
goto out_fput;
}
if (!f.file->f_op || !f.file->f_op->unlocked_ioctl)
goto do_ioctl;
break;
}
if (compat_ioctl_check_table(XFORM(cmd)))
goto found_handler;
error = do_ioctl_trans(fd, cmd, arg, f.file);
if (error == -ENOIOCTLCMD)
error = -ENOTTY;
goto out_fput;
found_handler:
arg = (unsigned long)compat_ptr(arg);
do_ioctl:
error = do_vfs_ioctl(f.file, fd, cmd, arg);
out_fput:
fdput(f);
out:
return error;
}
static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
{
unsigned int a, b;
a = *(unsigned int *)p;
b = *(unsigned int *)q;
if (a > b)
return 1;
if (a < b)
return -1;
return 0;
}
static int __init init_sys32_ioctl(void)
{
sort(ioctl_pointer, ARRAY_SIZE(ioctl_pointer), sizeof(*ioctl_pointer),
init_sys32_ioctl_cmp, NULL);
return 0;
}
__initcall(init_sys32_ioctl);
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_5609_0 |
crossvul-cpp_data_bad_3444_0 | /*
* Copyright (C) 2004 IBM Corporation
*
* Authors:
* Leendert van Doorn <leendert@watson.ibm.com>
* Dave Safford <safford@watson.ibm.com>
* Reiner Sailer <sailer@watson.ibm.com>
* Kylene Hall <kjhall@us.ibm.com>
*
* Maintained by: <tpmdd-devel@lists.sourceforge.net>
*
* Device driver for TCG/TCPA TPM (trusted platform module).
* Specifications at www.trustedcomputinggroup.org
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2 of the
* License.
*
* Note, the TPM chip is not interrupt driven (only polling)
* and can have very long timeouts (minutes!). Hence the unusual
* calls to msleep.
*
*/
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include "tpm.h"
enum tpm_const {
TPM_MINOR = 224, /* officially assigned */
TPM_BUFSIZE = 4096,
TPM_NUM_DEVICES = 256,
};
enum tpm_duration {
TPM_SHORT = 0,
TPM_MEDIUM = 1,
TPM_LONG = 2,
TPM_UNDEFINED,
};
#define TPM_MAX_ORDINAL 243
#define TPM_MAX_PROTECTED_ORDINAL 12
#define TPM_PROTECTED_ORDINAL_MASK 0xFF
/*
* Bug workaround - some TPM's don't flush the most
* recently changed pcr on suspend, so force the flush
* with an extend to the selected _unused_ non-volatile pcr.
*/
static int tpm_suspend_pcr;
module_param_named(suspend_pcr, tpm_suspend_pcr, uint, 0644);
MODULE_PARM_DESC(suspend_pcr,
"PCR to use for dummy writes to faciltate flush on suspend.");
static LIST_HEAD(tpm_chip_list);
static DEFINE_SPINLOCK(driver_lock);
static DECLARE_BITMAP(dev_mask, TPM_NUM_DEVICES);
/*
* Array with one entry per ordinal defining the maximum amount
* of time the chip could take to return the result. The ordinal
* designation of short, medium or long is defined in a table in
* TCG Specification TPM Main Part 2 TPM Structures Section 17. The
* values of the SHORT, MEDIUM, and LONG durations are retrieved
* from the chip during initialization with a call to tpm_get_timeouts.
*/
static const u8 tpm_protected_ordinal_duration[TPM_MAX_PROTECTED_ORDINAL] = {
TPM_UNDEFINED, /* 0 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 5 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 10 */
TPM_SHORT,
};
static const u8 tpm_ordinal_duration[TPM_MAX_ORDINAL] = {
TPM_UNDEFINED, /* 0 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 5 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 10 */
TPM_SHORT,
TPM_MEDIUM,
TPM_LONG,
TPM_LONG,
TPM_MEDIUM, /* 15 */
TPM_SHORT,
TPM_SHORT,
TPM_MEDIUM,
TPM_LONG,
TPM_SHORT, /* 20 */
TPM_SHORT,
TPM_MEDIUM,
TPM_MEDIUM,
TPM_MEDIUM,
TPM_SHORT, /* 25 */
TPM_SHORT,
TPM_MEDIUM,
TPM_SHORT,
TPM_SHORT,
TPM_MEDIUM, /* 30 */
TPM_LONG,
TPM_MEDIUM,
TPM_SHORT,
TPM_SHORT,
TPM_SHORT, /* 35 */
TPM_MEDIUM,
TPM_MEDIUM,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_MEDIUM, /* 40 */
TPM_LONG,
TPM_MEDIUM,
TPM_SHORT,
TPM_SHORT,
TPM_SHORT, /* 45 */
TPM_SHORT,
TPM_SHORT,
TPM_SHORT,
TPM_LONG,
TPM_MEDIUM, /* 50 */
TPM_MEDIUM,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 55 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_MEDIUM, /* 60 */
TPM_MEDIUM,
TPM_MEDIUM,
TPM_SHORT,
TPM_SHORT,
TPM_MEDIUM, /* 65 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 70 */
TPM_SHORT,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 75 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_LONG, /* 80 */
TPM_UNDEFINED,
TPM_MEDIUM,
TPM_LONG,
TPM_SHORT,
TPM_UNDEFINED, /* 85 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 90 */
TPM_SHORT,
TPM_SHORT,
TPM_SHORT,
TPM_SHORT,
TPM_UNDEFINED, /* 95 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_MEDIUM, /* 100 */
TPM_SHORT,
TPM_SHORT,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 105 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 110 */
TPM_SHORT,
TPM_SHORT,
TPM_SHORT,
TPM_SHORT,
TPM_SHORT, /* 115 */
TPM_SHORT,
TPM_SHORT,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_LONG, /* 120 */
TPM_LONG,
TPM_MEDIUM,
TPM_UNDEFINED,
TPM_SHORT,
TPM_SHORT, /* 125 */
TPM_SHORT,
TPM_LONG,
TPM_SHORT,
TPM_SHORT,
TPM_SHORT, /* 130 */
TPM_MEDIUM,
TPM_UNDEFINED,
TPM_SHORT,
TPM_MEDIUM,
TPM_UNDEFINED, /* 135 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 140 */
TPM_SHORT,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 145 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 150 */
TPM_MEDIUM,
TPM_MEDIUM,
TPM_SHORT,
TPM_SHORT,
TPM_UNDEFINED, /* 155 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 160 */
TPM_SHORT,
TPM_SHORT,
TPM_SHORT,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 165 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_LONG, /* 170 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 175 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_MEDIUM, /* 180 */
TPM_SHORT,
TPM_MEDIUM,
TPM_MEDIUM,
TPM_MEDIUM,
TPM_MEDIUM, /* 185 */
TPM_SHORT,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 190 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 195 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 200 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT,
TPM_SHORT, /* 205 */
TPM_SHORT,
TPM_SHORT,
TPM_SHORT,
TPM_SHORT,
TPM_MEDIUM, /* 210 */
TPM_UNDEFINED,
TPM_MEDIUM,
TPM_MEDIUM,
TPM_MEDIUM,
TPM_UNDEFINED, /* 215 */
TPM_MEDIUM,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT,
TPM_SHORT, /* 220 */
TPM_SHORT,
TPM_SHORT,
TPM_SHORT,
TPM_SHORT,
TPM_UNDEFINED, /* 225 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 230 */
TPM_LONG,
TPM_MEDIUM,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 235 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 240 */
TPM_UNDEFINED,
TPM_MEDIUM,
};
static void user_reader_timeout(unsigned long ptr)
{
struct tpm_chip *chip = (struct tpm_chip *) ptr;
schedule_work(&chip->work);
}
static void timeout_work(struct work_struct *work)
{
struct tpm_chip *chip = container_of(work, struct tpm_chip, work);
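	/* the reader never claimed the result within the timeout set in
	 * tpm_write(): drop it and scrub the buffer so stale response
	 * data cannot be read later */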
mutex_lock(&chip->buffer_mutex);
atomic_set(&chip->data_pending, 0);
memset(chip->data_buffer, 0, TPM_BUFSIZE);
mutex_unlock(&chip->buffer_mutex);
}
/*
* Returns max number of jiffies to wait
*/
unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
u32 ordinal)
{
int duration_idx = TPM_UNDEFINED;
int duration = 0;
if (ordinal < TPM_MAX_ORDINAL)
duration_idx = tpm_ordinal_duration[ordinal];
else if ((ordinal & TPM_PROTECTED_ORDINAL_MASK) <
TPM_MAX_PROTECTED_ORDINAL)
duration_idx =
tpm_protected_ordinal_duration[ordinal &
TPM_PROTECTED_ORDINAL_MASK];
if (duration_idx != TPM_UNDEFINED)
duration = chip->vendor.duration[duration_idx];
if (duration <= 0)
return 2 * 60 * HZ;
else
return duration;
}
EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
/*
* Internal kernel interface to transmit TPM commands
*/
static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
size_t bufsiz)
{
ssize_t rc;
u32 count, ordinal;
unsigned long stop;
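	/* TPM 1.2 command header: u16 tag, then big-endian u32 length at
	 * offset 2 and big-endian u32 ordinal at offset 6 - hence the
	 * fixed offsets below */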
count = be32_to_cpu(*((__be32 *) (buf + 2)));
ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
if (count == 0)
return -ENODATA;
if (count > bufsiz) {
dev_err(chip->dev,
"invalid count value %x %zx \n", count, bufsiz);
return -E2BIG;
}
mutex_lock(&chip->tpm_mutex);
if ((rc = chip->vendor.send(chip, (u8 *) buf, count)) < 0) {
dev_err(chip->dev,
"tpm_transmit: tpm_send: error %zd\n", rc);
goto out;
}
if (chip->vendor.irq)
goto out_recv;
stop = jiffies + tpm_calc_ordinal_duration(chip, ordinal);
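	/* no interrupt support: poll the status register until the command
	 * completes, is canceled, or the ordinal-specific deadline passes */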
do {
u8 status = chip->vendor.status(chip);
if ((status & chip->vendor.req_complete_mask) ==
chip->vendor.req_complete_val)
goto out_recv;
		if (status == chip->vendor.req_canceled) {
dev_err(chip->dev, "Operation Canceled\n");
rc = -ECANCELED;
goto out;
}
msleep(TPM_TIMEOUT); /* CHECK */
rmb();
} while (time_before(jiffies, stop));
chip->vendor.cancel(chip);
dev_err(chip->dev, "Operation Timed out\n");
rc = -ETIME;
goto out;
out_recv:
rc = chip->vendor.recv(chip, (u8 *) buf, bufsiz);
if (rc < 0)
dev_err(chip->dev,
"tpm_transmit: tpm_recv: error %zd\n", rc);
out:
mutex_unlock(&chip->tpm_mutex);
return rc;
}
#define TPM_DIGEST_SIZE 20
#define TPM_ERROR_SIZE 10
#define TPM_RET_CODE_IDX 6
enum tpm_capabilities {
TPM_CAP_FLAG = cpu_to_be32(4),
TPM_CAP_PROP = cpu_to_be32(5),
CAP_VERSION_1_1 = cpu_to_be32(0x06),
CAP_VERSION_1_2 = cpu_to_be32(0x1A)
};
enum tpm_sub_capabilities {
TPM_CAP_PROP_PCR = cpu_to_be32(0x101),
TPM_CAP_PROP_MANUFACTURER = cpu_to_be32(0x103),
TPM_CAP_FLAG_PERM = cpu_to_be32(0x108),
TPM_CAP_FLAG_VOL = cpu_to_be32(0x109),
TPM_CAP_PROP_OWNER = cpu_to_be32(0x111),
TPM_CAP_PROP_TIS_TIMEOUT = cpu_to_be32(0x115),
TPM_CAP_PROP_TIS_DURATION = cpu_to_be32(0x120),
};
static ssize_t transmit_cmd(struct tpm_chip *chip, struct tpm_cmd_t *cmd,
int len, const char *desc)
{
int err;
	len = tpm_transmit(chip, (u8 *) cmd, len);
if (len < 0)
return len;
if (len == TPM_ERROR_SIZE) {
err = be32_to_cpu(cmd->header.out.return_code);
dev_dbg(chip->dev, "A TPM error (%d) occurred %s\n", err, desc);
return err;
}
return 0;
}
#define TPM_INTERNAL_RESULT_SIZE 200
#define TPM_TAG_RQU_COMMAND cpu_to_be16(193)
#define TPM_ORD_GET_CAP cpu_to_be32(101)
static const struct tpm_input_header tpm_getcap_header = {
.tag = TPM_TAG_RQU_COMMAND,
.length = cpu_to_be32(22),
.ordinal = TPM_ORD_GET_CAP
};
ssize_t tpm_getcap(struct device *dev, __be32 subcap_id, cap_t *cap,
const char *desc)
{
struct tpm_cmd_t tpm_cmd;
int rc;
struct tpm_chip *chip = dev_get_drvdata(dev);
tpm_cmd.header.in = tpm_getcap_header;
if (subcap_id == CAP_VERSION_1_1 || subcap_id == CAP_VERSION_1_2) {
tpm_cmd.params.getcap_in.cap = subcap_id;
/*subcap field not necessary */
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(0);
tpm_cmd.header.in.length -= cpu_to_be32(sizeof(__be32));
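		/* with the subcap omitted the command is 4 bytes shorter
		 * (22 -> 18) */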
} else {
if (subcap_id == TPM_CAP_FLAG_PERM ||
subcap_id == TPM_CAP_FLAG_VOL)
tpm_cmd.params.getcap_in.cap = TPM_CAP_FLAG;
else
tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = subcap_id;
}
rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, desc);
if (!rc)
*cap = tpm_cmd.params.getcap_out.cap;
return rc;
}
void tpm_gen_interrupt(struct tpm_chip *chip)
{
struct tpm_cmd_t tpm_cmd;
ssize_t rc;
tpm_cmd.header.in = tpm_getcap_header;
tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
"attempting to determine the timeouts");
}
EXPORT_SYMBOL_GPL(tpm_gen_interrupt);
void tpm_get_timeouts(struct tpm_chip *chip)
{
struct tpm_cmd_t tpm_cmd;
struct timeout_t *timeout_cap;
struct duration_t *duration_cap;
ssize_t rc;
u32 timeout;
tpm_cmd.header.in = tpm_getcap_header;
tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
"attempting to determine the timeouts");
if (rc)
goto duration;
if (be32_to_cpu(tpm_cmd.header.out.length)
!= 4 * sizeof(u32))
goto duration;
timeout_cap = &tpm_cmd.params.getcap_out.cap.timeout;
/* Don't overwrite default if value is 0 */
timeout = be32_to_cpu(timeout_cap->a);
if (timeout)
chip->vendor.timeout_a = usecs_to_jiffies(timeout);
timeout = be32_to_cpu(timeout_cap->b);
if (timeout)
chip->vendor.timeout_b = usecs_to_jiffies(timeout);
timeout = be32_to_cpu(timeout_cap->c);
if (timeout)
chip->vendor.timeout_c = usecs_to_jiffies(timeout);
timeout = be32_to_cpu(timeout_cap->d);
if (timeout)
chip->vendor.timeout_d = usecs_to_jiffies(timeout);
duration:
tpm_cmd.header.in = tpm_getcap_header;
tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_DURATION;
rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
"attempting to determine the durations");
if (rc)
return;
if (be32_to_cpu(tpm_cmd.header.out.return_code)
!= 3 * sizeof(u32))
return;
duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
chip->vendor.duration[TPM_SHORT] =
usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
/* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above
* value wrong and apparently reports msecs rather than usecs. So we
* fix up the resulting too-small TPM_SHORT value to make things work.
*/
if (chip->vendor.duration[TPM_SHORT] < (HZ/100))
chip->vendor.duration[TPM_SHORT] = HZ;
chip->vendor.duration[TPM_MEDIUM] =
usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium));
chip->vendor.duration[TPM_LONG] =
usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long));
}
EXPORT_SYMBOL_GPL(tpm_get_timeouts);
void tpm_continue_selftest(struct tpm_chip *chip)
{
u8 data[] = {
0, 193, /* TPM_TAG_RQU_COMMAND */
0, 0, 0, 10, /* length */
		0, 0, 0, 83,	/* TPM_ORD_ContinueSelfTest */
};
tpm_transmit(chip, data, sizeof(data));
}
EXPORT_SYMBOL_GPL(tpm_continue_selftest);
ssize_t tpm_show_enabled(struct device * dev, struct device_attribute * attr,
char *buf)
{
cap_t cap;
ssize_t rc;
rc = tpm_getcap(dev, TPM_CAP_FLAG_PERM, &cap,
"attempting to determine the permanent enabled state");
if (rc)
return 0;
rc = sprintf(buf, "%d\n", !cap.perm_flags.disable);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_show_enabled);
ssize_t tpm_show_active(struct device * dev, struct device_attribute * attr,
char *buf)
{
cap_t cap;
ssize_t rc;
rc = tpm_getcap(dev, TPM_CAP_FLAG_PERM, &cap,
"attempting to determine the permanent active state");
if (rc)
return 0;
rc = sprintf(buf, "%d\n", !cap.perm_flags.deactivated);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_show_active);
ssize_t tpm_show_owned(struct device * dev, struct device_attribute * attr,
char *buf)
{
cap_t cap;
ssize_t rc;
rc = tpm_getcap(dev, TPM_CAP_PROP_OWNER, &cap,
"attempting to determine the owner state");
if (rc)
return 0;
rc = sprintf(buf, "%d\n", cap.owned);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_show_owned);
ssize_t tpm_show_temp_deactivated(struct device * dev,
struct device_attribute * attr, char *buf)
{
cap_t cap;
ssize_t rc;
rc = tpm_getcap(dev, TPM_CAP_FLAG_VOL, &cap,
"attempting to determine the temporary state");
if (rc)
return 0;
rc = sprintf(buf, "%d\n", cap.stclear_flags.deactivated);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_show_temp_deactivated);
/*
* tpm_chip_find_get - return tpm_chip for given chip number
*/
static struct tpm_chip *tpm_chip_find_get(int chip_num)
{
struct tpm_chip *pos, *chip = NULL;
rcu_read_lock();
list_for_each_entry_rcu(pos, &tpm_chip_list, list) {
if (chip_num != TPM_ANY_NUM && chip_num != pos->dev_num)
continue;
if (try_module_get(pos->dev->driver->owner)) {
chip = pos;
break;
}
}
rcu_read_unlock();
return chip;
}
#define TPM_ORDINAL_PCRREAD cpu_to_be32(21)
#define READ_PCR_RESULT_SIZE 30
static struct tpm_input_header pcrread_header = {
.tag = TPM_TAG_RQU_COMMAND,
.length = cpu_to_be32(14),
.ordinal = TPM_ORDINAL_PCRREAD
};
int __tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
{
int rc;
struct tpm_cmd_t cmd;
cmd.header.in = pcrread_header;
cmd.params.pcrread_in.pcr_idx = cpu_to_be32(pcr_idx);
rc = transmit_cmd(chip, &cmd, READ_PCR_RESULT_SIZE,
"attempting to read a pcr value");
if (rc == 0)
memcpy(res_buf, cmd.params.pcrread_out.pcr_result,
TPM_DIGEST_SIZE);
return rc;
}
/**
* tpm_pcr_read - read a pcr value
* @chip_num: tpm idx # or ANY
* @pcr_idx: pcr idx to retrieve
* @res_buf: TPM_PCR value
* size of res_buf is 20 bytes (or NULL if you don't care)
*
 * The TPM driver should be built-in, but for whatever reason it
 * isn't, so protect against the chip disappearing by incrementing
 * the module usage count.
*/
int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf)
{
struct tpm_chip *chip;
int rc;
chip = tpm_chip_find_get(chip_num);
if (chip == NULL)
return -ENODEV;
rc = __tpm_pcr_read(chip, pcr_idx, res_buf);
tpm_chip_put(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_pcr_read);
/**
* tpm_pcr_extend - extend pcr value with hash
 * @chip_num: tpm idx # or ANY
* @pcr_idx: pcr idx to extend
* @hash: hash value used to extend pcr value
*
 * The TPM driver should be built-in, but for whatever reason it
 * isn't, so protect against the chip disappearing by incrementing
 * the module usage count.
*/
#define TPM_ORD_PCR_EXTEND cpu_to_be32(20)
#define EXTEND_PCR_RESULT_SIZE 34
static struct tpm_input_header pcrextend_header = {
.tag = TPM_TAG_RQU_COMMAND,
.length = cpu_to_be32(34),
.ordinal = TPM_ORD_PCR_EXTEND
};
int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
{
struct tpm_cmd_t cmd;
int rc;
struct tpm_chip *chip;
chip = tpm_chip_find_get(chip_num);
if (chip == NULL)
return -ENODEV;
cmd.header.in = pcrextend_header;
cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(pcr_idx);
memcpy(cmd.params.pcrextend_in.hash, hash, TPM_DIGEST_SIZE);
rc = transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE,
"attempting extend a PCR value");
tpm_chip_put(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_pcr_extend);
int tpm_send(u32 chip_num, void *cmd, size_t buflen)
{
struct tpm_chip *chip;
int rc;
chip = tpm_chip_find_get(chip_num);
if (chip == NULL)
return -ENODEV;
rc = transmit_cmd(chip, cmd, buflen, "attempting tpm_cmd");
tpm_chip_put(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_send);
ssize_t tpm_show_pcrs(struct device *dev, struct device_attribute *attr,
char *buf)
{
cap_t cap;
u8 digest[TPM_DIGEST_SIZE];
ssize_t rc;
int i, j, num_pcrs;
char *str = buf;
struct tpm_chip *chip = dev_get_drvdata(dev);
rc = tpm_getcap(dev, TPM_CAP_PROP_PCR, &cap,
"attempting to determine the number of PCRS");
if (rc)
return 0;
num_pcrs = be32_to_cpu(cap.num_pcrs);
for (i = 0; i < num_pcrs; i++) {
rc = __tpm_pcr_read(chip, i, digest);
if (rc)
break;
str += sprintf(str, "PCR-%02d: ", i);
for (j = 0; j < TPM_DIGEST_SIZE; j++)
str += sprintf(str, "%02X ", digest[j]);
str += sprintf(str, "\n");
}
return str - buf;
}
EXPORT_SYMBOL_GPL(tpm_show_pcrs);
#define READ_PUBEK_RESULT_SIZE 314
#define TPM_ORD_READPUBEK cpu_to_be32(124)
struct tpm_input_header tpm_readpubek_header = {
.tag = TPM_TAG_RQU_COMMAND,
.length = cpu_to_be32(30),
.ordinal = TPM_ORD_READPUBEK
};
ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
char *buf)
{
u8 *data;
struct tpm_cmd_t tpm_cmd;
ssize_t err;
int i, rc;
char *str = buf;
struct tpm_chip *chip = dev_get_drvdata(dev);
tpm_cmd.header.in = tpm_readpubek_header;
err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
"attempting to read the PUBEK");
if (err)
goto out;
/*
ignore header 10 bytes
algorithm 32 bits (1 == RSA )
encscheme 16 bits
sigscheme 16 bits
parameters (RSA 12->bytes: keybit, #primes, expbit)
keylenbytes 32 bits
256 byte modulus
ignore checksum 20 bytes
*/
data = tpm_cmd.params.readpubek_out_buffer;
str +=
sprintf(str,
"Algorithm: %02X %02X %02X %02X\nEncscheme: %02X %02X\n"
"Sigscheme: %02X %02X\nParameters: %02X %02X %02X %02X"
" %02X %02X %02X %02X %02X %02X %02X %02X\n"
"Modulus length: %d\nModulus: \n",
data[10], data[11], data[12], data[13], data[14],
data[15], data[16], data[17], data[22], data[23],
data[24], data[25], data[26], data[27], data[28],
data[29], data[30], data[31], data[32], data[33],
be32_to_cpu(*((__be32 *) (data + 34))));
for (i = 0; i < 256; i++) {
str += sprintf(str, "%02X ", data[i + 38]);
if ((i + 1) % 16 == 0)
str += sprintf(str, "\n");
}
out:
rc = str - buf;
return rc;
}
EXPORT_SYMBOL_GPL(tpm_show_pubek);
ssize_t tpm_show_caps(struct device *dev, struct device_attribute *attr,
char *buf)
{
cap_t cap;
ssize_t rc;
char *str = buf;
rc = tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap,
"attempting to determine the manufacturer");
if (rc)
return 0;
str += sprintf(str, "Manufacturer: 0x%x\n",
be32_to_cpu(cap.manufacturer_id));
rc = tpm_getcap(dev, CAP_VERSION_1_1, &cap,
"attempting to determine the 1.1 version");
if (rc)
return 0;
str += sprintf(str,
"TCG version: %d.%d\nFirmware version: %d.%d\n",
cap.tpm_version.Major, cap.tpm_version.Minor,
cap.tpm_version.revMajor, cap.tpm_version.revMinor);
return str - buf;
}
EXPORT_SYMBOL_GPL(tpm_show_caps);
ssize_t tpm_show_caps_1_2(struct device * dev,
struct device_attribute * attr, char *buf)
{
cap_t cap;
ssize_t rc;
char *str = buf;
rc = tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap,
"attempting to determine the manufacturer");
if (rc)
return 0;
str += sprintf(str, "Manufacturer: 0x%x\n",
be32_to_cpu(cap.manufacturer_id));
rc = tpm_getcap(dev, CAP_VERSION_1_2, &cap,
"attempting to determine the 1.2 version");
if (rc)
return 0;
str += sprintf(str,
"TCG version: %d.%d\nFirmware version: %d.%d\n",
cap.tpm_version_1_2.Major, cap.tpm_version_1_2.Minor,
cap.tpm_version_1_2.revMajor,
cap.tpm_version_1_2.revMinor);
return str - buf;
}
EXPORT_SYMBOL_GPL(tpm_show_caps_1_2);
ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
if (chip == NULL)
return 0;
chip->vendor.cancel(chip);
return count;
}
EXPORT_SYMBOL_GPL(tpm_store_cancel);
/*
* Device file system interface to the TPM
*
* It's assured that the chip will be opened just once,
* by the check of is_open variable, which is protected
* by driver_lock.
*/
int tpm_open(struct inode *inode, struct file *file)
{
int minor = iminor(inode);
struct tpm_chip *chip = NULL, *pos;
rcu_read_lock();
list_for_each_entry_rcu(pos, &tpm_chip_list, list) {
if (pos->vendor.miscdev.minor == minor) {
chip = pos;
get_device(chip->dev);
break;
}
}
rcu_read_unlock();
if (!chip)
return -ENODEV;
if (test_and_set_bit(0, &chip->is_open)) {
dev_dbg(chip->dev, "Another process owns this TPM\n");
put_device(chip->dev);
return -EBUSY;
}
chip->data_buffer = kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL);
if (chip->data_buffer == NULL) {
clear_bit(0, &chip->is_open);
put_device(chip->dev);
return -ENOMEM;
}
atomic_set(&chip->data_pending, 0);
file->private_data = chip;
return 0;
}
EXPORT_SYMBOL_GPL(tpm_open);
/*
* Called on file close
*/
int tpm_release(struct inode *inode, struct file *file)
{
struct tpm_chip *chip = file->private_data;
del_singleshot_timer_sync(&chip->user_read_timer);
flush_work_sync(&chip->work);
file->private_data = NULL;
atomic_set(&chip->data_pending, 0);
kfree(chip->data_buffer);
clear_bit(0, &chip->is_open);
put_device(chip->dev);
return 0;
}
EXPORT_SYMBOL_GPL(tpm_release);
ssize_t tpm_write(struct file *file, const char __user *buf,
size_t size, loff_t *off)
{
struct tpm_chip *chip = file->private_data;
size_t in_size = size, out_size;
/* cannot perform a write until the read has cleared
either via tpm_read or a user_read_timer timeout */
while (atomic_read(&chip->data_pending) != 0)
msleep(TPM_TIMEOUT);
mutex_lock(&chip->buffer_mutex);
if (in_size > TPM_BUFSIZE)
in_size = TPM_BUFSIZE;
if (copy_from_user
(chip->data_buffer, (void __user *) buf, in_size)) {
mutex_unlock(&chip->buffer_mutex);
return -EFAULT;
}
/* atomic tpm command send and result receive */
out_size = tpm_transmit(chip, chip->data_buffer, TPM_BUFSIZE);
atomic_set(&chip->data_pending, out_size);
mutex_unlock(&chip->buffer_mutex);
/* Set a timeout by which the reader must come claim the result */
mod_timer(&chip->user_read_timer, jiffies + (60 * HZ));
return in_size;
}
EXPORT_SYMBOL_GPL(tpm_write);
ssize_t tpm_read(struct file *file, char __user *buf,
size_t size, loff_t *off)
{
struct tpm_chip *chip = file->private_data;
ssize_t ret_size;
del_singleshot_timer_sync(&chip->user_read_timer);
flush_work_sync(&chip->work);
ret_size = atomic_read(&chip->data_pending);
atomic_set(&chip->data_pending, 0);
if (ret_size > 0) { /* relay data */
if (size < ret_size)
ret_size = size;
mutex_lock(&chip->buffer_mutex);
if (copy_to_user(buf, chip->data_buffer, ret_size))
ret_size = -EFAULT;
mutex_unlock(&chip->buffer_mutex);
}
return ret_size;
}
EXPORT_SYMBOL_GPL(tpm_read);
void tpm_remove_hardware(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
if (chip == NULL) {
dev_err(dev, "No device data found\n");
return;
}
spin_lock(&driver_lock);
list_del_rcu(&chip->list);
spin_unlock(&driver_lock);
synchronize_rcu();
misc_deregister(&chip->vendor.miscdev);
sysfs_remove_group(&dev->kobj, chip->vendor.attr_group);
tpm_bios_log_teardown(chip->bios_dir);
/* write it this way to be explicit (chip->dev == dev) */
put_device(chip->dev);
}
EXPORT_SYMBOL_GPL(tpm_remove_hardware);
#define TPM_ORD_SAVESTATE cpu_to_be32(152)
#define SAVESTATE_RESULT_SIZE 10
static struct tpm_input_header savestate_header = {
.tag = TPM_TAG_RQU_COMMAND,
.length = cpu_to_be32(10),
.ordinal = TPM_ORD_SAVESTATE
};
/*
* We are about to suspend. Save the TPM state
* so that it can be restored.
*/
int tpm_pm_suspend(struct device *dev, pm_message_t pm_state)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
struct tpm_cmd_t cmd;
int rc;
u8 dummy_hash[TPM_DIGEST_SIZE] = { 0 };
if (chip == NULL)
return -ENODEV;
/* for buggy tpm, flush pcrs with extend to selected dummy */
if (tpm_suspend_pcr) {
cmd.header.in = pcrextend_header;
cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(tpm_suspend_pcr);
memcpy(cmd.params.pcrextend_in.hash, dummy_hash,
TPM_DIGEST_SIZE);
rc = transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE,
"extending dummy pcr before suspend");
}
/* now do the actual savestate */
cmd.header.in = savestate_header;
rc = transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE,
"sending savestate before suspend");
return rc;
}
EXPORT_SYMBOL_GPL(tpm_pm_suspend);
/*
 * Resume from a power save. The BIOS already restored
* the TPM state.
*/
int tpm_pm_resume(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
if (chip == NULL)
return -ENODEV;
return 0;
}
EXPORT_SYMBOL_GPL(tpm_pm_resume);
/* In case vendor provided release function, call it too.*/
void tpm_dev_vendor_release(struct tpm_chip *chip)
{
if (chip->vendor.release)
chip->vendor.release(chip->dev);
clear_bit(chip->dev_num, dev_mask);
kfree(chip->vendor.miscdev.name);
}
EXPORT_SYMBOL_GPL(tpm_dev_vendor_release);
/*
* Once all references to platform device are down to 0,
* release all allocated structures.
*/
void tpm_dev_release(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
tpm_dev_vendor_release(chip);
chip->release(dev);
kfree(chip);
}
EXPORT_SYMBOL_GPL(tpm_dev_release);
/*
* Called from tpm_<specific>.c probe function only for devices
 * the driver has determined it should claim. Prior to calling
 * this function the specific probe function has called
 * pci_enable_device. Upon errant exit from this function, the
 * specific probe function should call pci_disable_device.
*/
struct tpm_chip *tpm_register_hardware(struct device *dev,
const struct tpm_vendor_specific *entry)
{
#define DEVNAME_SIZE 7
char *devname;
struct tpm_chip *chip;
/* Driver specific per-device data */
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
devname = kmalloc(DEVNAME_SIZE, GFP_KERNEL);
if (chip == NULL || devname == NULL)
goto out_free;
mutex_init(&chip->buffer_mutex);
mutex_init(&chip->tpm_mutex);
INIT_LIST_HEAD(&chip->list);
INIT_WORK(&chip->work, timeout_work);
setup_timer(&chip->user_read_timer, user_reader_timeout,
(unsigned long)chip);
memcpy(&chip->vendor, entry, sizeof(struct tpm_vendor_specific));
chip->dev_num = find_first_zero_bit(dev_mask, TPM_NUM_DEVICES);
if (chip->dev_num >= TPM_NUM_DEVICES) {
dev_err(dev, "No available tpm device numbers\n");
goto out_free;
} else if (chip->dev_num == 0)
chip->vendor.miscdev.minor = TPM_MINOR;
else
chip->vendor.miscdev.minor = MISC_DYNAMIC_MINOR;
set_bit(chip->dev_num, dev_mask);
scnprintf(devname, DEVNAME_SIZE, "%s%d", "tpm", chip->dev_num);
chip->vendor.miscdev.name = devname;
chip->vendor.miscdev.parent = dev;
chip->dev = get_device(dev);
chip->release = dev->release;
dev->release = tpm_dev_release;
dev_set_drvdata(dev, chip);
if (misc_register(&chip->vendor.miscdev)) {
dev_err(chip->dev,
"unable to misc_register %s, minor %d\n",
chip->vendor.miscdev.name,
chip->vendor.miscdev.minor);
put_device(chip->dev);
return NULL;
}
if (sysfs_create_group(&dev->kobj, chip->vendor.attr_group)) {
misc_deregister(&chip->vendor.miscdev);
put_device(chip->dev);
return NULL;
}
chip->bios_dir = tpm_bios_log_setup(devname);
/* Make chip available */
spin_lock(&driver_lock);
list_add_rcu(&chip->list, &tpm_chip_list);
spin_unlock(&driver_lock);
return chip;
out_free:
kfree(chip);
kfree(devname);
return NULL;
}
EXPORT_SYMBOL_GPL(tpm_register_hardware);
MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
MODULE_DESCRIPTION("TPM Driver");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_3444_0 |
crossvul-cpp_data_good_3366_0 | /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
* Copyright (c) 2016 Facebook
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
/* bpf_check() is a static code analyzer that walks eBPF program
* instruction by instruction and updates register/stack state.
* All paths of conditional branches are analyzed until 'bpf_exit' insn.
*
* The first pass is depth-first-search to check that the program is a DAG.
* It rejects the following programs:
* - larger than BPF_MAXINSNS insns
* - if loop is present (detected via back-edge)
* - unreachable insns exist (shouldn't be a forest. program = one function)
* - out of bounds or malformed jumps
* The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
* analysis is limited to 64k insn, which may be hit even if total number of
 * insn is less than 4K, but there are too many branches that change stack/regs.
* Number of 'branches to be analyzed' is limited to 1k
*
* On entry to each instruction, each register has a type, and the instruction
* changes the types of the registers depending on instruction semantics.
* If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
* copied to R1.
*
* All registers are 64-bit.
* R0 - return register
* R1-R5 argument passing registers
* R6-R9 callee saved registers
* R10 - frame pointer read-only
*
* At the start of BPF program the register R1 contains a pointer to bpf_context
* and has type PTR_TO_CTX.
*
* Verifier tracks arithmetic operations on pointers in case:
* BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
* BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
* 1st insn copies R10 (which has FRAME_PTR) type into R1
* and 2nd arithmetic instruction is pattern matched to recognize
* that it wants to construct a pointer to some element within stack.
* So after 2nd insn, the register R1 has type PTR_TO_STACK
* (and -20 constant is saved for further stack bounds checking).
* Meaning that this reg is a pointer to stack plus known immediate constant.
*
* Most of the time the registers have UNKNOWN_VALUE type, which
* means the register has some value, but it's not a valid pointer.
* (like pointer plus pointer becomes UNKNOWN_VALUE type)
*
* When verifier sees load or store instructions the type of base register
* can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, FRAME_PTR. These are three pointer
* types recognized by check_mem_access() function.
*
* PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
* and the range of [ptr, ptr + map's value_size) is accessible.
*
* registers used to pass values to function calls are checked against
* function argument constraints.
*
* ARG_PTR_TO_MAP_KEY is one of such argument constraints.
* It means that the register type passed to this function must be
* PTR_TO_STACK and it will be used inside the function as
* 'pointer to map element key'
*
* For example the argument constraints for bpf_map_lookup_elem():
* .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
* .arg1_type = ARG_CONST_MAP_PTR,
* .arg2_type = ARG_PTR_TO_MAP_KEY,
*
* ret_type says that this function returns 'pointer to map elem value or null'
* function expects 1st argument to be a const pointer to 'struct bpf_map' and
* 2nd argument should be a pointer to stack, which will be used inside
* the helper function as a pointer to map element key.
*
* On the kernel side the helper function looks like:
* u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
* {
* struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
* void *key = (void *) (unsigned long) r2;
* void *value;
*
* here kernel can access 'key' and 'map' pointers safely, knowing that
* [key, key + map->key_size) bytes are valid and were initialized on
* the stack of eBPF program.
* }
*
* Corresponding eBPF program may look like:
* BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), // after this insn R2 type is FRAME_PTR
* BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
* BPF_LD_MAP_FD(BPF_REG_1, map_fd), // after this insn R1 type is CONST_PTR_TO_MAP
* BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
* here verifier looks at prototype of map_lookup_elem() and sees:
* .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
* Now verifier knows that this map has key of R1->map_ptr->key_size bytes
*
* Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
* Now verifier checks that [R2, R2 + map's key_size) are within stack limits
* and were initialized prior to this call.
* If it's ok, then verifier allows this BPF_CALL insn and looks at
* .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
* R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either a pointer to map value or NULL.
*
* When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
* insn, the register holding that pointer in the true branch changes state to
* PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
* branch. See check_cond_jmp_op().
*
* After the call R0 is set to return type of the function and registers R1-R5
* are set to NOT_INIT to indicate that they are no longer readable.
*/
/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
* before processing instruction 'insn_idx'
* and after processing instruction 'prev_insn_idx'
*/
struct bpf_verifier_state st;
int insn_idx;
int prev_insn_idx;
struct bpf_verifier_stack_elem *next;
};
#define BPF_COMPLEXITY_LIMIT_INSNS 65536
#define BPF_COMPLEXITY_LIMIT_STACK 1024
#define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA)
struct bpf_call_arg_meta {
struct bpf_map *map_ptr;
bool raw_mode;
bool pkt_access;
int regno;
int access_size;
};
/* verbose verifier prints what it's seeing
* bpf_check() is called under lock, so no race to access these global vars
*/
static u32 log_level, log_size, log_len;
static char *log_buf;
static DEFINE_MUTEX(bpf_verifier_lock);
/* log_level controls verbosity level of eBPF verifier.
* verbose() is used to dump the verification trace to the log, so the user
* can figure out what's wrong with the program
*/
static __printf(1, 2) void verbose(const char *fmt, ...)
{
va_list args;
if (log_level == 0 || log_len >= log_size - 1)
return;
va_start(args, fmt);
log_len += vscnprintf(log_buf + log_len, log_size - log_len, fmt, args);
va_end(args);
}
/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
[NOT_INIT] = "?",
[UNKNOWN_VALUE] = "inv",
[PTR_TO_CTX] = "ctx",
[CONST_PTR_TO_MAP] = "map_ptr",
[PTR_TO_MAP_VALUE] = "map_value",
[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
[PTR_TO_MAP_VALUE_ADJ] = "map_value_adj",
[FRAME_PTR] = "fp",
[PTR_TO_STACK] = "fp",
[CONST_IMM] = "imm",
[PTR_TO_PACKET] = "pkt",
[PTR_TO_PACKET_END] = "pkt_end",
};
#define __BPF_FUNC_STR_FN(x) [BPF_FUNC_ ## x] = __stringify(bpf_ ## x)
static const char * const func_id_str[] = {
__BPF_FUNC_MAPPER(__BPF_FUNC_STR_FN)
};
#undef __BPF_FUNC_STR_FN
static const char *func_id_name(int id)
{
BUILD_BUG_ON(ARRAY_SIZE(func_id_str) != __BPF_FUNC_MAX_ID);
if (id >= 0 && id < __BPF_FUNC_MAX_ID && func_id_str[id])
return func_id_str[id];
else
return "unknown";
}
static void print_verifier_state(struct bpf_verifier_state *state)
{
struct bpf_reg_state *reg;
enum bpf_reg_type t;
int i;
for (i = 0; i < MAX_BPF_REG; i++) {
reg = &state->regs[i];
t = reg->type;
if (t == NOT_INIT)
continue;
verbose(" R%d=%s", i, reg_type_str[t]);
if (t == CONST_IMM || t == PTR_TO_STACK)
verbose("%lld", reg->imm);
else if (t == PTR_TO_PACKET)
verbose("(id=%d,off=%d,r=%d)",
reg->id, reg->off, reg->range);
else if (t == UNKNOWN_VALUE && reg->imm)
verbose("%lld", reg->imm);
else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE ||
t == PTR_TO_MAP_VALUE_OR_NULL ||
t == PTR_TO_MAP_VALUE_ADJ)
verbose("(ks=%d,vs=%d,id=%u)",
reg->map_ptr->key_size,
reg->map_ptr->value_size,
reg->id);
if (reg->min_value != BPF_REGISTER_MIN_RANGE)
verbose(",min_value=%lld",
(long long)reg->min_value);
if (reg->max_value != BPF_REGISTER_MAX_RANGE)
verbose(",max_value=%llu",
(unsigned long long)reg->max_value);
}
for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
if (state->stack_slot_type[i] == STACK_SPILL)
verbose(" fp%d=%s", -MAX_BPF_STACK + i,
reg_type_str[state->spilled_regs[i / BPF_REG_SIZE].type]);
}
verbose("\n");
}
static const char *const bpf_class_string[] = {
[BPF_LD] = "ld",
[BPF_LDX] = "ldx",
[BPF_ST] = "st",
[BPF_STX] = "stx",
[BPF_ALU] = "alu",
[BPF_JMP] = "jmp",
[BPF_RET] = "BUG",
[BPF_ALU64] = "alu64",
};
static const char *const bpf_alu_string[16] = {
[BPF_ADD >> 4] = "+=",
[BPF_SUB >> 4] = "-=",
[BPF_MUL >> 4] = "*=",
[BPF_DIV >> 4] = "/=",
[BPF_OR >> 4] = "|=",
[BPF_AND >> 4] = "&=",
[BPF_LSH >> 4] = "<<=",
[BPF_RSH >> 4] = ">>=",
[BPF_NEG >> 4] = "neg",
[BPF_MOD >> 4] = "%=",
[BPF_XOR >> 4] = "^=",
[BPF_MOV >> 4] = "=",
[BPF_ARSH >> 4] = "s>>=",
[BPF_END >> 4] = "endian",
};
static const char *const bpf_ldst_string[] = {
[BPF_W >> 3] = "u32",
[BPF_H >> 3] = "u16",
[BPF_B >> 3] = "u8",
[BPF_DW >> 3] = "u64",
};
static const char *const bpf_jmp_string[16] = {
[BPF_JA >> 4] = "jmp",
[BPF_JEQ >> 4] = "==",
[BPF_JGT >> 4] = ">",
[BPF_JGE >> 4] = ">=",
[BPF_JSET >> 4] = "&",
[BPF_JNE >> 4] = "!=",
[BPF_JSGT >> 4] = "s>",
[BPF_JSGE >> 4] = "s>=",
[BPF_CALL >> 4] = "call",
[BPF_EXIT >> 4] = "exit",
};
static void print_bpf_insn(const struct bpf_verifier_env *env,
const struct bpf_insn *insn)
{
u8 class = BPF_CLASS(insn->code);
if (class == BPF_ALU || class == BPF_ALU64) {
if (BPF_SRC(insn->code) == BPF_X)
verbose("(%02x) %sr%d %s %sr%d\n",
insn->code, class == BPF_ALU ? "(u32) " : "",
insn->dst_reg,
bpf_alu_string[BPF_OP(insn->code) >> 4],
class == BPF_ALU ? "(u32) " : "",
insn->src_reg);
else
verbose("(%02x) %sr%d %s %s%d\n",
insn->code, class == BPF_ALU ? "(u32) " : "",
insn->dst_reg,
bpf_alu_string[BPF_OP(insn->code) >> 4],
class == BPF_ALU ? "(u32) " : "",
insn->imm);
} else if (class == BPF_STX) {
if (BPF_MODE(insn->code) == BPF_MEM)
verbose("(%02x) *(%s *)(r%d %+d) = r%d\n",
insn->code,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->dst_reg,
insn->off, insn->src_reg);
else if (BPF_MODE(insn->code) == BPF_XADD)
verbose("(%02x) lock *(%s *)(r%d %+d) += r%d\n",
insn->code,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->dst_reg, insn->off,
insn->src_reg);
else
verbose("BUG_%02x\n", insn->code);
} else if (class == BPF_ST) {
if (BPF_MODE(insn->code) != BPF_MEM) {
verbose("BUG_st_%02x\n", insn->code);
return;
}
verbose("(%02x) *(%s *)(r%d %+d) = %d\n",
insn->code,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->dst_reg,
insn->off, insn->imm);
} else if (class == BPF_LDX) {
if (BPF_MODE(insn->code) != BPF_MEM) {
verbose("BUG_ldx_%02x\n", insn->code);
return;
}
verbose("(%02x) r%d = *(%s *)(r%d %+d)\n",
insn->code, insn->dst_reg,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->src_reg, insn->off);
} else if (class == BPF_LD) {
if (BPF_MODE(insn->code) == BPF_ABS) {
verbose("(%02x) r0 = *(%s *)skb[%d]\n",
insn->code,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->imm);
} else if (BPF_MODE(insn->code) == BPF_IND) {
verbose("(%02x) r0 = *(%s *)skb[r%d + %d]\n",
insn->code,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->src_reg, insn->imm);
} else if (BPF_MODE(insn->code) == BPF_IMM &&
BPF_SIZE(insn->code) == BPF_DW) {
/* At this point, we already made sure that the second
* part of the ldimm64 insn is accessible.
*/
u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
bool map_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD;
if (map_ptr && !env->allow_ptr_leaks)
imm = 0;
verbose("(%02x) r%d = 0x%llx\n", insn->code,
insn->dst_reg, (unsigned long long)imm);
} else {
verbose("BUG_ld_%02x\n", insn->code);
return;
}
} else if (class == BPF_JMP) {
u8 opcode = BPF_OP(insn->code);
if (opcode == BPF_CALL) {
verbose("(%02x) call %s#%d\n", insn->code,
func_id_name(insn->imm), insn->imm);
} else if (insn->code == (BPF_JMP | BPF_JA)) {
verbose("(%02x) goto pc%+d\n",
insn->code, insn->off);
} else if (insn->code == (BPF_JMP | BPF_EXIT)) {
verbose("(%02x) exit\n", insn->code);
} else if (BPF_SRC(insn->code) == BPF_X) {
verbose("(%02x) if r%d %s r%d goto pc%+d\n",
insn->code, insn->dst_reg,
bpf_jmp_string[BPF_OP(insn->code) >> 4],
insn->src_reg, insn->off);
} else {
verbose("(%02x) if r%d %s 0x%x goto pc%+d\n",
insn->code, insn->dst_reg,
bpf_jmp_string[BPF_OP(insn->code) >> 4],
insn->imm, insn->off);
}
} else {
verbose("(%02x) %s\n", insn->code, bpf_class_string[class]);
}
}
static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx)
{
struct bpf_verifier_stack_elem *elem;
int insn_idx;
if (env->head == NULL)
return -1;
memcpy(&env->cur_state, &env->head->st, sizeof(env->cur_state));
insn_idx = env->head->insn_idx;
if (prev_insn_idx)
*prev_insn_idx = env->head->prev_insn_idx;
elem = env->head->next;
kfree(env->head);
env->head = elem;
env->stack_size--;
return insn_idx;
}
static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx)
{
struct bpf_verifier_stack_elem *elem;
elem = kmalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
if (!elem)
goto err;
memcpy(&elem->st, &env->cur_state, sizeof(env->cur_state));
elem->insn_idx = insn_idx;
elem->prev_insn_idx = prev_insn_idx;
elem->next = env->head;
env->head = elem;
env->stack_size++;
if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
verbose("BPF program is too complex\n");
goto err;
}
return &elem->st;
err:
/* pop all elements and return */
while (pop_stack(env, NULL) >= 0);
return NULL;
}
#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};
static void init_reg_state(struct bpf_reg_state *regs)
{
int i;
for (i = 0; i < MAX_BPF_REG; i++) {
regs[i].type = NOT_INIT;
regs[i].imm = 0;
regs[i].min_value = BPF_REGISTER_MIN_RANGE;
regs[i].max_value = BPF_REGISTER_MAX_RANGE;
}
/* frame pointer */
regs[BPF_REG_FP].type = FRAME_PTR;
/* 1st arg to a function */
regs[BPF_REG_1].type = PTR_TO_CTX;
}
static void __mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno)
{
regs[regno].type = UNKNOWN_VALUE;
regs[regno].id = 0;
regs[regno].imm = 0;
}
static void mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno)
{
BUG_ON(regno >= MAX_BPF_REG);
__mark_reg_unknown_value(regs, regno);
}
static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno)
{
regs[regno].min_value = BPF_REGISTER_MIN_RANGE;
regs[regno].max_value = BPF_REGISTER_MAX_RANGE;
}
static void mark_reg_unknown_value_and_range(struct bpf_reg_state *regs,
u32 regno)
{
mark_reg_unknown_value(regs, regno);
reset_reg_range_values(regs, regno);
}
enum reg_arg_type {
SRC_OP, /* register is used as source operand */
DST_OP, /* register is used as destination operand */
DST_OP_NO_MARK /* same as above, check only, don't mark */
};
static int check_reg_arg(struct bpf_reg_state *regs, u32 regno,
enum reg_arg_type t)
{
if (regno >= MAX_BPF_REG) {
verbose("R%d is invalid\n", regno);
return -EINVAL;
}
if (t == SRC_OP) {
/* check whether register used as source operand can be read */
if (regs[regno].type == NOT_INIT) {
verbose("R%d !read_ok\n", regno);
return -EACCES;
}
} else {
/* check whether register used as dest operand can be written to */
if (regno == BPF_REG_FP) {
verbose("frame pointer is read only\n");
return -EACCES;
}
if (t == DST_OP)
mark_reg_unknown_value(regs, regno);
}
return 0;
}
static int bpf_size_to_bytes(int bpf_size)
{
if (bpf_size == BPF_W)
return 4;
else if (bpf_size == BPF_H)
return 2;
else if (bpf_size == BPF_B)
return 1;
else if (bpf_size == BPF_DW)
return 8;
else
return -EINVAL;
}
static bool is_spillable_regtype(enum bpf_reg_type type)
{
switch (type) {
case PTR_TO_MAP_VALUE:
case PTR_TO_MAP_VALUE_OR_NULL:
case PTR_TO_MAP_VALUE_ADJ:
case PTR_TO_STACK:
case PTR_TO_CTX:
case PTR_TO_PACKET:
case PTR_TO_PACKET_END:
case FRAME_PTR:
case CONST_PTR_TO_MAP:
return true;
default:
return false;
}
}
/* check_stack_read/write functions track spill/fill of registers,
* stack boundary and alignment are checked in check_mem_access()
*/
static int check_stack_write(struct bpf_verifier_state *state, int off,
int size, int value_regno)
{
int i;
/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
* so it's aligned access and [off, off + size) are within stack limits
*/
if (value_regno >= 0 &&
is_spillable_regtype(state->regs[value_regno].type)) {
/* register containing pointer is being spilled into stack */
if (size != BPF_REG_SIZE) {
verbose("invalid size of register spill\n");
return -EACCES;
}
/* save register state */
state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
state->regs[value_regno];
for (i = 0; i < BPF_REG_SIZE; i++)
state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL;
} else {
/* regular write of data into stack */
state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
(struct bpf_reg_state) {};
for (i = 0; i < size; i++)
state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC;
}
return 0;
}
static int check_stack_read(struct bpf_verifier_state *state, int off, int size,
int value_regno)
{
u8 *slot_type;
int i;
slot_type = &state->stack_slot_type[MAX_BPF_STACK + off];
if (slot_type[0] == STACK_SPILL) {
if (size != BPF_REG_SIZE) {
verbose("invalid size of register spill\n");
return -EACCES;
}
for (i = 1; i < BPF_REG_SIZE; i++) {
if (slot_type[i] != STACK_SPILL) {
verbose("corrupted spill memory\n");
return -EACCES;
}
}
if (value_regno >= 0)
/* restore register state from stack */
state->regs[value_regno] =
state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE];
return 0;
} else {
for (i = 0; i < size; i++) {
if (slot_type[i] != STACK_MISC) {
verbose("invalid read from stack off %d+%d size %d\n",
off, i, size);
return -EACCES;
}
}
if (value_regno >= 0)
/* have read misc data from the stack */
mark_reg_unknown_value_and_range(state->regs,
value_regno);
return 0;
}
}
/* check read/write into map element returned by bpf_map_lookup_elem() */
static int check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
int size)
{
struct bpf_map *map = env->cur_state.regs[regno].map_ptr;
if (off < 0 || size <= 0 || off + size > map->value_size) {
verbose("invalid access to map value, value_size=%d off=%d size=%d\n",
map->value_size, off, size);
return -EACCES;
}
return 0;
}
/* check read/write into an adjusted map element */
static int check_map_access_adj(struct bpf_verifier_env *env, u32 regno,
int off, int size)
{
struct bpf_verifier_state *state = &env->cur_state;
struct bpf_reg_state *reg = &state->regs[regno];
int err;
/* We adjusted the register to this map value, so we
* need to change off and size to min_value and max_value
* respectively to make sure our theoretical access will be
* safe.
*/
if (log_level)
print_verifier_state(state);
env->varlen_map_value_access = true;
/* The minimum value is only important with signed
* comparisons where we can't assume the floor of a
* value is 0. If we are using signed variables for our
	 * indexes we need to make sure that whatever we use
* will have a set floor within our range.
*/
if (reg->min_value < 0) {
verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
regno);
return -EACCES;
}
err = check_map_access(env, regno, reg->min_value + off, size);
if (err) {
verbose("R%d min value is outside of the array range\n",
regno);
return err;
}
/* If we haven't set a max value then we need to bail
* since we can't be sure we won't do bad things.
*/
if (reg->max_value == BPF_REGISTER_MAX_RANGE) {
verbose("R%d unbounded memory access, make sure to bounds check any array access into a map\n",
regno);
return -EACCES;
}
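	/* e.g. an index known to lie in [0, 15] with off == 0 and size == 4
	 * is accepted only if both the minimum and the maximum offset admit
	 * a 4-byte access within value_size; hence the two
	 * check_map_access() calls */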
return check_map_access(env, regno, reg->max_value + off, size);
}
#define MAX_PACKET_OFF 0xffff
static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
const struct bpf_call_arg_meta *meta,
enum bpf_access_type t)
{
switch (env->prog->type) {
case BPF_PROG_TYPE_LWT_IN:
case BPF_PROG_TYPE_LWT_OUT:
/* dst_input() and dst_output() can't write for now */
if (t == BPF_WRITE)
return false;
/* fallthrough */
case BPF_PROG_TYPE_SCHED_CLS:
case BPF_PROG_TYPE_SCHED_ACT:
case BPF_PROG_TYPE_XDP:
case BPF_PROG_TYPE_LWT_XMIT:
if (meta)
return meta->pkt_access;
env->seen_direct_write = true;
return true;
default:
return false;
}
}
static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
int size)
{
struct bpf_reg_state *regs = env->cur_state.regs;
	struct bpf_reg_state *reg = &regs[regno];
off += reg->off;
if (off < 0 || size <= 0 || off + size > reg->range) {
verbose("invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
off, size, regno, reg->id, reg->off, reg->range);
return -EACCES;
}
return 0;
}
/* check access to 'struct bpf_context' fields */
static int check_ctx_access(struct bpf_verifier_env *env, int off, int size,
enum bpf_access_type t, enum bpf_reg_type *reg_type)
{
/* for analyzer ctx accesses are already validated and converted */
if (env->analyzer_ops)
return 0;
if (env->prog->aux->ops->is_valid_access &&
env->prog->aux->ops->is_valid_access(off, size, t, reg_type)) {
/* remember the offset of last byte accessed in ctx */
if (env->prog->aux->max_ctx_offset < off + size)
env->prog->aux->max_ctx_offset = off + size;
return 0;
}
verbose("invalid bpf_context access off=%d size=%d\n", off, size);
return -EACCES;
}
static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
{
if (env->allow_ptr_leaks)
return false;
switch (env->cur_state.regs[regno].type) {
case UNKNOWN_VALUE:
case CONST_IMM:
return false;
default:
return true;
}
}
static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg,
int off, int size)
{
if (reg->id && size != 1) {
verbose("Unknown alignment. Only byte-sized access allowed in packet access.\n");
return -EACCES;
}
/* skb->data is NET_IP_ALIGN-ed */
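	/* e.g. with NET_IP_ALIGN == 2 and reg->off + off == 2, a 4-byte
	 * load passes ((2 + 2) % 4 == 0) while one at offset 0 does not */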
if ((NET_IP_ALIGN + reg->off + off) % size != 0) {
verbose("misaligned packet access off %d+%d+%d size %d\n",
NET_IP_ALIGN, reg->off, off, size);
return -EACCES;
}
return 0;
}
static int check_val_ptr_alignment(const struct bpf_reg_state *reg,
int size)
{
if (size != 1) {
verbose("Unknown alignment. Only byte-sized access allowed in value access.\n");
return -EACCES;
}
return 0;
}
static int check_ptr_alignment(const struct bpf_reg_state *reg,
int off, int size)
{
switch (reg->type) {
case PTR_TO_PACKET:
return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ? 0 :
check_pkt_ptr_alignment(reg, off, size);
case PTR_TO_MAP_VALUE_ADJ:
return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ? 0 :
check_val_ptr_alignment(reg, size);
default:
if (off % size != 0) {
verbose("misaligned access off %d size %d\n",
off, size);
return -EACCES;
}
return 0;
}
}
/* check whether memory at (regno + off) is accessible for t = (read | write)
* if t==write, value_regno is a register which value is stored into memory
* if t==read, value_regno is a register which will receive the value from memory
* if t==write && value_regno==-1, some unknown value is stored into memory
* if t==read && value_regno==-1, don't care what we read from memory
*/
static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
int bpf_size, enum bpf_access_type t,
int value_regno)
{
struct bpf_verifier_state *state = &env->cur_state;
struct bpf_reg_state *reg = &state->regs[regno];
int size, err = 0;
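	/* a PTR_TO_STACK register keeps its constant frame-pointer offset
	 * in 'imm' (see the file-header comment), so fold it into 'off' */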
if (reg->type == PTR_TO_STACK)
off += reg->imm;
size = bpf_size_to_bytes(bpf_size);
if (size < 0)
return size;
err = check_ptr_alignment(reg, off, size);
if (err)
return err;
if (reg->type == PTR_TO_MAP_VALUE ||
reg->type == PTR_TO_MAP_VALUE_ADJ) {
if (t == BPF_WRITE && value_regno >= 0 &&
is_pointer_value(env, value_regno)) {
verbose("R%d leaks addr into map\n", value_regno);
return -EACCES;
}
if (reg->type == PTR_TO_MAP_VALUE_ADJ)
err = check_map_access_adj(env, regno, off, size);
else
err = check_map_access(env, regno, off, size);
if (!err && t == BPF_READ && value_regno >= 0)
mark_reg_unknown_value_and_range(state->regs,
value_regno);
} else if (reg->type == PTR_TO_CTX) {
enum bpf_reg_type reg_type = UNKNOWN_VALUE;
if (t == BPF_WRITE && value_regno >= 0 &&
is_pointer_value(env, value_regno)) {
verbose("R%d leaks addr into ctx\n", value_regno);
return -EACCES;
}
		err = check_ctx_access(env, off, size, t, &reg_type);
if (!err && t == BPF_READ && value_regno >= 0) {
mark_reg_unknown_value_and_range(state->regs,
value_regno);
/* note that reg.[id|off|range] == 0 */
state->regs[value_regno].type = reg_type;
}
} else if (reg->type == FRAME_PTR || reg->type == PTR_TO_STACK) {
if (off >= 0 || off < -MAX_BPF_STACK) {
verbose("invalid stack off=%d size=%d\n", off, size);
return -EACCES;
}
if (t == BPF_WRITE) {
if (!env->allow_ptr_leaks &&
state->stack_slot_type[MAX_BPF_STACK + off] == STACK_SPILL &&
size != BPF_REG_SIZE) {
verbose("attempt to corrupt spilled pointer on stack\n");
return -EACCES;
}
err = check_stack_write(state, off, size, value_regno);
} else {
err = check_stack_read(state, off, size, value_regno);
}
} else if (state->regs[regno].type == PTR_TO_PACKET) {
if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
verbose("cannot write into packet\n");
return -EACCES;
}
if (t == BPF_WRITE && value_regno >= 0 &&
is_pointer_value(env, value_regno)) {
verbose("R%d leaks addr into packet\n", value_regno);
return -EACCES;
}
err = check_packet_access(env, regno, off, size);
if (!err && t == BPF_READ && value_regno >= 0)
mark_reg_unknown_value_and_range(state->regs,
value_regno);
} else {
verbose("R%d invalid mem access '%s'\n",
regno, reg_type_str[reg->type]);
return -EACCES;
}
if (!err && size <= 2 && value_regno >= 0 && env->allow_ptr_leaks &&
state->regs[value_regno].type == UNKNOWN_VALUE) {
/* 1 or 2 byte load zero-extends, determine the number of
		 * zero upper bits. Not doing it for 4 byte load, since
* such values cannot be added to ptr_to_packet anyway.
*/
state->regs[value_regno].imm = 64 - size * 8;
}
return err;
}
static int check_xadd(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
struct bpf_reg_state *regs = env->cur_state.regs;
int err;
if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
insn->imm != 0) {
verbose("BPF_XADD uses reserved fields\n");
return -EINVAL;
}
/* check src1 operand */
err = check_reg_arg(regs, insn->src_reg, SRC_OP);
if (err)
return err;
/* check src2 operand */
err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
if (err)
return err;
/* check whether atomic_add can read the memory */
err = check_mem_access(env, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_READ, -1);
if (err)
return err;
/* check whether atomic_add can write into the same memory */
return check_mem_access(env, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_WRITE, -1);
}
/* when register 'regno' is passed into function that will read 'access_size'
* bytes from that pointer, make sure that it's within stack boundary
* and all elements of stack are initialized
*/
static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
int access_size, bool zero_size_allowed,
struct bpf_call_arg_meta *meta)
{
struct bpf_verifier_state *state = &env->cur_state;
struct bpf_reg_state *regs = state->regs;
int off, i;
if (regs[regno].type != PTR_TO_STACK) {
if (zero_size_allowed && access_size == 0 &&
regs[regno].type == CONST_IMM &&
regs[regno].imm == 0)
return 0;
verbose("R%d type=%s expected=%s\n", regno,
reg_type_str[regs[regno].type],
reg_type_str[PTR_TO_STACK]);
return -EACCES;
}
off = regs[regno].imm;
if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
access_size <= 0) {
verbose("invalid stack type R%d off=%d access_size=%d\n",
regno, off, access_size);
return -EACCES;
}
if (meta && meta->raw_mode) {
meta->access_size = access_size;
meta->regno = regno;
return 0;
}
for (i = 0; i < access_size; i++) {
if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) {
verbose("invalid indirect read from stack off %d+%d size %d\n",
off, i, access_size);
return -EACCES;
}
}
return 0;
}
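/* Illustrative example (a sketch, not part of the original source),
 * assuming MAX_BPF_STACK == 512:
 *
 *   r1 = r10       // frame pointer
 *   r1 += -8       // r1 becomes PTR_TO_STACK with imm == -8
 *
 * check_stack_boundary(regno=1, access_size=8) accepts this if the slots
 * in [r10-8, r10) were initialized, while access_size == 16 would fail
 * the 'off + access_size > 0' test above.
 */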
static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
int access_size, bool zero_size_allowed,
struct bpf_call_arg_meta *meta)
{
struct bpf_reg_state *regs = env->cur_state.regs;
switch (regs[regno].type) {
case PTR_TO_PACKET:
return check_packet_access(env, regno, 0, access_size);
case PTR_TO_MAP_VALUE:
return check_map_access(env, regno, 0, access_size);
case PTR_TO_MAP_VALUE_ADJ:
return check_map_access_adj(env, regno, 0, access_size);
default: /* const_imm|ptr_to_stack or invalid ptr */
return check_stack_boundary(env, regno, access_size,
zero_size_allowed, meta);
}
}
static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
enum bpf_arg_type arg_type,
struct bpf_call_arg_meta *meta)
{
struct bpf_reg_state *regs = env->cur_state.regs, *reg = &regs[regno];
enum bpf_reg_type expected_type, type = reg->type;
int err = 0;
if (arg_type == ARG_DONTCARE)
return 0;
if (type == NOT_INIT) {
verbose("R%d !read_ok\n", regno);
return -EACCES;
}
if (arg_type == ARG_ANYTHING) {
if (is_pointer_value(env, regno)) {
verbose("R%d leaks addr into helper function\n", regno);
return -EACCES;
}
return 0;
}
if (type == PTR_TO_PACKET &&
!may_access_direct_pkt_data(env, meta, BPF_READ)) {
verbose("helper access to the packet is not allowed\n");
return -EACCES;
}
if (arg_type == ARG_PTR_TO_MAP_KEY ||
arg_type == ARG_PTR_TO_MAP_VALUE) {
expected_type = PTR_TO_STACK;
if (type != PTR_TO_PACKET && type != expected_type)
goto err_type;
} else if (arg_type == ARG_CONST_SIZE ||
arg_type == ARG_CONST_SIZE_OR_ZERO) {
expected_type = CONST_IMM;
/* One exception. Allow UNKNOWN_VALUE registers when the
* boundaries are known and don't cause unsafe memory accesses
*/
if (type != UNKNOWN_VALUE && type != expected_type)
goto err_type;
} else if (arg_type == ARG_CONST_MAP_PTR) {
expected_type = CONST_PTR_TO_MAP;
if (type != expected_type)
goto err_type;
} else if (arg_type == ARG_PTR_TO_CTX) {
expected_type = PTR_TO_CTX;
if (type != expected_type)
goto err_type;
} else if (arg_type == ARG_PTR_TO_MEM ||
arg_type == ARG_PTR_TO_UNINIT_MEM) {
expected_type = PTR_TO_STACK;
/* One exception here. In case function allows for NULL to be
* passed in as argument, it's a CONST_IMM type. Final test
* happens during stack boundary checking.
*/
if (type == CONST_IMM && reg->imm == 0)
/* final test in check_stack_boundary() */;
else if (type != PTR_TO_PACKET && type != PTR_TO_MAP_VALUE &&
type != PTR_TO_MAP_VALUE_ADJ && type != expected_type)
goto err_type;
meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
} else {
verbose("unsupported arg_type %d\n", arg_type);
return -EFAULT;
}
if (arg_type == ARG_CONST_MAP_PTR) {
/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
meta->map_ptr = reg->map_ptr;
} else if (arg_type == ARG_PTR_TO_MAP_KEY) {
/* bpf_map_xxx(..., map_ptr, ..., key) call:
* check that [key, key + map->key_size) are within
* stack limits and initialized
*/
if (!meta->map_ptr) {
/* in function declaration map_ptr must come before
* map_key, so that it's verified and known before
* we have to check map_key here. Otherwise it means
* that kernel subsystem misconfigured verifier
*/
verbose("invalid map_ptr to access map->key\n");
return -EACCES;
}
if (type == PTR_TO_PACKET)
err = check_packet_access(env, regno, 0,
meta->map_ptr->key_size);
else
err = check_stack_boundary(env, regno,
meta->map_ptr->key_size,
false, NULL);
} else if (arg_type == ARG_PTR_TO_MAP_VALUE) {
/* bpf_map_xxx(..., map_ptr, ..., value) call:
* check [value, value + map->value_size) validity
*/
if (!meta->map_ptr) {
/* kernel subsystem misconfigured verifier */
verbose("invalid map_ptr to access map->value\n");
return -EACCES;
}
if (type == PTR_TO_PACKET)
err = check_packet_access(env, regno, 0,
meta->map_ptr->value_size);
else
err = check_stack_boundary(env, regno,
meta->map_ptr->value_size,
false, NULL);
} else if (arg_type == ARG_CONST_SIZE ||
arg_type == ARG_CONST_SIZE_OR_ZERO) {
bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
/* bpf_xxx(..., buf, len) call will access 'len' bytes
* from stack pointer 'buf'. Check it
* note: regno == len, regno - 1 == buf
*/
if (regno == 0) {
/* kernel subsystem misconfigured verifier */
verbose("ARG_CONST_SIZE cannot be first argument\n");
return -EACCES;
}
/* If the register is UNKNOWN_VALUE, the access check happens
* using its boundaries. Otherwise, just use its imm
*/
if (type == UNKNOWN_VALUE) {
/* For unprivileged variable accesses, disable raw
* mode so that the program is required to
* initialize all the memory that the helper could
* just partially fill up.
*/
meta = NULL;
if (reg->min_value < 0) {
verbose("R%d min value is negative, either use unsigned or 'var &= const'\n",
regno);
return -EACCES;
}
if (reg->min_value == 0) {
err = check_helper_mem_access(env, regno - 1, 0,
zero_size_allowed,
meta);
if (err)
return err;
}
if (reg->max_value == BPF_REGISTER_MAX_RANGE) {
verbose("R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
regno);
return -EACCES;
}
err = check_helper_mem_access(env, regno - 1,
reg->max_value,
zero_size_allowed, meta);
if (err)
return err;
} else {
/* register is CONST_IMM */
err = check_helper_mem_access(env, regno - 1, reg->imm,
zero_size_allowed, meta);
}
}
return err;
err_type:
verbose("R%d type=%s expected=%s\n", regno,
reg_type_str[type], reg_type_str[expected_type]);
return -EACCES;
}
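/* Illustrative example (a sketch, assuming the helper prototype of this
 * era): bpf_probe_read(dst, size, src) declares arg1 as
 * ARG_PTR_TO_UNINIT_MEM and arg2 as ARG_CONST_SIZE. When check_func_arg()
 * handles arg2, regno == 2 is the size and regno - 1 == 1 is the buffer,
 * so check_helper_mem_access() validates that R1 can hold 'size' bytes.
 */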
static int check_map_func_compatibility(struct bpf_map *map, int func_id)
{
if (!map)
return 0;
/* We need a two way check, first is from map perspective ... */
switch (map->map_type) {
case BPF_MAP_TYPE_PROG_ARRAY:
if (func_id != BPF_FUNC_tail_call)
goto error;
break;
case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
if (func_id != BPF_FUNC_perf_event_read &&
func_id != BPF_FUNC_perf_event_output)
goto error;
break;
case BPF_MAP_TYPE_STACK_TRACE:
if (func_id != BPF_FUNC_get_stackid)
goto error;
break;
case BPF_MAP_TYPE_CGROUP_ARRAY:
if (func_id != BPF_FUNC_skb_under_cgroup &&
func_id != BPF_FUNC_current_task_under_cgroup)
goto error;
break;
case BPF_MAP_TYPE_ARRAY_OF_MAPS:
case BPF_MAP_TYPE_HASH_OF_MAPS:
if (func_id != BPF_FUNC_map_lookup_elem)
goto error;
break;
default:
break;
}
/* ... and second from the function itself. */
switch (func_id) {
case BPF_FUNC_tail_call:
if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
goto error;
break;
case BPF_FUNC_perf_event_read:
case BPF_FUNC_perf_event_output:
if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
goto error;
break;
case BPF_FUNC_get_stackid:
if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
goto error;
break;
case BPF_FUNC_current_task_under_cgroup:
case BPF_FUNC_skb_under_cgroup:
if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
goto error;
break;
default:
break;
}
return 0;
error:
verbose("cannot pass map_type %d into func %s#%d\n",
map->map_type, func_id_name(func_id), func_id);
return -EINVAL;
}
static int check_raw_mode(const struct bpf_func_proto *fn)
{
int count = 0;
if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
count++;
if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
count++;
if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
count++;
if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
count++;
if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
count++;
return count > 1 ? -EINVAL : 0;
}
static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
{
struct bpf_verifier_state *state = &env->cur_state;
struct bpf_reg_state *regs = state->regs, *reg;
int i;
for (i = 0; i < MAX_BPF_REG; i++)
if (regs[i].type == PTR_TO_PACKET ||
regs[i].type == PTR_TO_PACKET_END)
mark_reg_unknown_value(regs, i);
for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
if (state->stack_slot_type[i] != STACK_SPILL)
continue;
reg = &state->spilled_regs[i / BPF_REG_SIZE];
if (reg->type != PTR_TO_PACKET &&
reg->type != PTR_TO_PACKET_END)
continue;
reg->type = UNKNOWN_VALUE;
reg->imm = 0;
}
}
static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
{
struct bpf_verifier_state *state = &env->cur_state;
const struct bpf_func_proto *fn = NULL;
struct bpf_reg_state *regs = state->regs;
struct bpf_reg_state *reg;
struct bpf_call_arg_meta meta;
bool changes_data;
int i, err;
/* find function prototype */
if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
verbose("invalid func %s#%d\n", func_id_name(func_id), func_id);
return -EINVAL;
}
if (env->prog->aux->ops->get_func_proto)
fn = env->prog->aux->ops->get_func_proto(func_id);
if (!fn) {
verbose("unknown func %s#%d\n", func_id_name(func_id), func_id);
return -EINVAL;
}
/* eBPF programs must be GPL compatible to use GPL-ed functions */
if (!env->prog->gpl_compatible && fn->gpl_only) {
verbose("cannot call GPL only function from proprietary program\n");
return -EINVAL;
}
changes_data = bpf_helper_changes_pkt_data(fn->func);
memset(&meta, 0, sizeof(meta));
meta.pkt_access = fn->pkt_access;
/* We only support one arg being in raw mode at the moment, which
* is sufficient for the helper functions we have right now.
*/
err = check_raw_mode(fn);
if (err) {
verbose("kernel subsystem misconfigured func %s#%d\n",
func_id_name(func_id), func_id);
return err;
}
/* check args */
err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta);
if (err)
return err;
err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
if (err)
return err;
err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
if (err)
return err;
err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta);
if (err)
return err;
err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta);
if (err)
return err;
/* Mark slots with STACK_MISC in case of raw mode, stack offset
* is inferred from register state.
*/
for (i = 0; i < meta.access_size; i++) {
err = check_mem_access(env, meta.regno, i, BPF_B, BPF_WRITE, -1);
if (err)
return err;
}
/* reset caller saved regs */
for (i = 0; i < CALLER_SAVED_REGS; i++) {
reg = regs + caller_saved[i];
reg->type = NOT_INIT;
reg->imm = 0;
}
/* update return register */
if (fn->ret_type == RET_INTEGER) {
regs[BPF_REG_0].type = UNKNOWN_VALUE;
} else if (fn->ret_type == RET_VOID) {
regs[BPF_REG_0].type = NOT_INIT;
} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
struct bpf_insn_aux_data *insn_aux;
regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
regs[BPF_REG_0].max_value = regs[BPF_REG_0].min_value = 0;
/* remember map_ptr, so that check_map_access()
* can check 'value_size' boundary of memory access
* to map element returned from bpf_map_lookup_elem()
*/
if (meta.map_ptr == NULL) {
verbose("kernel subsystem misconfigured verifier\n");
return -EINVAL;
}
regs[BPF_REG_0].map_ptr = meta.map_ptr;
regs[BPF_REG_0].id = ++env->id_gen;
insn_aux = &env->insn_aux_data[insn_idx];
if (!insn_aux->map_ptr)
insn_aux->map_ptr = meta.map_ptr;
else if (insn_aux->map_ptr != meta.map_ptr)
insn_aux->map_ptr = BPF_MAP_PTR_POISON;
} else {
verbose("unknown return type %d of func %s#%d\n",
fn->ret_type, func_id_name(func_id), func_id);
return -EINVAL;
}
err = check_map_func_compatibility(meta.map_ptr, func_id);
if (err)
return err;
if (changes_data)
clear_all_pkt_pointers(env);
return 0;
}
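/* Illustrative example (a sketch, assuming the helper prototype of this
 * era): bpf_get_current_comm(buf, size_of_buf) declares arg1 as
 * ARG_PTR_TO_UNINIT_MEM, so check_raw_mode() permits it and the
 * STACK_MISC loop in check_call() marks the whole buffer initialized,
 * even though the program itself never stored to those slots.
 */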
static int check_packet_ptr_add(struct bpf_verifier_env *env,
struct bpf_insn *insn)
{
struct bpf_reg_state *regs = env->cur_state.regs;
struct bpf_reg_state *dst_reg = &regs[insn->dst_reg];
struct bpf_reg_state *src_reg = &regs[insn->src_reg];
struct bpf_reg_state tmp_reg;
s32 imm;
if (BPF_SRC(insn->code) == BPF_K) {
/* pkt_ptr += imm */
imm = insn->imm;
add_imm:
if (imm < 0) {
verbose("addition of negative constant to packet pointer is not allowed\n");
return -EACCES;
}
if (imm >= MAX_PACKET_OFF ||
imm + dst_reg->off >= MAX_PACKET_OFF) {
verbose("constant %d is too large to add to packet pointer\n",
imm);
return -EACCES;
}
/* a constant was added to pkt_ptr.
* Remember it while keeping the same 'id'
*/
dst_reg->off += imm;
} else {
if (src_reg->type == PTR_TO_PACKET) {
/* R6=pkt(id=0,off=0,r=62) R7=imm22; r7 += r6 */
tmp_reg = *dst_reg; /* save r7 state */
*dst_reg = *src_reg; /* copy pkt_ptr state r6 into r7 */
src_reg = &tmp_reg; /* pretend it's src_reg state */
/* if the checks below reject it, the copy won't matter,
* since we're rejecting the whole program. If all ok,
* then imm22 state will be added to r7
* and r7 will be pkt(id=0,off=22,r=62) while
* r6 will stay as pkt(id=0,off=0,r=62)
*/
}
if (src_reg->type == CONST_IMM) {
/* pkt_ptr += reg where reg is known constant */
imm = src_reg->imm;
goto add_imm;
}
/* disallow pkt_ptr += reg
* if reg is not unknown_value with guaranteed zero upper bits
* otherwise pkt_ptr may overflow and addition will become
* subtraction which is not allowed
*/
if (src_reg->type != UNKNOWN_VALUE) {
verbose("cannot add '%s' to ptr_to_packet\n",
reg_type_str[src_reg->type]);
return -EACCES;
}
if (src_reg->imm < 48) {
verbose("cannot add integer value with %lld upper zero bits to ptr_to_packet\n",
src_reg->imm);
return -EACCES;
}
/* dst_reg stays as pkt_ptr type and since some positive
* integer value was added to the pointer, increment its 'id'
*/
dst_reg->id = ++env->id_gen;
/* something was added to pkt_ptr, set range and off to zero */
dst_reg->off = 0;
dst_reg->range = 0;
}
return 0;
}
static int evaluate_reg_alu(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
struct bpf_reg_state *regs = env->cur_state.regs;
struct bpf_reg_state *dst_reg = &regs[insn->dst_reg];
u8 opcode = BPF_OP(insn->code);
s64 imm_log2;
/* for type == UNKNOWN_VALUE:
* imm > 0 -> number of zero upper bits
* imm == 0 -> don't track, which is the same as saying all bits can be non-zero
*/
if (BPF_SRC(insn->code) == BPF_X) {
struct bpf_reg_state *src_reg = &regs[insn->src_reg];
if (src_reg->type == UNKNOWN_VALUE && src_reg->imm > 0 &&
dst_reg->imm && opcode == BPF_ADD) {
/* dreg += sreg
* where both have zero upper bits. Adding them
* can only result in making one more bit non-zero
* in the larger value.
* Ex. 0xffff (imm=48) + 1 (imm=63) = 0x10000 (imm=47)
* 0xffff (imm=48) + 0xffff = 0x1fffe (imm=47)
*/
dst_reg->imm = min(dst_reg->imm, src_reg->imm);
dst_reg->imm--;
return 0;
}
if (src_reg->type == CONST_IMM && src_reg->imm > 0 &&
dst_reg->imm && opcode == BPF_ADD) {
/* dreg += sreg
* where dreg has zero upper bits and sreg is const.
* Adding them can only result in making one more bit
* non-zero in the larger value.
*/
imm_log2 = __ilog2_u64((long long)src_reg->imm);
dst_reg->imm = min(dst_reg->imm, 63 - imm_log2);
dst_reg->imm--;
return 0;
}
/* all other cases not supported yet, just mark dst_reg */
dst_reg->imm = 0;
return 0;
}
/* sign extend 32-bit imm into 64-bit to make sure that
* negative values occupy bit 63. Note ilog2() would have
* been incorrect, since sizeof(insn->imm) == 4
*/
imm_log2 = __ilog2_u64((long long)insn->imm);
if (dst_reg->imm && opcode == BPF_LSH) {
/* reg <<= imm
* if reg was a result of 2 byte load, then its imm == 48
* which means that upper 48 bits are zero and shifting this reg
* left by 4 would mean that upper 44 bits are still zero
*/
dst_reg->imm -= insn->imm;
} else if (dst_reg->imm && opcode == BPF_MUL) {
/* reg *= imm
* if multiplying by 14 subtract 4
* This is conservative calculation of upper zero bits.
* It's not trying to special case insn->imm == 1 or 0 cases
*/
dst_reg->imm -= imm_log2 + 1;
} else if (opcode == BPF_AND) {
/* reg &= imm */
dst_reg->imm = 63 - imm_log2;
} else if (dst_reg->imm && opcode == BPF_ADD) {
/* reg += imm */
dst_reg->imm = min(dst_reg->imm, 63 - imm_log2);
dst_reg->imm--;
} else if (opcode == BPF_RSH) {
/* reg >>= imm
* which means that after right shift, upper bits will be zero
* note that verifier already checked that
* 0 <= imm < 64 for shift insn
*/
dst_reg->imm += insn->imm;
if (unlikely(dst_reg->imm > 64))
/* some dumb code did:
* r2 = *(u32 *)mem;
* r2 >>= 32;
* and all bits are zero now */
dst_reg->imm = 64;
} else {
/* all other alu ops, means that we don't know what will
* happen to the value, mark it with unknown number of zero bits
*/
dst_reg->imm = 0;
}
if (dst_reg->imm < 0) {
/* all 64 bits of the register can contain non-zero bits
* and such value cannot be added to ptr_to_packet, since it
* may overflow, mark it as unknown to avoid further eval
*/
dst_reg->imm = 0;
}
return 0;
}
static int evaluate_reg_imm_alu(struct bpf_verifier_env *env,
struct bpf_insn *insn)
{
struct bpf_reg_state *regs = env->cur_state.regs;
struct bpf_reg_state *dst_reg = &regs[insn->dst_reg];
struct bpf_reg_state *src_reg = &regs[insn->src_reg];
u8 opcode = BPF_OP(insn->code);
u64 dst_imm = dst_reg->imm;
/* dst_reg->type == CONST_IMM here. Simulate execution of insns
* containing ALU ops. Don't care about overflow or negative
* values, just add/sub/... them; registers are in u64.
*/
if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_K) {
dst_imm += insn->imm;
} else if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_X &&
src_reg->type == CONST_IMM) {
dst_imm += src_reg->imm;
} else if (opcode == BPF_SUB && BPF_SRC(insn->code) == BPF_K) {
dst_imm -= insn->imm;
} else if (opcode == BPF_SUB && BPF_SRC(insn->code) == BPF_X &&
src_reg->type == CONST_IMM) {
dst_imm -= src_reg->imm;
} else if (opcode == BPF_MUL && BPF_SRC(insn->code) == BPF_K) {
dst_imm *= insn->imm;
} else if (opcode == BPF_MUL && BPF_SRC(insn->code) == BPF_X &&
src_reg->type == CONST_IMM) {
dst_imm *= src_reg->imm;
} else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_K) {
dst_imm |= insn->imm;
} else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_X &&
src_reg->type == CONST_IMM) {
dst_imm |= src_reg->imm;
} else if (opcode == BPF_AND && BPF_SRC(insn->code) == BPF_K) {
dst_imm &= insn->imm;
} else if (opcode == BPF_AND && BPF_SRC(insn->code) == BPF_X &&
src_reg->type == CONST_IMM) {
dst_imm &= src_reg->imm;
} else if (opcode == BPF_RSH && BPF_SRC(insn->code) == BPF_K) {
dst_imm >>= insn->imm;
} else if (opcode == BPF_RSH && BPF_SRC(insn->code) == BPF_X &&
src_reg->type == CONST_IMM) {
dst_imm >>= src_reg->imm;
} else if (opcode == BPF_LSH && BPF_SRC(insn->code) == BPF_K) {
dst_imm <<= insn->imm;
} else if (opcode == BPF_LSH && BPF_SRC(insn->code) == BPF_X &&
src_reg->type == CONST_IMM) {
dst_imm <<= src_reg->imm;
} else {
mark_reg_unknown_value(regs, insn->dst_reg);
goto out;
}
dst_reg->imm = dst_imm;
out:
return 0;
}
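/* Illustrative example (not part of the original source): starting from
 * r1 as CONST_IMM with imm == 10, the sequence
 *
 *   r1 <<= 2       // BPF_LSH | BPF_K: dst_imm becomes 40
 *   r1 += 2        // BPF_ADD | BPF_K: dst_imm becomes 42
 *
 * keeps r1 a known constant (imm == 42), mirroring what the program
 * would compute at run time; check_alu_op() only routes privileged
 * (allow_ptr_leaks) programs here.
 */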
static void check_reg_overflow(struct bpf_reg_state *reg)
{
if (reg->max_value > BPF_REGISTER_MAX_RANGE)
reg->max_value = BPF_REGISTER_MAX_RANGE;
if (reg->min_value < BPF_REGISTER_MIN_RANGE ||
reg->min_value > BPF_REGISTER_MAX_RANGE)
reg->min_value = BPF_REGISTER_MIN_RANGE;
}
static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
struct bpf_insn *insn)
{
struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg;
s64 min_val = BPF_REGISTER_MIN_RANGE;
u64 max_val = BPF_REGISTER_MAX_RANGE;
u8 opcode = BPF_OP(insn->code);
dst_reg = &regs[insn->dst_reg];
if (BPF_SRC(insn->code) == BPF_X) {
check_reg_overflow(&regs[insn->src_reg]);
min_val = regs[insn->src_reg].min_value;
max_val = regs[insn->src_reg].max_value;
/* If the source register is a random pointer then the
* min_value/max_value values represent the range of the known
* accesses into that value, not the actual min/max value of the
* register itself. In this case we have to reset the reg range
* values so we know it is not safe to look at.
*/
if (regs[insn->src_reg].type != CONST_IMM &&
regs[insn->src_reg].type != UNKNOWN_VALUE) {
min_val = BPF_REGISTER_MIN_RANGE;
max_val = BPF_REGISTER_MAX_RANGE;
}
} else if (insn->imm < BPF_REGISTER_MAX_RANGE &&
(s64)insn->imm > BPF_REGISTER_MIN_RANGE) {
min_val = max_val = insn->imm;
}
/* We don't know anything about what was done to this register, mark it
* as unknown.
*/
if (min_val == BPF_REGISTER_MIN_RANGE &&
max_val == BPF_REGISTER_MAX_RANGE) {
reset_reg_range_values(regs, insn->dst_reg);
return;
}
/* If one of our values was at the end of our ranges then we can't just
* do our normal operations to the register, we need to set the values
* to the min/max since they are undefined.
*/
if (min_val == BPF_REGISTER_MIN_RANGE)
dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
if (max_val == BPF_REGISTER_MAX_RANGE)
dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
switch (opcode) {
case BPF_ADD:
if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
dst_reg->min_value += min_val;
if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
dst_reg->max_value += max_val;
break;
case BPF_SUB:
if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
dst_reg->min_value -= min_val;
if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
dst_reg->max_value -= max_val;
break;
case BPF_MUL:
if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
dst_reg->min_value *= min_val;
if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
dst_reg->max_value *= max_val;
break;
case BPF_AND:
/* Disallow AND'ing of negative numbers, ain't nobody got time
* for that. Otherwise the minimum is 0 and the max is the max
* value we could AND against.
*/
if (min_val < 0)
dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
else
dst_reg->min_value = 0;
dst_reg->max_value = max_val;
break;
case BPF_LSH:
/* Gotta have special overflow logic here, if we're shifting
* more than MAX_RANGE then just assume we have an invalid
* range.
*/
if (min_val > ilog2(BPF_REGISTER_MAX_RANGE))
dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
else if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
dst_reg->min_value <<= min_val;
if (max_val > ilog2(BPF_REGISTER_MAX_RANGE))
dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
else if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
dst_reg->max_value <<= max_val;
break;
case BPF_RSH:
/* RSH by a negative number is undefined, and the BPF_RSH is an
* unsigned shift, so make the appropriate casts.
*/
if (min_val < 0 || dst_reg->min_value < 0)
dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
else
dst_reg->min_value =
(u64)(dst_reg->min_value) >> min_val;
if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
dst_reg->max_value >>= max_val;
break;
default:
reset_reg_range_values(regs, insn->dst_reg);
break;
}
check_reg_overflow(dst_reg);
}
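/* Illustrative example (not part of the original source):
 *
 *   r1 = 5         // min_value == max_value == 5
 *   r1 += 3        // BPF_ADD: range becomes [8, 8]
 *   r1 &= 0xff     // BPF_AND: range becomes [0, 0xff]
 *
 * The AND case resets min_value to 0 because the mask is non-negative,
 * and clamps max_value to the mask itself.
 */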
/* check validity of 32-bit and 64-bit arithmetic operations */
static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg;
u8 opcode = BPF_OP(insn->code);
int err;
if (opcode == BPF_END || opcode == BPF_NEG) {
if (opcode == BPF_NEG) {
if (BPF_SRC(insn->code) != 0 ||
insn->src_reg != BPF_REG_0 ||
insn->off != 0 || insn->imm != 0) {
verbose("BPF_NEG uses reserved fields\n");
return -EINVAL;
}
} else {
if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
(insn->imm != 16 && insn->imm != 32 && insn->imm != 64)) {
verbose("BPF_END uses reserved fields\n");
return -EINVAL;
}
}
/* check src operand */
err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
if (err)
return err;
if (is_pointer_value(env, insn->dst_reg)) {
verbose("R%d pointer arithmetic prohibited\n",
insn->dst_reg);
return -EACCES;
}
/* check dest operand */
err = check_reg_arg(regs, insn->dst_reg, DST_OP);
if (err)
return err;
} else if (opcode == BPF_MOV) {
if (BPF_SRC(insn->code) == BPF_X) {
if (insn->imm != 0 || insn->off != 0) {
verbose("BPF_MOV uses reserved fields\n");
return -EINVAL;
}
/* check src operand */
err = check_reg_arg(regs, insn->src_reg, SRC_OP);
if (err)
return err;
} else {
if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
verbose("BPF_MOV uses reserved fields\n");
return -EINVAL;
}
}
/* check dest operand */
err = check_reg_arg(regs, insn->dst_reg, DST_OP);
if (err)
return err;
/* we are setting our register to something new, we need to
* reset its range values.
*/
reset_reg_range_values(regs, insn->dst_reg);
if (BPF_SRC(insn->code) == BPF_X) {
if (BPF_CLASS(insn->code) == BPF_ALU64) {
/* case: R1 = R2
* copy register state to dest reg
*/
regs[insn->dst_reg] = regs[insn->src_reg];
} else {
if (is_pointer_value(env, insn->src_reg)) {
verbose("R%d partial copy of pointer\n",
insn->src_reg);
return -EACCES;
}
mark_reg_unknown_value(regs, insn->dst_reg);
}
} else {
/* case: R = imm
* remember the value we stored into this reg
*/
regs[insn->dst_reg].type = CONST_IMM;
regs[insn->dst_reg].imm = insn->imm;
regs[insn->dst_reg].max_value = insn->imm;
regs[insn->dst_reg].min_value = insn->imm;
}
} else if (opcode > BPF_END) {
verbose("invalid BPF_ALU opcode %x\n", opcode);
return -EINVAL;
} else { /* all other ALU ops: and, sub, xor, add, ... */
if (BPF_SRC(insn->code) == BPF_X) {
if (insn->imm != 0 || insn->off != 0) {
verbose("BPF_ALU uses reserved fields\n");
return -EINVAL;
}
/* check src1 operand */
err = check_reg_arg(regs, insn->src_reg, SRC_OP);
if (err)
return err;
} else {
if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
verbose("BPF_ALU uses reserved fields\n");
return -EINVAL;
}
}
/* check src2 operand */
err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
if (err)
return err;
if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
verbose("div by zero\n");
return -EINVAL;
}
if ((opcode == BPF_LSH || opcode == BPF_RSH ||
opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
if (insn->imm < 0 || insn->imm >= size) {
verbose("invalid shift %d\n", insn->imm);
return -EINVAL;
}
}
/* check dest operand */
err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK);
if (err)
return err;
dst_reg = &regs[insn->dst_reg];
/* first we want to adjust our ranges. */
adjust_reg_min_max_vals(env, insn);
/* pattern match 'bpf_add Rx, imm' instruction */
if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 &&
dst_reg->type == FRAME_PTR && BPF_SRC(insn->code) == BPF_K) {
dst_reg->type = PTR_TO_STACK;
dst_reg->imm = insn->imm;
return 0;
} else if (opcode == BPF_ADD &&
BPF_CLASS(insn->code) == BPF_ALU64 &&
dst_reg->type == PTR_TO_STACK &&
((BPF_SRC(insn->code) == BPF_X &&
regs[insn->src_reg].type == CONST_IMM) ||
BPF_SRC(insn->code) == BPF_K)) {
if (BPF_SRC(insn->code) == BPF_X)
dst_reg->imm += regs[insn->src_reg].imm;
else
dst_reg->imm += insn->imm;
return 0;
} else if (opcode == BPF_ADD &&
BPF_CLASS(insn->code) == BPF_ALU64 &&
(dst_reg->type == PTR_TO_PACKET ||
(BPF_SRC(insn->code) == BPF_X &&
regs[insn->src_reg].type == PTR_TO_PACKET))) {
/* ptr_to_packet += K|X */
return check_packet_ptr_add(env, insn);
} else if (BPF_CLASS(insn->code) == BPF_ALU64 &&
dst_reg->type == UNKNOWN_VALUE &&
env->allow_ptr_leaks) {
/* unknown += K|X */
return evaluate_reg_alu(env, insn);
} else if (BPF_CLASS(insn->code) == BPF_ALU64 &&
dst_reg->type == CONST_IMM &&
env->allow_ptr_leaks) {
/* reg_imm += K|X */
return evaluate_reg_imm_alu(env, insn);
} else if (is_pointer_value(env, insn->dst_reg)) {
verbose("R%d pointer arithmetic prohibited\n",
insn->dst_reg);
return -EACCES;
} else if (BPF_SRC(insn->code) == BPF_X &&
is_pointer_value(env, insn->src_reg)) {
verbose("R%d pointer arithmetic prohibited\n",
insn->src_reg);
return -EACCES;
}
/* If we did pointer math on a map value then just set it to our
* PTR_TO_MAP_VALUE_ADJ type so we can deal with any stores or
* loads to this register appropriately, otherwise just mark the
* register as unknown.
*/
if (env->allow_ptr_leaks &&
BPF_CLASS(insn->code) == BPF_ALU64 && opcode == BPF_ADD &&
(dst_reg->type == PTR_TO_MAP_VALUE ||
dst_reg->type == PTR_TO_MAP_VALUE_ADJ))
dst_reg->type = PTR_TO_MAP_VALUE_ADJ;
else
mark_reg_unknown_value(regs, insn->dst_reg);
}
return 0;
}
static void find_good_pkt_pointers(struct bpf_verifier_state *state,
struct bpf_reg_state *dst_reg)
{
struct bpf_reg_state *regs = state->regs, *reg;
int i;
/* LLVM can generate two kinds of checks:
*
* Type 1:
*
* r2 = r3;
* r2 += 8;
* if (r2 > pkt_end) goto <handle exception>
* <access okay>
*
* Where:
* r2 == dst_reg, pkt_end == src_reg
* r2=pkt(id=n,off=8,r=0)
* r3=pkt(id=n,off=0,r=0)
*
* Type 2:
*
* r2 = r3;
* r2 += 8;
* if (pkt_end >= r2) goto <access okay>
* <handle exception>
*
* Where:
* pkt_end == dst_reg, r2 == src_reg
* r2=pkt(id=n,off=8,r=0)
* r3=pkt(id=n,off=0,r=0)
*
* Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
* so that range of bytes [r3, r3 + 8) is safe to access.
*/
for (i = 0; i < MAX_BPF_REG; i++)
if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)
/* keep the maximum range already checked */
regs[i].range = max(regs[i].range, dst_reg->off);
for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
if (state->stack_slot_type[i] != STACK_SPILL)
continue;
reg = &state->spilled_regs[i / BPF_REG_SIZE];
if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id)
reg->range = max(reg->range, dst_reg->off);
}
}
/* Adjusts the register min/max values in the case that the dst_reg is the
* variable register that we are working on, and src_reg is a constant or we're
* simply doing a BPF_K check.
*/
static void reg_set_min_max(struct bpf_reg_state *true_reg,
struct bpf_reg_state *false_reg, u64 val,
u8 opcode)
{
switch (opcode) {
case BPF_JEQ:
/* If this is false then we know nothing Jon Snow, but if it is
* true then we know for sure.
*/
true_reg->max_value = true_reg->min_value = val;
break;
case BPF_JNE:
/* If this is true we know nothing Jon Snow, but if it is false
* we know the value for sure;
*/
false_reg->max_value = false_reg->min_value = val;
break;
case BPF_JGT:
/* Unsigned comparison, the minimum value is 0. */
false_reg->min_value = 0;
/* fallthrough */
case BPF_JSGT:
/* If this is false then we know the maximum val is val,
* otherwise we know the min val is val+1.
*/
false_reg->max_value = val;
true_reg->min_value = val + 1;
break;
case BPF_JGE:
/* Unsigned comparison, the minimum value is 0. */
false_reg->min_value = 0;
/* fallthrough */
case BPF_JSGE:
/* If this is false then we know the maximum value is val - 1,
* otherwise we know the minimum value is val.
*/
false_reg->max_value = val - 1;
true_reg->min_value = val;
break;
default:
break;
}
check_reg_overflow(false_reg);
check_reg_overflow(true_reg);
}
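/* Illustrative example (not part of the original source): for
 *
 *   if r1 > 10 goto label     // BPF_JGT with val == 10
 *
 * the taken branch learns r1.min_value == 11 and the fall-through
 * branch learns r1.max_value == 10 with r1.min_value == 0, since
 * BPF_JGT is an unsigned comparison.
 */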
/* Same as above, but for the case that dst_reg is a CONST_IMM reg and src_reg
* is the variable reg.
*/
static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
struct bpf_reg_state *false_reg, u64 val,
u8 opcode)
{
switch (opcode) {
case BPF_JEQ:
/* If this is false then we know nothing Jon Snow, but if it is
* true then we know for sure.
*/
true_reg->max_value = true_reg->min_value = val;
break;
case BPF_JNE:
/* If this is true we know nothing Jon Snow, but if it is false
* we know the value for sure;
*/
false_reg->max_value = false_reg->min_value = val;
break;
case BPF_JGT:
/* Unsigned comparison, the minimum value is 0. */
true_reg->min_value = 0;
/* fallthrough */
case BPF_JSGT:
/*
* If this is false then val <= the register; if it is true
* then the register < val.
*/
false_reg->min_value = val;
true_reg->max_value = val - 1;
break;
case BPF_JGE:
/* Unsigned comparison, the minimum value is 0. */
true_reg->min_value = 0;
/* fallthrough */
case BPF_JSGE:
* If this is false then constant < register; if it is true then
* the register <= constant.
*/
false_reg->min_value = val + 1;
true_reg->max_value = val;
break;
default:
break;
}
check_reg_overflow(false_reg);
check_reg_overflow(true_reg);
}
static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
enum bpf_reg_type type)
{
struct bpf_reg_state *reg = &regs[regno];
if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) {
if (type == UNKNOWN_VALUE) {
__mark_reg_unknown_value(regs, regno);
} else if (reg->map_ptr->inner_map_meta) {
reg->type = CONST_PTR_TO_MAP;
reg->map_ptr = reg->map_ptr->inner_map_meta;
} else {
reg->type = type;
}
/* We don't need id from this point onwards anymore, thus we
* should better reset it, so that state pruning has chances
* to take effect.
*/
reg->id = 0;
}
}
/* The logic is similar to find_good_pkt_pointers(), both could eventually
* be folded together at some point.
*/
static void mark_map_regs(struct bpf_verifier_state *state, u32 regno,
enum bpf_reg_type type)
{
struct bpf_reg_state *regs = state->regs;
u32 id = regs[regno].id;
int i;
for (i = 0; i < MAX_BPF_REG; i++)
mark_map_reg(regs, i, id, type);
for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
if (state->stack_slot_type[i] != STACK_SPILL)
continue;
mark_map_reg(state->spilled_regs, i / BPF_REG_SIZE, id, type);
}
}
static int check_cond_jmp_op(struct bpf_verifier_env *env,
struct bpf_insn *insn, int *insn_idx)
{
struct bpf_verifier_state *other_branch, *this_branch = &env->cur_state;
struct bpf_reg_state *regs = this_branch->regs, *dst_reg;
u8 opcode = BPF_OP(insn->code);
int err;
if (opcode > BPF_EXIT) {
verbose("invalid BPF_JMP opcode %x\n", opcode);
return -EINVAL;
}
if (BPF_SRC(insn->code) == BPF_X) {
if (insn->imm != 0) {
verbose("BPF_JMP uses reserved fields\n");
return -EINVAL;
}
/* check src1 operand */
err = check_reg_arg(regs, insn->src_reg, SRC_OP);
if (err)
return err;
if (is_pointer_value(env, insn->src_reg)) {
verbose("R%d pointer comparison prohibited\n",
insn->src_reg);
return -EACCES;
}
} else {
if (insn->src_reg != BPF_REG_0) {
verbose("BPF_JMP uses reserved fields\n");
return -EINVAL;
}
}
/* check src2 operand */
err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
if (err)
return err;
dst_reg = &regs[insn->dst_reg];
/* detect if R == 0 where R was initialized to zero earlier */
if (BPF_SRC(insn->code) == BPF_K &&
(opcode == BPF_JEQ || opcode == BPF_JNE) &&
dst_reg->type == CONST_IMM && dst_reg->imm == insn->imm) {
if (opcode == BPF_JEQ) {
/* if (imm == imm) goto pc+off;
* only follow the goto, ignore fall-through
*/
*insn_idx += insn->off;
return 0;
} else {
/* if (imm != imm) goto pc+off;
* only follow fall-through branch, since
* that's where the program will go
*/
return 0;
}
}
other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
if (!other_branch)
return -EFAULT;
/* detect if we are comparing against a constant value so we can adjust
* our min/max values for our dst register.
*/
if (BPF_SRC(insn->code) == BPF_X) {
if (regs[insn->src_reg].type == CONST_IMM)
reg_set_min_max(&other_branch->regs[insn->dst_reg],
dst_reg, regs[insn->src_reg].imm,
opcode);
else if (dst_reg->type == CONST_IMM)
reg_set_min_max_inv(&other_branch->regs[insn->src_reg],
&regs[insn->src_reg], dst_reg->imm,
opcode);
} else {
reg_set_min_max(&other_branch->regs[insn->dst_reg],
dst_reg, insn->imm, opcode);
}
/* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
if (BPF_SRC(insn->code) == BPF_K &&
insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
/* Mark all identical map registers in each branch as either
* safe or unknown depending on the R == 0 or R != 0 condition.
*/
mark_map_regs(this_branch, insn->dst_reg,
opcode == BPF_JEQ ? PTR_TO_MAP_VALUE : UNKNOWN_VALUE);
mark_map_regs(other_branch, insn->dst_reg,
opcode == BPF_JEQ ? UNKNOWN_VALUE : PTR_TO_MAP_VALUE);
} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
dst_reg->type == PTR_TO_PACKET &&
regs[insn->src_reg].type == PTR_TO_PACKET_END) {
find_good_pkt_pointers(this_branch, dst_reg);
} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&
dst_reg->type == PTR_TO_PACKET_END &&
regs[insn->src_reg].type == PTR_TO_PACKET) {
find_good_pkt_pointers(other_branch, &regs[insn->src_reg]);
} else if (is_pointer_value(env, insn->dst_reg)) {
verbose("R%d pointer comparison prohibited\n", insn->dst_reg);
return -EACCES;
}
if (log_level)
print_verifier_state(this_branch);
return 0;
}
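/* Illustrative example (not part of the original source): the canonical
 * null-check pattern
 *
 *   call bpf_map_lookup_elem   // r0 is PTR_TO_MAP_VALUE_OR_NULL
 *   if r0 == 0 goto out        // BPF_JEQ against imm 0
 *   *(u32 *)(r0 + 0) = 1       // r0 is now PTR_TO_MAP_VALUE, store ok
 *
 * is exactly what the PTR_TO_MAP_VALUE_OR_NULL branch above recognizes:
 * the fall-through branch is marked PTR_TO_MAP_VALUE and the taken
 * branch UNKNOWN_VALUE.
 */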
/* return the map pointer stored inside BPF_LD_IMM64 instruction */
static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
{
u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;
return (struct bpf_map *) (unsigned long) imm64;
}
/* verify BPF_LD_IMM64 instruction */
static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
struct bpf_reg_state *regs = env->cur_state.regs;
int err;
if (BPF_SIZE(insn->code) != BPF_DW) {
verbose("invalid BPF_LD_IMM insn\n");
return -EINVAL;
}
if (insn->off != 0) {
verbose("BPF_LD_IMM64 uses reserved fields\n");
return -EINVAL;
}
err = check_reg_arg(regs, insn->dst_reg, DST_OP);
if (err)
return err;
if (insn->src_reg == 0) {
u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
regs[insn->dst_reg].type = CONST_IMM;
regs[insn->dst_reg].imm = imm;
return 0;
}
/* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */
BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);
regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn);
return 0;
}
static bool may_access_skb(enum bpf_prog_type type)
{
switch (type) {
case BPF_PROG_TYPE_SOCKET_FILTER:
case BPF_PROG_TYPE_SCHED_CLS:
case BPF_PROG_TYPE_SCHED_ACT:
return true;
default:
return false;
}
}
/* verify safety of LD_ABS|LD_IND instructions:
* - they can only appear in the programs where ctx == skb
* - since they are wrappers of function calls, they scratch R1-R5 registers,
* preserve R6-R9, and store return value into R0
*
* Implicit input:
* ctx == skb == R6 == CTX
*
* Explicit input:
* SRC == any register
* IMM == 32-bit immediate
*
* Output:
* R0 - 8/16/32-bit skb data converted to cpu endianness
*/
static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
struct bpf_reg_state *regs = env->cur_state.regs;
u8 mode = BPF_MODE(insn->code);
struct bpf_reg_state *reg;
int i, err;
if (!may_access_skb(env->prog->type)) {
verbose("BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
return -EINVAL;
}
if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
BPF_SIZE(insn->code) == BPF_DW ||
(mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
verbose("BPF_LD_[ABS|IND] uses reserved fields\n");
return -EINVAL;
}
/* check whether implicit source operand (register R6) is readable */
err = check_reg_arg(regs, BPF_REG_6, SRC_OP);
if (err)
return err;
if (regs[BPF_REG_6].type != PTR_TO_CTX) {
verbose("at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
return -EINVAL;
}
if (mode == BPF_IND) {
/* check explicit source operand */
err = check_reg_arg(regs, insn->src_reg, SRC_OP);
if (err)
return err;
}
/* reset caller saved regs to unreadable */
for (i = 0; i < CALLER_SAVED_REGS; i++) {
reg = regs + caller_saved[i];
reg->type = NOT_INIT;
reg->imm = 0;
}
/* mark destination R0 register as readable, since it contains
* the value fetched from the packet
*/
regs[BPF_REG_0].type = UNKNOWN_VALUE;
return 0;
}
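/* Illustrative example (not part of the original source): a socket
 * filter doing
 *
 *   r0 = *(u16 *)skb[12]   // BPF_LD | BPF_ABS | BPF_H with imm == 12
 *
 * loads the EtherType into R0 in cpu endianness and, being a wrapped
 * function call, scratches R1-R5 -- hence the caller-saved reset above.
 */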
/* non-recursive DFS pseudo code
* 1 procedure DFS-iterative(G,v):
* 2 label v as discovered
* 3 let S be a stack
* 4 S.push(v)
* 5 while S is not empty
* 6 t <- S.pop()
* 7 if t is what we're looking for:
* 8 return t
* 9 for all edges e in G.adjacentEdges(t) do
* 10 if edge e is already labelled
* 11 continue with the next edge
* 12 w <- G.adjacentVertex(t,e)
* 13 if vertex w is not discovered and not explored
* 14 label e as tree-edge
* 15 label w as discovered
* 16 S.push(w)
* 17 continue at 5
* 18 else if vertex w is discovered
* 19 label e as back-edge
* 20 else
* 21 // vertex w is explored
* 22 label e as forward- or cross-edge
* 23 label t as explored
* 24 S.pop()
*
* convention:
* 0x10 - discovered
* 0x11 - discovered and fall-through edge labelled
* 0x12 - discovered and fall-through and branch edges labelled
* 0x20 - explored
*/
enum {
DISCOVERED = 0x10,
EXPLORED = 0x20,
FALLTHROUGH = 1,
BRANCH = 2,
};
#define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L)
static int *insn_stack; /* stack of insns to process */
static int cur_stack; /* current stack index */
static int *insn_state;
/* t, w, e - match pseudo-code above:
* t - index of current instruction
* w - next instruction
* e - edge
*/
static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
{
if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
return 0;
if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
return 0;
if (w < 0 || w >= env->prog->len) {
verbose("jump out of range from insn %d to %d\n", t, w);
return -EINVAL;
}
if (e == BRANCH)
/* mark branch target for state pruning */
env->explored_states[w] = STATE_LIST_MARK;
if (insn_state[w] == 0) {
/* tree-edge */
insn_state[t] = DISCOVERED | e;
insn_state[w] = DISCOVERED;
if (cur_stack >= env->prog->len)
return -E2BIG;
insn_stack[cur_stack++] = w;
return 1;
} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
verbose("back-edge from insn %d to %d\n", t, w);
return -EINVAL;
} else if (insn_state[w] == EXPLORED) {
/* forward- or cross-edge */
insn_state[t] = DISCOVERED | e;
} else {
verbose("insn state internal bug\n");
return -EFAULT;
}
return 0;
}
/* non-recursive depth-first-search to detect loops in BPF program
* loop == back-edge in directed graph
*/
static int check_cfg(struct bpf_verifier_env *env)
{
struct bpf_insn *insns = env->prog->insnsi;
int insn_cnt = env->prog->len;
int ret = 0;
int i, t;
insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
if (!insn_state)
return -ENOMEM;
insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
if (!insn_stack) {
kfree(insn_state);
return -ENOMEM;
}
insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
insn_stack[0] = 0; /* 0 is the first instruction */
cur_stack = 1;
peek_stack:
if (cur_stack == 0)
goto check_state;
t = insn_stack[cur_stack - 1];
if (BPF_CLASS(insns[t].code) == BPF_JMP) {
u8 opcode = BPF_OP(insns[t].code);
if (opcode == BPF_EXIT) {
goto mark_explored;
} else if (opcode == BPF_CALL) {
ret = push_insn(t, t + 1, FALLTHROUGH, env);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
goto err_free;
if (t + 1 < insn_cnt)
env->explored_states[t + 1] = STATE_LIST_MARK;
} else if (opcode == BPF_JA) {
if (BPF_SRC(insns[t].code) != BPF_K) {
ret = -EINVAL;
goto err_free;
}
/* unconditional jump with single edge */
ret = push_insn(t, t + insns[t].off + 1,
FALLTHROUGH, env);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
goto err_free;
/* tell verifier to check for equivalent states
* after every call and jump
*/
if (t + 1 < insn_cnt)
env->explored_states[t + 1] = STATE_LIST_MARK;
} else {
/* conditional jump with two edges */
ret = push_insn(t, t + 1, FALLTHROUGH, env);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
goto err_free;
ret = push_insn(t, t + insns[t].off + 1, BRANCH, env);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
goto err_free;
}
} else {
/* all other non-branch instructions with single
* fall-through edge
*/
ret = push_insn(t, t + 1, FALLTHROUGH, env);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
goto err_free;
}
mark_explored:
insn_state[t] = EXPLORED;
if (cur_stack-- <= 0) {
verbose("pop stack internal bug\n");
ret = -EFAULT;
goto err_free;
}
goto peek_stack;
check_state:
for (i = 0; i < insn_cnt; i++) {
if (insn_state[i] != EXPLORED) {
verbose("unreachable insn %d\n", i);
ret = -EINVAL;
goto err_free;
}
}
ret = 0; /* cfg looks good */
err_free:
kfree(insn_state);
kfree(insn_stack);
return ret;
}
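/* Illustrative example (not part of the original source): the program
 *
 *   0: r0 = 0
 *   1: if r0 == 0 goto -2   // target: 1 + (-2) + 1 == insn 0
 *   2: exit
 *
 * creates an edge from insn 1 back to the already-discovered insn 0, so
 * push_insn() reports "back-edge from insn 1 to 0" and check_cfg()
 * rejects the program with -EINVAL.
 */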
/* the following conditions reduce the number of explored insns
* from ~140k to ~80k for ultra large programs that use a lot of ptr_to_packet
*/
static bool compare_ptrs_to_packet(struct bpf_reg_state *old,
struct bpf_reg_state *cur)
{
if (old->id != cur->id)
return false;
/* old ptr_to_packet is more conservative, since it allows smaller
* range. Ex:
* old(off=0,r=10) is equal to cur(off=0,r=20), because
* old(off=0,r=10) means that with range=10 the verifier proceeded
* further and found no issues with the program. Now we're in the same
* spot with cur(off=0,r=20), so we're safe too, since anything further
* will only be looking at most 10 bytes after this pointer.
*/
if (old->off == cur->off && old->range < cur->range)
return true;
/* old(off=20,r=10) is equal to cur(off=22,r=22 or 5 or 0)
* since both cannot be used for packet access and safe(old)
* pointer has smaller off that could be used for further
* 'if (ptr > data_end)' check
* Ex:
* old(off=20,r=10) and cur(off=22,r=22) and cur(off=22,r=0) mean
* that we cannot access the packet.
* The safe range is:
* [ptr, ptr + range - off)
* so whenever off >= range, there are no safe bytes from this pointer.
* When comparing old->off <= cur->off, it means that older code
* went with smaller offset and that offset was later
* used to figure out the safe range after 'if (ptr > data_end)' check
* Say, 'old' state was explored like:
* ... R3(off=0, r=0)
* R4 = R3 + 20
* ... now R4(off=20,r=0) <-- here
* if (R4 > data_end)
* ... R4(off=20,r=20), R3(off=0,r=20) and R3 can be used to access.
* ... the code further went all the way to bpf_exit.
* Now the 'cur' state at the mark 'here' has R4(off=30,r=0).
* old_R4(off=20,r=0) equal to cur_R4(off=30,r=0), since if the verifier
* goes further, such cur_R4 will give larger safe packet range after
* 'if (R4 > data_end)' and all further insn were already good with r=20,
* so they will be good with r=30 and we can prune the search.
*/
if (old->off <= cur->off &&
old->off >= old->range && cur->off >= cur->range)
return true;
return false;
}
/* compare two verifier states
*
* all states stored in state_list are known to be valid, since
* verifier reached 'bpf_exit' instruction through them
*
* this function is called when verifier exploring different branches of
* execution popped from the state stack. If it sees an old state that has
* more strict register state and more strict stack state then this execution
* branch doesn't need to be explored further, since verifier already
* concluded that more strict state leads to valid finish.
*
* Therefore two states are equivalent if register state is more conservative
* and explored stack state is more conservative than the current one.
* Example:
* explored current
* (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
* (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
*
* In other words if current stack state (one being explored) has more
* valid slots than old one that already passed validation, it means
* the verifier can stop exploring and conclude that current state is valid too
*
* Similarly with registers. If explored state has register type as invalid
* whereas register type in current state is meaningful, it means that
* the current state will reach 'bpf_exit' instruction safely
*/
static bool states_equal(struct bpf_verifier_env *env,
struct bpf_verifier_state *old,
struct bpf_verifier_state *cur)
{
bool varlen_map_access = env->varlen_map_value_access;
struct bpf_reg_state *rold, *rcur;
int i;
for (i = 0; i < MAX_BPF_REG; i++) {
rold = &old->regs[i];
rcur = &cur->regs[i];
if (memcmp(rold, rcur, sizeof(*rold)) == 0)
continue;
/* If the ranges were not the same, but everything else was and
* we didn't do a variable access into a map then we are a-ok.
*/
if (!varlen_map_access &&
memcmp(rold, rcur, offsetofend(struct bpf_reg_state, id)) == 0)
continue;
/* If we didn't do a map access then again we don't care about the
* mismatched range values and it's ok if our old type was
* UNKNOWN and we didn't go to a NOT_INIT'ed reg.
*/
if (rold->type == NOT_INIT ||
(!varlen_map_access && rold->type == UNKNOWN_VALUE &&
rcur->type != NOT_INIT))
continue;
if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET &&
compare_ptrs_to_packet(rold, rcur))
continue;
return false;
}
for (i = 0; i < MAX_BPF_STACK; i++) {
if (old->stack_slot_type[i] == STACK_INVALID)
continue;
if (old->stack_slot_type[i] != cur->stack_slot_type[i])
/* Ex: old explored (safe) state has STACK_SPILL in
* this stack slot, but current has STACK_MISC ->
* these verifier states are not equivalent,
* return false to continue verification of this path
*/
return false;
if (i % BPF_REG_SIZE)
continue;
if (memcmp(&old->spilled_regs[i / BPF_REG_SIZE],
&cur->spilled_regs[i / BPF_REG_SIZE],
sizeof(old->spilled_regs[0])))
/* when explored and current stack slot types are
* the same, check that stored pointers types
* are the same as well.
* Ex: explored safe path could have stored
* (bpf_reg_state) {.type = PTR_TO_STACK, .imm = -8}
* but current path has stored:
* (bpf_reg_state) {.type = PTR_TO_STACK, .imm = -16}
* such verifier states are not equivalent.
* return false to continue verification of this path
*/
return false;
else
continue;
}
return true;
}
static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
{
struct bpf_verifier_state_list *new_sl;
struct bpf_verifier_state_list *sl;
sl = env->explored_states[insn_idx];
if (!sl)
/* this 'insn_idx' instruction wasn't marked, so we will not
* be doing state search here
*/
return 0;
while (sl != STATE_LIST_MARK) {
if (states_equal(env, &sl->state, &env->cur_state))
/* reached equivalent register/stack state,
* prune the search
*/
return 1;
sl = sl->next;
}
/* there were no equivalent states, remember current one.
* technically the current state is not proven to be safe yet,
* but it will either reach bpf_exit (which means it's safe) or
* it will be rejected. Since there are no loops, we won't be
* seeing this 'insn_idx' instruction again on the way to bpf_exit
*/
new_sl = kmalloc(sizeof(struct bpf_verifier_state_list), GFP_USER);
if (!new_sl)
return -ENOMEM;
/* add new state to the head of linked list */
memcpy(&new_sl->state, &env->cur_state, sizeof(env->cur_state));
new_sl->next = env->explored_states[insn_idx];
env->explored_states[insn_idx] = new_sl;
return 0;
}
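/* Illustrative example (not part of the original source):
 *
 *   if r0 > 10 goto l1
 *   r0 = 0
 *   goto l2
 *   l1: r0 = 0
 *   l2: exit   // both paths arrive with identical state
 *
 * The first path through l2 stores its state; the second path finds it
 * equal via states_equal() and is pruned, printing "safe" at log_level.
 */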
static int ext_analyzer_insn_hook(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx)
{
if (!env->analyzer_ops || !env->analyzer_ops->insn_hook)
return 0;
return env->analyzer_ops->insn_hook(env, insn_idx, prev_insn_idx);
}
static int do_check(struct bpf_verifier_env *env)
{
struct bpf_verifier_state *state = &env->cur_state;
struct bpf_insn *insns = env->prog->insnsi;
struct bpf_reg_state *regs = state->regs;
int insn_cnt = env->prog->len;
int insn_idx, prev_insn_idx = 0;
int insn_processed = 0;
bool do_print_state = false;
init_reg_state(regs);
insn_idx = 0;
env->varlen_map_value_access = false;
for (;;) {
struct bpf_insn *insn;
u8 class;
int err;
if (insn_idx >= insn_cnt) {
verbose("invalid insn idx %d insn_cnt %d\n",
insn_idx, insn_cnt);
return -EFAULT;
}
insn = &insns[insn_idx];
class = BPF_CLASS(insn->code);
if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
verbose("BPF program is too large. Processed %d insn\n",
insn_processed);
return -E2BIG;
}
err = is_state_visited(env, insn_idx);
if (err < 0)
return err;
if (err == 1) {
/* found equivalent state, can prune the search */
if (log_level) {
if (do_print_state)
verbose("\nfrom %d to %d: safe\n",
prev_insn_idx, insn_idx);
else
verbose("%d: safe\n", insn_idx);
}
goto process_bpf_exit;
}
if (log_level && do_print_state) {
verbose("\nfrom %d to %d:", prev_insn_idx, insn_idx);
print_verifier_state(&env->cur_state);
do_print_state = false;
}
if (log_level) {
verbose("%d: ", insn_idx);
print_bpf_insn(env, insn);
}
err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx);
if (err)
return err;
if (class == BPF_ALU || class == BPF_ALU64) {
err = check_alu_op(env, insn);
if (err)
return err;
} else if (class == BPF_LDX) {
enum bpf_reg_type *prev_src_type, src_reg_type;
/* check for reserved fields is already done */
/* check src operand */
err = check_reg_arg(regs, insn->src_reg, SRC_OP);
if (err)
return err;
err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK);
if (err)
return err;
src_reg_type = regs[insn->src_reg].type;
/* check that memory (src_reg + off) is readable,
* the state of dst_reg will be updated by this func
*/
err = check_mem_access(env, insn->src_reg, insn->off,
BPF_SIZE(insn->code), BPF_READ,
insn->dst_reg);
if (err)
return err;
if (BPF_SIZE(insn->code) != BPF_W &&
BPF_SIZE(insn->code) != BPF_DW) {
insn_idx++;
continue;
}
prev_src_type = &env->insn_aux_data[insn_idx].ptr_type;
if (*prev_src_type == NOT_INIT) {
/* saw a valid insn
* dst_reg = *(u32 *)(src_reg + off)
* save type to validate intersecting paths
*/
*prev_src_type = src_reg_type;
} else if (src_reg_type != *prev_src_type &&
(src_reg_type == PTR_TO_CTX ||
*prev_src_type == PTR_TO_CTX)) {
/* An abusive program is trying to use the same insn
* dst_reg = *(u32*) (src_reg + off)
* with different pointer types:
* src_reg == ctx in one branch and
* src_reg == stack|map in some other branch.
* Reject it.
*/
verbose("same insn cannot be used with different pointers\n");
return -EINVAL;
}
} else if (class == BPF_STX) {
enum bpf_reg_type *prev_dst_type, dst_reg_type;
if (BPF_MODE(insn->code) == BPF_XADD) {
err = check_xadd(env, insn);
if (err)
return err;
insn_idx++;
continue;
}
/* check src1 operand */
err = check_reg_arg(regs, insn->src_reg, SRC_OP);
if (err)
return err;
/* check src2 operand */
err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
if (err)
return err;
dst_reg_type = regs[insn->dst_reg].type;
/* check that memory (dst_reg + off) is writeable */
err = check_mem_access(env, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_WRITE,
insn->src_reg);
if (err)
return err;
prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type;
if (*prev_dst_type == NOT_INIT) {
*prev_dst_type = dst_reg_type;
} else if (dst_reg_type != *prev_dst_type &&
(dst_reg_type == PTR_TO_CTX ||
*prev_dst_type == PTR_TO_CTX)) {
verbose("same insn cannot be used with different pointers\n");
return -EINVAL;
}
} else if (class == BPF_ST) {
if (BPF_MODE(insn->code) != BPF_MEM ||
insn->src_reg != BPF_REG_0) {
verbose("BPF_ST uses reserved fields\n");
return -EINVAL;
}
/* check src operand */
err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
if (err)
return err;
/* check that memory (dst_reg + off) is writeable */
err = check_mem_access(env, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_WRITE,
-1);
if (err)
return err;
} else if (class == BPF_JMP) {
u8 opcode = BPF_OP(insn->code);
if (opcode == BPF_CALL) {
if (BPF_SRC(insn->code) != BPF_K ||
insn->off != 0 ||
insn->src_reg != BPF_REG_0 ||
insn->dst_reg != BPF_REG_0) {
verbose("BPF_CALL uses reserved fields\n");
return -EINVAL;
}
err = check_call(env, insn->imm, insn_idx);
if (err)
return err;
} else if (opcode == BPF_JA) {
if (BPF_SRC(insn->code) != BPF_K ||
insn->imm != 0 ||
insn->src_reg != BPF_REG_0 ||
insn->dst_reg != BPF_REG_0) {
verbose("BPF_JA uses reserved fields\n");
return -EINVAL;
}
insn_idx += insn->off + 1;
continue;
} else if (opcode == BPF_EXIT) {
if (BPF_SRC(insn->code) != BPF_K ||
insn->imm != 0 ||
insn->src_reg != BPF_REG_0 ||
insn->dst_reg != BPF_REG_0) {
verbose("BPF_EXIT uses reserved fields\n");
return -EINVAL;
}
/* eBPF calling convention is such that R0 is used
* to return the value from eBPF program.
* Make sure that it's readable at this time
* of bpf_exit, which means that program wrote
* something into it earlier
*/
err = check_reg_arg(regs, BPF_REG_0, SRC_OP);
if (err)
return err;
if (is_pointer_value(env, BPF_REG_0)) {
verbose("R0 leaks addr as return value\n");
return -EACCES;
}
process_bpf_exit:
insn_idx = pop_stack(env, &prev_insn_idx);
if (insn_idx < 0) {
break;
} else {
do_print_state = true;
continue;
}
} else {
err = check_cond_jmp_op(env, insn, &insn_idx);
if (err)
return err;
}
} else if (class == BPF_LD) {
u8 mode = BPF_MODE(insn->code);
if (mode == BPF_ABS || mode == BPF_IND) {
err = check_ld_abs(env, insn);
if (err)
return err;
} else if (mode == BPF_IMM) {
err = check_ld_imm(env, insn);
if (err)
return err;
insn_idx++;
} else {
verbose("invalid BPF_LD mode\n");
return -EINVAL;
}
reset_reg_range_values(regs, insn->dst_reg);
} else {
verbose("unknown insn class %d\n", class);
return -EINVAL;
}
insn_idx++;
}
verbose("processed %d insns\n", insn_processed);
return 0;
}
static int check_map_prealloc(struct bpf_map *map)
{
return (map->map_type != BPF_MAP_TYPE_HASH &&
map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
!(map->map_flags & BPF_F_NO_PREALLOC);
}
static int check_map_prog_compatibility(struct bpf_map *map,
struct bpf_prog *prog)
{
/* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use
* preallocated hash maps, since doing memory allocation
* in overflow_handler can crash depending on where nmi got
* triggered.
*/
if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
if (!check_map_prealloc(map)) {
verbose("perf_event programs can only use preallocated hash map\n");
return -EINVAL;
}
if (map->inner_map_meta &&
!check_map_prealloc(map->inner_map_meta)) {
verbose("perf_event programs can only use preallocated inner hash map\n");
return -EINVAL;
}
}
return 0;
}
/* look for pseudo eBPF instructions that access map FDs and
* replace them with actual map pointers
*/
static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
{
struct bpf_insn *insn = env->prog->insnsi;
int insn_cnt = env->prog->len;
int i, j, err;
err = bpf_prog_calc_tag(env->prog);
if (err)
return err;
for (i = 0; i < insn_cnt; i++, insn++) {
if (BPF_CLASS(insn->code) == BPF_LDX &&
(BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
verbose("BPF_LDX uses reserved fields\n");
return -EINVAL;
}
if (BPF_CLASS(insn->code) == BPF_STX &&
((BPF_MODE(insn->code) != BPF_MEM &&
BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
verbose("BPF_STX uses reserved fields\n");
return -EINVAL;
}
if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
struct bpf_map *map;
struct fd f;
if (i == insn_cnt - 1 || insn[1].code != 0 ||
insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
insn[1].off != 0) {
verbose("invalid bpf_ld_imm64 insn\n");
return -EINVAL;
}
if (insn->src_reg == 0)
/* valid generic load 64-bit imm */
goto next_insn;
if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
verbose("unrecognized bpf_ld_imm64 insn\n");
return -EINVAL;
}
f = fdget(insn->imm);
map = __bpf_map_get(f);
if (IS_ERR(map)) {
verbose("fd %d is not pointing to valid bpf_map\n",
insn->imm);
return PTR_ERR(map);
}
err = check_map_prog_compatibility(map, env->prog);
if (err) {
fdput(f);
return err;
}
/* store map pointer inside BPF_LD_IMM64 instruction */
insn[0].imm = (u32) (unsigned long) map;
insn[1].imm = ((u64) (unsigned long) map) >> 32;
/* check whether we recorded this map already */
for (j = 0; j < env->used_map_cnt; j++)
if (env->used_maps[j] == map) {
fdput(f);
goto next_insn;
}
if (env->used_map_cnt >= MAX_USED_MAPS) {
fdput(f);
return -E2BIG;
}
/* hold the map. If the program is rejected by verifier,
* the map will be released by release_maps() or it
* will be used by the valid program until it's unloaded
* and all maps are released in free_bpf_prog_info()
*/
map = bpf_map_inc(map, false);
if (IS_ERR(map)) {
fdput(f);
return PTR_ERR(map);
}
env->used_maps[env->used_map_cnt++] = map;
fdput(f);
next_insn:
insn++;
i++;
}
}
/* now all pseudo BPF_LD_IMM64 instructions load valid
* 'struct bpf_map *' into a register instead of user map_fd.
* These pointers will be used later by verifier to validate map access.
*/
return 0;
}
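/* Illustrative sketch (not part of the original file): a userspace loader
 * typically emits the pseudo instruction pair rewritten above roughly as
 *
 *	struct bpf_insn ld_map_fd[2] = {
 *		{ .code    = BPF_LD | BPF_DW | BPF_IMM,
 *		  .dst_reg = BPF_REG_1,
 *		  .src_reg = BPF_PSEUDO_MAP_FD,
 *		  .imm     = map_fd },
 *		{ 0 },
 *	};
 *
 * where map_fd is a hypothetical fd returned by BPF_MAP_CREATE and the
 * all-zero second slot carries the upper 32 bits of the 64-bit immediate.
 * After replace_map_fd_with_map_ptr() runs, insn[0].imm and insn[1].imm
 * hold the low and high halves of the kernel 'struct bpf_map *' instead.
 */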
/* drop refcnt of maps used by the rejected program */
static void release_maps(struct bpf_verifier_env *env)
{
int i;
for (i = 0; i < env->used_map_cnt; i++)
bpf_map_put(env->used_maps[i]);
}
/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
{
struct bpf_insn *insn = env->prog->insnsi;
int insn_cnt = env->prog->len;
int i;
for (i = 0; i < insn_cnt; i++, insn++)
if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
insn->src_reg = 0;
}
/* single env->prog->insnsi[off] instruction was replaced with the range
 * insnsi[off, off + cnt). Adjust corresponding insn_aux_data by copying
* [0, off) and [off, end) to new locations, so the patched range stays zero
*/
static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
u32 off, u32 cnt)
{
struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
if (cnt == 1)
return 0;
new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len);
if (!new_data)
return -ENOMEM;
memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
memcpy(new_data + off + cnt - 1, old_data + off,
sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
env->insn_aux_data = new_data;
vfree(old_data);
return 0;
}
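/* Worked example (illustrative, hypothetical values): suppose a single
 * insn at off = 3 was replaced by cnt = 4 insns, growing the program from
 * 7 to prog_len = 10. The first memcpy preserves aux data for insns
 * [0, 3); the second copies the 10 - 3 - 4 + 1 = 4 old entries starting
 * at old index 3 to new index 3 + 4 - 1 = 6, so the freshly patched slots
 * [3, 6) keep the zero initialization provided by vzalloc().
 */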
static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
const struct bpf_insn *patch, u32 len)
{
struct bpf_prog *new_prog;
new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
if (!new_prog)
return NULL;
if (adjust_insn_aux_data(env, new_prog->len, off, len))
return NULL;
return new_prog;
}
/* convert load instructions that access fields of 'struct __sk_buff'
* into sequence of instructions that access fields of 'struct sk_buff'
*/
static int convert_ctx_accesses(struct bpf_verifier_env *env)
{
const struct bpf_verifier_ops *ops = env->prog->aux->ops;
const int insn_cnt = env->prog->len;
struct bpf_insn insn_buf[16], *insn;
struct bpf_prog *new_prog;
enum bpf_access_type type;
int i, cnt, delta = 0;
if (ops->gen_prologue) {
cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
env->prog);
if (cnt >= ARRAY_SIZE(insn_buf)) {
verbose("bpf verifier is misconfigured\n");
return -EINVAL;
} else if (cnt) {
new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
env->prog = new_prog;
delta += cnt - 1;
}
}
if (!ops->convert_ctx_access)
return 0;
insn = env->prog->insnsi + delta;
for (i = 0; i < insn_cnt; i++, insn++) {
if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
type = BPF_READ;
else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
insn->code == (BPF_STX | BPF_MEM | BPF_DW))
type = BPF_WRITE;
else
continue;
if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
continue;
cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog);
if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
verbose("bpf verifier is misconfigured\n");
return -EINVAL;
}
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
/* keep walking new program and skip insns we just inserted */
env->prog = new_prog;
insn = new_prog->insnsi + i + delta;
}
return 0;
}
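/* Illustrative sketch (not part of the original file): for a socket
 * filter, a context load such as
 *
 *	r0 = *(u32 *)(r1 + offsetof(struct __sk_buff, len));
 *
 * is rewritten by the ops->convert_ctx_access() callback into an
 * equivalent access on the real in-kernel object, conceptually
 *
 *	r0 = *(u32 *)(r1 + offsetof(struct sk_buff, len));
 *
 * and may expand into several instructions when the real field needs
 * extra shifting or masking; the exact sequence is program-type specific.
 */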
/* fixup insn->imm field of bpf_call instructions
* and inline eligible helpers as explicit sequence of BPF instructions
*
* this function is called after eBPF program passed verification
*/
static int fixup_bpf_calls(struct bpf_verifier_env *env)
{
struct bpf_prog *prog = env->prog;
struct bpf_insn *insn = prog->insnsi;
const struct bpf_func_proto *fn;
const int insn_cnt = prog->len;
struct bpf_insn insn_buf[16];
struct bpf_prog *new_prog;
struct bpf_map *map_ptr;
int i, cnt, delta = 0;
for (i = 0; i < insn_cnt; i++, insn++) {
if (insn->code != (BPF_JMP | BPF_CALL))
continue;
if (insn->imm == BPF_FUNC_get_route_realm)
prog->dst_needed = 1;
if (insn->imm == BPF_FUNC_get_prandom_u32)
bpf_user_rnd_init_once();
if (insn->imm == BPF_FUNC_tail_call) {
/* If we tail call into other programs, we
* cannot make any assumptions since they can
* be replaced dynamically during runtime in
* the program array.
*/
prog->cb_access = 1;
/* mark bpf_tail_call as different opcode to avoid
 * conditional branch in the interpreter for every normal
* call and to prevent accidental JITing by JIT compiler
* that doesn't support bpf_tail_call yet
*/
insn->imm = 0;
insn->code |= BPF_X;
continue;
}
if (ebpf_jit_enabled() && insn->imm == BPF_FUNC_map_lookup_elem) {
map_ptr = env->insn_aux_data[i + delta].map_ptr;
if (map_ptr == BPF_MAP_PTR_POISON ||
!map_ptr->ops->map_gen_lookup)
goto patch_call_imm;
cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf);
if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
verbose("bpf verifier is misconfigured\n");
return -EINVAL;
}
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
/* keep walking new program and skip insns we just inserted */
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
continue;
}
patch_call_imm:
fn = prog->aux->ops->get_func_proto(insn->imm);
/* all functions that have a prototype and that the verifier allowed
 * programs to call must be real in-kernel functions
*/
if (!fn->func) {
verbose("kernel subsystem misconfigured func %s#%d\n",
func_id_name(insn->imm), insn->imm);
return -EFAULT;
}
insn->imm = fn->func - __bpf_call_base;
}
return 0;
}
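/* Note (illustrative): before this pass a helper call carries the
 * BPF_FUNC_* id in insn->imm (e.g. insn->imm == BPF_FUNC_map_lookup_elem);
 * afterwards imm holds fn->func - __bpf_call_base, so the interpreter can
 * resolve the target simply as __bpf_call_base + insn->imm. Eligible
 * map_lookup_elem calls may instead have been inlined above when a JIT is
 * enabled and the map implements ->map_gen_lookup().
 */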
static void free_states(struct bpf_verifier_env *env)
{
struct bpf_verifier_state_list *sl, *sln;
int i;
if (!env->explored_states)
return;
for (i = 0; i < env->prog->len; i++) {
sl = env->explored_states[i];
if (sl)
while (sl != STATE_LIST_MARK) {
sln = sl->next;
kfree(sl);
sl = sln;
}
}
kfree(env->explored_states);
}
int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
{
char __user *log_ubuf = NULL;
struct bpf_verifier_env *env;
int ret = -EINVAL;
/* 'struct bpf_verifier_env' can be global, but since it's not small,
* allocate/free it every time bpf_check() is called
*/
env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
if (!env)
return -ENOMEM;
env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) *
(*prog)->len);
ret = -ENOMEM;
if (!env->insn_aux_data)
goto err_free_env;
env->prog = *prog;
/* grab the mutex to protect few globals used by verifier */
mutex_lock(&bpf_verifier_lock);
if (attr->log_level || attr->log_buf || attr->log_size) {
/* user requested verbose verifier output
* and supplied buffer to store the verification trace
*/
log_level = attr->log_level;
log_ubuf = (char __user *) (unsigned long) attr->log_buf;
log_size = attr->log_size;
log_len = 0;
ret = -EINVAL;
/* log_* values have to be sane */
if (log_size < 128 || log_size > UINT_MAX >> 8 ||
log_level == 0 || log_ubuf == NULL)
goto err_unlock;
ret = -ENOMEM;
log_buf = vmalloc(log_size);
if (!log_buf)
goto err_unlock;
} else {
log_level = 0;
}
ret = replace_map_fd_with_map_ptr(env);
if (ret < 0)
goto skip_full_check;
env->explored_states = kcalloc(env->prog->len,
sizeof(struct bpf_verifier_state_list *),
GFP_USER);
ret = -ENOMEM;
if (!env->explored_states)
goto skip_full_check;
ret = check_cfg(env);
if (ret < 0)
goto skip_full_check;
env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
ret = do_check(env);
skip_full_check:
while (pop_stack(env, NULL) >= 0);
free_states(env);
if (ret == 0)
/* program is valid, convert *(u32*)(ctx + off) accesses */
ret = convert_ctx_accesses(env);
if (ret == 0)
ret = fixup_bpf_calls(env);
if (log_level && log_len >= log_size - 1) {
BUG_ON(log_len >= log_size);
/* verifier log exceeded user supplied buffer */
ret = -ENOSPC;
/* fall through to return what was recorded */
}
/* copy verifier log back to user space including trailing zero */
if (log_level && copy_to_user(log_ubuf, log_buf, log_len + 1) != 0) {
ret = -EFAULT;
goto free_log_buf;
}
if (ret == 0 && env->used_map_cnt) {
/* if program passed verifier, update used_maps in bpf_prog_info */
env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
sizeof(env->used_maps[0]),
GFP_KERNEL);
if (!env->prog->aux->used_maps) {
ret = -ENOMEM;
goto free_log_buf;
}
memcpy(env->prog->aux->used_maps, env->used_maps,
sizeof(env->used_maps[0]) * env->used_map_cnt);
env->prog->aux->used_map_cnt = env->used_map_cnt;
/* program is valid. Convert pseudo bpf_ld_imm64 into generic
* bpf_ld_imm64 instructions
*/
convert_pseudo_ld_imm64(env);
}
free_log_buf:
if (log_level)
vfree(log_buf);
if (!env->prog->aux->used_maps)
/* if we didn't copy map pointers into bpf_prog_info, release
* them now. Otherwise free_bpf_prog_info() will release them.
*/
release_maps(env);
*prog = env->prog;
err_unlock:
mutex_unlock(&bpf_verifier_lock);
vfree(env->insn_aux_data);
err_free_env:
kfree(env);
return ret;
}
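/* Note (illustrative): bpf_check() is invoked from the BPF_PROG_LOAD
 * syscall path roughly as
 *
 *	err = bpf_check(&prog, attr);
 *
 * and may replace *prog, since verification can patch the instruction
 * stream (prologue generation, ctx access conversion, helper inlining).
 */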
int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
void *priv)
{
struct bpf_verifier_env *env;
int ret;
env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
if (!env)
return -ENOMEM;
env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) *
prog->len);
ret = -ENOMEM;
if (!env->insn_aux_data)
goto err_free_env;
env->prog = prog;
env->analyzer_ops = ops;
env->analyzer_priv = priv;
/* grab the mutex to protect few globals used by verifier */
mutex_lock(&bpf_verifier_lock);
log_level = 0;
env->explored_states = kcalloc(env->prog->len,
sizeof(struct bpf_verifier_state_list *),
GFP_KERNEL);
ret = -ENOMEM;
if (!env->explored_states)
goto skip_full_check;
ret = check_cfg(env);
if (ret < 0)
goto skip_full_check;
env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
ret = do_check(env);
skip_full_check:
while (pop_stack(env, NULL) >= 0);
free_states(env);
mutex_unlock(&bpf_verifier_lock);
vfree(env->insn_aux_data);
err_free_env:
kfree(env);
return ret;
}
EXPORT_SYMBOL_GPL(bpf_analyzer);
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_3366_0 |
crossvul-cpp_data_good_3829_0 | /*
* Copyright (c) 2007 The University of Aberdeen, Scotland, UK
* Copyright (c) 2005-7 The University of Waikato, Hamilton, New Zealand.
* Copyright (c) 2005-7 Ian McDonald <ian.mcdonald@jandi.co.nz>
*
* An implementation of the DCCP protocol
*
* This code has been developed by the University of Waikato WAND
* research group. For further information please see http://www.wand.net.nz/
*
* This code also uses code from Lulea University, rereleased as GPL by its
* authors:
* Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon
*
* Changes to meet Linux coding standards, to make it meet latest ccid3 draft
* and to make it work as a loadable module in the DCCP stack written by
* Arnaldo Carvalho de Melo <acme@conectiva.com.br>.
*
* Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "../dccp.h"
#include "ccid3.h"
#include <asm/unaligned.h>
#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
static bool ccid3_debug;
#define ccid3_pr_debug(format, a...) DCCP_PR_DEBUG(ccid3_debug, format, ##a)
#else
#define ccid3_pr_debug(format, a...)
#endif
/*
* Transmitter Half-Connection Routines
*/
#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
static const char *ccid3_tx_state_name(enum ccid3_hc_tx_states state)
{
static const char *const ccid3_state_names[] = {
[TFRC_SSTATE_NO_SENT] = "NO_SENT",
[TFRC_SSTATE_NO_FBACK] = "NO_FBACK",
[TFRC_SSTATE_FBACK] = "FBACK",
};
return ccid3_state_names[state];
}
#endif
static void ccid3_hc_tx_set_state(struct sock *sk,
enum ccid3_hc_tx_states state)
{
struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
enum ccid3_hc_tx_states oldstate = hc->tx_state;
ccid3_pr_debug("%s(%p) %-8.8s -> %s\n",
dccp_role(sk), sk, ccid3_tx_state_name(oldstate),
ccid3_tx_state_name(state));
WARN_ON(state == oldstate);
hc->tx_state = state;
}
/*
* Compute the initial sending rate X_init in the manner of RFC 3390:
*
* X_init = min(4 * s, max(2 * s, 4380 bytes)) / RTT
*
* Note that RFC 3390 uses MSS, RFC 4342 refers to RFC 3390, and rfc3448bis
* (rev-02) clarifies the use of RFC 3390 with regard to the above formula.
* For consistency with other parts of the code, X_init is scaled by 2^6.
*/
static inline u64 rfc3390_initial_rate(struct sock *sk)
{
const struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
const __u32 w_init = clamp_t(__u32, 4380U, 2 * hc->tx_s, 4 * hc->tx_s);
return scaled_div(w_init << 6, hc->tx_rtt);
}
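/* Worked example (illustrative; assumes scaled_div() multiplies by
 * USEC_PER_SEC as elsewhere in the TFRC library): with s = 1460 bytes,
 * w_init = clamp(4380, 2920, 5840) = 4380 bytes, and for an RTT of
 * 100000 us this yields X_init = (4380 << 6) * 10^6 / 10^5 = 43800 << 6,
 * i.e. 43800 bytes/second once the 2^6 scaling is removed.
 */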
/**
* ccid3_update_send_interval - Calculate new t_ipi = s / X_inst
* This respects the granularity of X_inst (64 * bytes/second).
*/
static void ccid3_update_send_interval(struct ccid3_hc_tx_sock *hc)
{
hc->tx_t_ipi = scaled_div32(((u64)hc->tx_s) << 6, hc->tx_x);
DCCP_BUG_ON(hc->tx_t_ipi == 0);
ccid3_pr_debug("t_ipi=%u, s=%u, X=%u\n", hc->tx_t_ipi,
hc->tx_s, (unsigned int)(hc->tx_x >> 6));
}
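/* Worked example (illustrative; assumes scaled_div32() scales by
 * USEC_PER_SEC like scaled_div()): with s = 1000 bytes and X stored as
 * 1000 << 6 (1000 bytes/second), t_ipi = (1000 << 6) * 10^6 / (1000 << 6)
 * = 10^6 us, i.e. exactly one packet per second, matching t_ipi = s / X.
 */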
static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hc, ktime_t now)
{
u32 delta = ktime_us_delta(now, hc->tx_t_last_win_count);
return delta / hc->tx_rtt;
}
/**
* ccid3_hc_tx_update_x - Update allowed sending rate X
* @stamp: most recent time if available - can be left NULL.
*
* This function tracks draft rfc3448bis, check there for latest details.
*
* Note: X and X_recv are both stored in units of 64 * bytes/second, to support
* fine-grained resolution of sending rates. This requires scaling by 2^6
* throughout the code. Only X_calc is unscaled (in bytes/second).
*
*/
static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp)
{
struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
__u64 min_rate = 2 * hc->tx_x_recv;
const __u64 old_x = hc->tx_x;
ktime_t now = stamp ? *stamp : ktime_get_real();
/*
* Handle IDLE periods: do not reduce below RFC3390 initial sending rate
* when idling [RFC 4342, 5.1]. Definition of idling is from rfc3448bis:
* a sender is idle if it has not sent anything over a 2-RTT-period.
* For consistency with X and X_recv, min_rate is also scaled by 2^6.
*/
if (ccid3_hc_tx_idle_rtt(hc, now) >= 2) {
min_rate = rfc3390_initial_rate(sk);
min_rate = max(min_rate, 2 * hc->tx_x_recv);
}
if (hc->tx_p > 0) {
hc->tx_x = min(((__u64)hc->tx_x_calc) << 6, min_rate);
hc->tx_x = max(hc->tx_x, (((__u64)hc->tx_s) << 6) / TFRC_T_MBI);
} else if (ktime_us_delta(now, hc->tx_t_ld) - (s64)hc->tx_rtt >= 0) {
hc->tx_x = min(2 * hc->tx_x, min_rate);
hc->tx_x = max(hc->tx_x,
scaled_div(((__u64)hc->tx_s) << 6, hc->tx_rtt));
hc->tx_t_ld = now;
}
if (hc->tx_x != old_x) {
ccid3_pr_debug("X_prev=%u, X_now=%u, X_calc=%u, "
"X_recv=%u\n", (unsigned int)(old_x >> 6),
(unsigned int)(hc->tx_x >> 6), hc->tx_x_calc,
(unsigned int)(hc->tx_x_recv >> 6));
ccid3_update_send_interval(hc);
}
}
/**
* ccid3_hc_tx_update_s - Track the mean packet size `s'
* @len: DCCP packet payload size in bytes
*
* cf. RFC 4342, 5.3 and RFC 3448, 4.1
*/
static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hc, int len)
{
const u16 old_s = hc->tx_s;
hc->tx_s = tfrc_ewma(hc->tx_s, len, 9);
if (hc->tx_s != old_s)
ccid3_update_send_interval(hc);
}
/*
* Update Window Counter using the algorithm from [RFC 4342, 8.1].
* As elsewhere, RTT > 0 is assumed by using dccp_sample_rtt().
*/
static inline void ccid3_hc_tx_update_win_count(struct ccid3_hc_tx_sock *hc,
ktime_t now)
{
u32 delta = ktime_us_delta(now, hc->tx_t_last_win_count),
quarter_rtts = (4 * delta) / hc->tx_rtt;
if (quarter_rtts > 0) {
hc->tx_t_last_win_count = now;
hc->tx_last_win_count += min(quarter_rtts, 5U);
hc->tx_last_win_count &= 0xF; /* mod 16 */
}
}
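/* Worked example (illustrative, hypothetical values): with tx_rtt =
 * 1000 us and a 2600 us gap since tx_t_last_win_count, quarter_rtts =
 * (4 * 2600) / 1000 = 10, so the counter advances by min(10, 5) = 5 and
 * is then reduced mod 16, implementing the cap from RFC 4342, 8.1.
 */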
static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
{
struct sock *sk = (struct sock *)data;
struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
unsigned long t_nfb = USEC_PER_SEC / 5;
bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
/* Try again later. */
/* XXX: set some sensible MIB */
goto restart_timer;
}
ccid3_pr_debug("%s(%p, state=%s) - entry\n", dccp_role(sk), sk,
ccid3_tx_state_name(hc->tx_state));
/* Ignore and do not restart after leaving the established state */
if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN))
goto out;
/* Reset feedback state to "no feedback received" */
if (hc->tx_state == TFRC_SSTATE_FBACK)
ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);
/*
* Determine new allowed sending rate X as per draft rfc3448bis-00, 4.4
* RTO is 0 if and only if no feedback has been received yet.
*/
if (hc->tx_t_rto == 0 || hc->tx_p == 0) {
/* halve send rate directly */
hc->tx_x = max(hc->tx_x / 2,
(((__u64)hc->tx_s) << 6) / TFRC_T_MBI);
ccid3_update_send_interval(hc);
} else {
/*
* Modify the cached value of X_recv
*
* If (X_calc > 2 * X_recv)
* X_recv = max(X_recv / 2, s / (2 * t_mbi));
* Else
* X_recv = X_calc / 4;
*
* Note that X_recv is scaled by 2^6 while X_calc is not
*/
if (hc->tx_x_calc > (hc->tx_x_recv >> 5))
hc->tx_x_recv =
max(hc->tx_x_recv / 2,
(((__u64)hc->tx_s) << 6) / (2*TFRC_T_MBI));
else {
hc->tx_x_recv = hc->tx_x_calc;
hc->tx_x_recv <<= 4;
}
ccid3_hc_tx_update_x(sk, NULL);
}
ccid3_pr_debug("Reduced X to %llu/64 bytes/sec\n",
(unsigned long long)hc->tx_x);
/*
* Set new timeout for the nofeedback timer.
 * See comments in ccid3_hc_tx_packet_recv() regarding the value of t_RTO.
*/
if (unlikely(hc->tx_t_rto == 0)) /* no feedback received yet */
t_nfb = TFRC_INITIAL_TIMEOUT;
else
t_nfb = max(hc->tx_t_rto, 2 * hc->tx_t_ipi);
restart_timer:
sk_reset_timer(sk, &hc->tx_no_feedback_timer,
jiffies + usecs_to_jiffies(t_nfb));
out:
bh_unlock_sock(sk);
sock_put(sk);
}
/**
* ccid3_hc_tx_send_packet - Delay-based dequeueing of TX packets
* @skb: next packet candidate to send on @sk
*
* This function uses the convention of ccid_packet_dequeue_eval() and
* returns a millisecond-delay value between 0 and t_mbi = 64000 msec.
*/
static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
{
struct dccp_sock *dp = dccp_sk(sk);
struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
ktime_t now = ktime_get_real();
s64 delay;
/*
* This function is called only for Data and DataAck packets. Sending
* zero-sized Data(Ack)s is theoretically possible, but for congestion
* control this case is pathological - ignore it.
*/
if (unlikely(skb->len == 0))
return -EBADMSG;
if (hc->tx_state == TFRC_SSTATE_NO_SENT) {
sk_reset_timer(sk, &hc->tx_no_feedback_timer, (jiffies +
usecs_to_jiffies(TFRC_INITIAL_TIMEOUT)));
hc->tx_last_win_count = 0;
hc->tx_t_last_win_count = now;
/* Set t_0 for initial packet */
hc->tx_t_nom = now;
hc->tx_s = skb->len;
/*
* Use initial RTT sample when available: recommended by erratum
* to RFC 4342. This implements the initialisation procedure of
* draft rfc3448bis, section 4.2. Remember, X is scaled by 2^6.
*/
if (dp->dccps_syn_rtt) {
ccid3_pr_debug("SYN RTT = %uus\n", dp->dccps_syn_rtt);
hc->tx_rtt = dp->dccps_syn_rtt;
hc->tx_x = rfc3390_initial_rate(sk);
hc->tx_t_ld = now;
} else {
/*
* Sender does not have RTT sample:
* - set fallback RTT (RFC 4340, 3.4) since a RTT value
* is needed in several parts (e.g. window counter);
* - set sending rate X_pps = 1pps as per RFC 3448, 4.2.
*/
hc->tx_rtt = DCCP_FALLBACK_RTT;
hc->tx_x = hc->tx_s;
hc->tx_x <<= 6;
}
ccid3_update_send_interval(hc);
ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);
} else {
delay = ktime_us_delta(hc->tx_t_nom, now);
ccid3_pr_debug("delay=%ld\n", (long)delay);
/*
* Scheduling of packet transmissions (RFC 5348, 8.3)
*
* if (t_now > t_nom - delta)
* // send the packet now
* else
* // send the packet in (t_nom - t_now) milliseconds.
*/
if (delay >= TFRC_T_DELTA)
return (u32)delay / USEC_PER_MSEC;
ccid3_hc_tx_update_win_count(hc, now);
}
/* prepare to send now (add options etc.) */
dp->dccps_hc_tx_insert_options = 1;
DCCP_SKB_CB(skb)->dccpd_ccval = hc->tx_last_win_count;
/* set the nominal send time for the next following packet */
hc->tx_t_nom = ktime_add_us(hc->tx_t_nom, hc->tx_t_ipi);
return CCID_PACKET_SEND_AT_ONCE;
}
static void ccid3_hc_tx_packet_sent(struct sock *sk, unsigned int len)
{
struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
ccid3_hc_tx_update_s(hc, len);
if (tfrc_tx_hist_add(&hc->tx_hist, dccp_sk(sk)->dccps_gss))
DCCP_CRIT("packet history - out of memory!");
}
static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
{
struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
struct tfrc_tx_hist_entry *acked;
ktime_t now;
unsigned long t_nfb;
u32 r_sample;
/* we are only interested in ACKs */
if (!(DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK ||
DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_DATAACK))
return;
/*
* Locate the acknowledged packet in the TX history.
*
* Returning "entry not found" here can for instance happen when
* - the host has not sent out anything (e.g. a passive server),
* - the Ack is outdated (packet with higher Ack number was received),
* - it is a bogus Ack (for a packet not sent on this connection).
*/
acked = tfrc_tx_hist_find_entry(hc->tx_hist, dccp_hdr_ack_seq(skb));
if (acked == NULL)
return;
/* For the sake of RTT sampling, ignore/remove all older entries */
tfrc_tx_hist_purge(&acked->next);
/* Update the moving average for the RTT estimate (RFC 3448, 4.3) */
now = ktime_get_real();
r_sample = dccp_sample_rtt(sk, ktime_us_delta(now, acked->stamp));
hc->tx_rtt = tfrc_ewma(hc->tx_rtt, r_sample, 9);
/*
* Update allowed sending rate X as per draft rfc3448bis-00, 4.2/3
*/
if (hc->tx_state == TFRC_SSTATE_NO_FBACK) {
ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK);
if (hc->tx_t_rto == 0) {
/*
* Initial feedback packet: Larger Initial Windows (4.2)
*/
hc->tx_x = rfc3390_initial_rate(sk);
hc->tx_t_ld = now;
ccid3_update_send_interval(hc);
goto done_computing_x;
} else if (hc->tx_p == 0) {
/*
* First feedback after nofeedback timer expiry (4.3)
*/
goto done_computing_x;
}
}
/* Update sending rate (step 4 of [RFC 3448, 4.3]) */
if (hc->tx_p > 0)
hc->tx_x_calc = tfrc_calc_x(hc->tx_s, hc->tx_rtt, hc->tx_p);
ccid3_hc_tx_update_x(sk, &now);
done_computing_x:
ccid3_pr_debug("%s(%p), RTT=%uus (sample=%uus), s=%u, "
"p=%u, X_calc=%u, X_recv=%u, X=%u\n",
dccp_role(sk), sk, hc->tx_rtt, r_sample,
hc->tx_s, hc->tx_p, hc->tx_x_calc,
(unsigned int)(hc->tx_x_recv >> 6),
(unsigned int)(hc->tx_x >> 6));
/* unschedule no feedback timer */
sk_stop_timer(sk, &hc->tx_no_feedback_timer);
/*
* As we have calculated new ipi, delta, t_nom it is possible
* that we now can send a packet, so wake up dccp_wait_for_ccid
*/
sk->sk_write_space(sk);
/*
* Update timeout interval for the nofeedback timer. In order to control
* rate halving on networks with very low RTTs (<= 1 ms), use per-route
* tunable RTAX_RTO_MIN value as the lower bound.
*/
hc->tx_t_rto = max_t(u32, 4 * hc->tx_rtt,
USEC_PER_SEC/HZ * tcp_rto_min(sk));
/*
* Schedule no feedback timer to expire in
* max(t_RTO, 2 * s/X) = max(t_RTO, 2 * t_ipi)
*/
t_nfb = max(hc->tx_t_rto, 2 * hc->tx_t_ipi);
ccid3_pr_debug("%s(%p), Scheduled no feedback timer to "
"expire in %lu jiffies (%luus)\n",
dccp_role(sk), sk, usecs_to_jiffies(t_nfb), t_nfb);
sk_reset_timer(sk, &hc->tx_no_feedback_timer,
jiffies + usecs_to_jiffies(t_nfb));
}
static int ccid3_hc_tx_parse_options(struct sock *sk, u8 packet_type,
u8 option, u8 *optval, u8 optlen)
{
struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
__be32 opt_val;
switch (option) {
case TFRC_OPT_RECEIVE_RATE:
case TFRC_OPT_LOSS_EVENT_RATE:
/* Must be ignored on Data packets, cf. RFC 4342 8.3 and 8.5 */
if (packet_type == DCCP_PKT_DATA)
break;
if (unlikely(optlen != 4)) {
DCCP_WARN("%s(%p), invalid len %d for %u\n",
dccp_role(sk), sk, optlen, option);
return -EINVAL;
}
opt_val = ntohl(get_unaligned((__be32 *)optval));
if (option == TFRC_OPT_RECEIVE_RATE) {
/* Receive Rate is kept in units of 64 bytes/second */
hc->tx_x_recv = opt_val;
hc->tx_x_recv <<= 6;
ccid3_pr_debug("%s(%p), RECEIVE_RATE=%u\n",
dccp_role(sk), sk, opt_val);
} else {
/* Update the fixpoint Loss Event Rate fraction */
hc->tx_p = tfrc_invert_loss_event_rate(opt_val);
ccid3_pr_debug("%s(%p), LOSS_EVENT_RATE=%u\n",
dccp_role(sk), sk, opt_val);
}
}
return 0;
}
static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk)
{
struct ccid3_hc_tx_sock *hc = ccid_priv(ccid);
hc->tx_state = TFRC_SSTATE_NO_SENT;
hc->tx_hist = NULL;
setup_timer(&hc->tx_no_feedback_timer,
ccid3_hc_tx_no_feedback_timer, (unsigned long)sk);
return 0;
}
static void ccid3_hc_tx_exit(struct sock *sk)
{
struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
sk_stop_timer(sk, &hc->tx_no_feedback_timer);
tfrc_tx_hist_purge(&hc->tx_hist);
}
static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info)
{
info->tcpi_rto = ccid3_hc_tx_sk(sk)->tx_t_rto;
info->tcpi_rtt = ccid3_hc_tx_sk(sk)->tx_rtt;
}
static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len,
u32 __user *optval, int __user *optlen)
{
const struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
struct tfrc_tx_info tfrc;
const void *val;
switch (optname) {
case DCCP_SOCKOPT_CCID_TX_INFO:
if (len < sizeof(tfrc))
return -EINVAL;
memset(&tfrc, 0, sizeof(tfrc));
tfrc.tfrctx_x = hc->tx_x;
tfrc.tfrctx_x_recv = hc->tx_x_recv;
tfrc.tfrctx_x_calc = hc->tx_x_calc;
tfrc.tfrctx_rtt = hc->tx_rtt;
tfrc.tfrctx_p = hc->tx_p;
tfrc.tfrctx_rto = hc->tx_t_rto;
tfrc.tfrctx_ipi = hc->tx_t_ipi;
len = sizeof(tfrc);
val = &tfrc;
break;
default:
return -ENOPROTOOPT;
}
if (put_user(len, optlen) || copy_to_user(optval, val, len))
return -EFAULT;
return 0;
}
/*
* Receiver Half-Connection Routines
*/
/* CCID3 feedback types */
enum ccid3_fback_type {
CCID3_FBACK_NONE = 0,
CCID3_FBACK_INITIAL,
CCID3_FBACK_PERIODIC,
CCID3_FBACK_PARAM_CHANGE
};
#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
static const char *ccid3_rx_state_name(enum ccid3_hc_rx_states state)
{
static const char *const ccid3_rx_state_names[] = {
[TFRC_RSTATE_NO_DATA] = "NO_DATA",
[TFRC_RSTATE_DATA] = "DATA",
};
return ccid3_rx_state_names[state];
}
#endif
static void ccid3_hc_rx_set_state(struct sock *sk,
enum ccid3_hc_rx_states state)
{
struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
enum ccid3_hc_rx_states oldstate = hc->rx_state;
ccid3_pr_debug("%s(%p) %-8.8s -> %s\n",
dccp_role(sk), sk, ccid3_rx_state_name(oldstate),
ccid3_rx_state_name(state));
WARN_ON(state == oldstate);
hc->rx_state = state;
}
static void ccid3_hc_rx_send_feedback(struct sock *sk,
const struct sk_buff *skb,
enum ccid3_fback_type fbtype)
{
struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
struct dccp_sock *dp = dccp_sk(sk);
ktime_t now = ktime_get_real();
s64 delta = 0;
switch (fbtype) {
case CCID3_FBACK_INITIAL:
hc->rx_x_recv = 0;
hc->rx_pinv = ~0U; /* see RFC 4342, 8.5 */
break;
case CCID3_FBACK_PARAM_CHANGE:
/*
* When parameters change (new loss or p > p_prev), we do not
* have a reliable estimate for R_m of [RFC 3448, 6.2] and so
* need to reuse the previous value of X_recv. However, when
* X_recv was 0 (due to early loss), this would kill X down to
* s/t_mbi (i.e. one packet in 64 seconds).
* To avoid such drastic reduction, we approximate X_recv as
* the number of bytes since last feedback.
* This is a safe fallback, since X is bounded above by X_calc.
*/
if (hc->rx_x_recv > 0)
break;
/* fall through */
case CCID3_FBACK_PERIODIC:
delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback);
if (delta <= 0)
DCCP_BUG("delta (%ld) <= 0", (long)delta);
else
hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
break;
default:
return;
}
ccid3_pr_debug("Interval %ldusec, X_recv=%u, 1/p=%u\n", (long)delta,
hc->rx_x_recv, hc->rx_pinv);
hc->rx_tstamp_last_feedback = now;
hc->rx_last_counter = dccp_hdr(skb)->dccph_ccval;
hc->rx_bytes_recv = 0;
dp->dccps_hc_rx_insert_options = 1;
dccp_send_ack(sk);
}
static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
{
const struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
__be32 x_recv, pinv;
if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN))
return 0;
if (dccp_packet_without_ack(skb))
return 0;
x_recv = htonl(hc->rx_x_recv);
pinv = htonl(hc->rx_pinv);
if (dccp_insert_option(skb, TFRC_OPT_LOSS_EVENT_RATE,
&pinv, sizeof(pinv)) ||
dccp_insert_option(skb, TFRC_OPT_RECEIVE_RATE,
&x_recv, sizeof(x_recv)))
return -1;
return 0;
}
/**
* ccid3_first_li - Implements [RFC 5348, 6.3.1]
*
* Determine the length of the first loss interval via inverse lookup.
* Assume that X_recv can be computed by the throughput equation
* s
* X_recv = --------
* R * fval
* Find some p such that f(p) = fval; return 1/p (scaled).
*/
static u32 ccid3_first_li(struct sock *sk)
{
struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
u32 x_recv, p, delta;
u64 fval;
if (hc->rx_rtt == 0) {
DCCP_WARN("No RTT estimate available, using fallback RTT\n");
hc->rx_rtt = DCCP_FALLBACK_RTT;
}
delta = ktime_to_us(net_timedelta(hc->rx_tstamp_last_feedback));
x_recv = scaled_div32(hc->rx_bytes_recv, delta);
if (x_recv == 0) { /* would also trigger divide-by-zero */
DCCP_WARN("X_recv==0\n");
if (hc->rx_x_recv == 0) {
DCCP_BUG("stored value of X_recv is zero");
return ~0U;
}
x_recv = hc->rx_x_recv;
}
fval = scaled_div(hc->rx_s, hc->rx_rtt);
fval = scaled_div32(fval, x_recv);
p = tfrc_calc_x_reverse_lookup(fval);
ccid3_pr_debug("%s(%p), receive rate=%u bytes/s, implied "
"loss rate=%u\n", dccp_role(sk), sk, x_recv, p);
return p == 0 ? ~0U : scaled_div(1, p);
}
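/* Worked example (illustrative, hypothetical values; assumes both scaled
 * divisions multiply by USEC_PER_SEC): with s = 1000 bytes, an RTT of
 * 100000 us and X_recv = 10000 bytes/second,
 *	fval = (1000 * 10^6 / 10^5) * 10^6 / 10^4 = 10^6,
 * i.e. f(p) = 1 in the 10^6 fixpoint encoding; the reverse lookup then
 * yields the matching p, and the function returns its inverse 1/p as
 * carried in the Loss Event Rate option.
 */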
static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
{
struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
enum ccid3_fback_type do_feedback = CCID3_FBACK_NONE;
const u64 ndp = dccp_sk(sk)->dccps_options_received.dccpor_ndp;
const bool is_data_packet = dccp_data_packet(skb);
if (unlikely(hc->rx_state == TFRC_RSTATE_NO_DATA)) {
if (is_data_packet) {
const u32 payload = skb->len - dccp_hdr(skb)->dccph_doff * 4;
do_feedback = CCID3_FBACK_INITIAL;
ccid3_hc_rx_set_state(sk, TFRC_RSTATE_DATA);
hc->rx_s = payload;
/*
* Not necessary to update rx_bytes_recv here,
* since X_recv = 0 for the first feedback packet (cf.
* RFC 3448, 6.3) -- gerrit
*/
}
goto update_records;
}
if (tfrc_rx_hist_duplicate(&hc->rx_hist, skb))
return; /* done receiving */
if (is_data_packet) {
const u32 payload = skb->len - dccp_hdr(skb)->dccph_doff * 4;
/*
* Update moving-average of s and the sum of received payload bytes
*/
hc->rx_s = tfrc_ewma(hc->rx_s, payload, 9);
hc->rx_bytes_recv += payload;
}
/*
* Perform loss detection and handle pending losses
*/
if (tfrc_rx_handle_loss(&hc->rx_hist, &hc->rx_li_hist,
skb, ndp, ccid3_first_li, sk)) {
do_feedback = CCID3_FBACK_PARAM_CHANGE;
goto done_receiving;
}
if (tfrc_rx_hist_loss_pending(&hc->rx_hist))
return; /* done receiving */
/*
* Handle data packets: RTT sampling and monitoring p
*/
if (unlikely(!is_data_packet))
goto update_records;
if (!tfrc_lh_is_initialised(&hc->rx_li_hist)) {
const u32 sample = tfrc_rx_hist_sample_rtt(&hc->rx_hist, skb);
/*
* Empty loss history: no loss so far, hence p stays 0.
* Sample RTT values, since an RTT estimate is required for the
* computation of p when the first loss occurs; RFC 3448, 6.3.1.
*/
if (sample != 0)
hc->rx_rtt = tfrc_ewma(hc->rx_rtt, sample, 9);
} else if (tfrc_lh_update_i_mean(&hc->rx_li_hist, skb)) {
/*
* Step (3) of [RFC 3448, 6.1]: Recompute I_mean and, if I_mean
* has decreased (resp. p has increased), send feedback now.
*/
do_feedback = CCID3_FBACK_PARAM_CHANGE;
}
/*
* Check if the periodic once-per-RTT feedback is due; RFC 4342, 10.3
*/
if (SUB16(dccp_hdr(skb)->dccph_ccval, hc->rx_last_counter) > 3)
do_feedback = CCID3_FBACK_PERIODIC;
update_records:
tfrc_rx_hist_add_packet(&hc->rx_hist, skb, ndp);
done_receiving:
if (do_feedback)
ccid3_hc_rx_send_feedback(sk, skb, do_feedback);
}
static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk)
{
struct ccid3_hc_rx_sock *hc = ccid_priv(ccid);
hc->rx_state = TFRC_RSTATE_NO_DATA;
tfrc_lh_init(&hc->rx_li_hist);
return tfrc_rx_hist_alloc(&hc->rx_hist);
}
static void ccid3_hc_rx_exit(struct sock *sk)
{
struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
tfrc_rx_hist_purge(&hc->rx_hist);
tfrc_lh_cleanup(&hc->rx_li_hist);
}
static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info)
{
info->tcpi_ca_state = ccid3_hc_rx_sk(sk)->rx_state;
info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
info->tcpi_rcv_rtt = ccid3_hc_rx_sk(sk)->rx_rtt;
}
static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len,
u32 __user *optval, int __user *optlen)
{
const struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
struct tfrc_rx_info rx_info;
const void *val;
switch (optname) {
case DCCP_SOCKOPT_CCID_RX_INFO:
if (len < sizeof(rx_info))
return -EINVAL;
rx_info.tfrcrx_x_recv = hc->rx_x_recv;
rx_info.tfrcrx_rtt = hc->rx_rtt;
rx_info.tfrcrx_p = tfrc_invert_loss_event_rate(hc->rx_pinv);
len = sizeof(rx_info);
val = &rx_info;
break;
default:
return -ENOPROTOOPT;
}
if (put_user(len, optlen) || copy_to_user(optval, val, len))
return -EFAULT;
return 0;
}
struct ccid_operations ccid3_ops = {
.ccid_id = DCCPC_CCID3,
.ccid_name = "TCP-Friendly Rate Control",
.ccid_hc_tx_obj_size = sizeof(struct ccid3_hc_tx_sock),
.ccid_hc_tx_init = ccid3_hc_tx_init,
.ccid_hc_tx_exit = ccid3_hc_tx_exit,
.ccid_hc_tx_send_packet = ccid3_hc_tx_send_packet,
.ccid_hc_tx_packet_sent = ccid3_hc_tx_packet_sent,
.ccid_hc_tx_packet_recv = ccid3_hc_tx_packet_recv,
.ccid_hc_tx_parse_options = ccid3_hc_tx_parse_options,
.ccid_hc_rx_obj_size = sizeof(struct ccid3_hc_rx_sock),
.ccid_hc_rx_init = ccid3_hc_rx_init,
.ccid_hc_rx_exit = ccid3_hc_rx_exit,
.ccid_hc_rx_insert_options = ccid3_hc_rx_insert_options,
.ccid_hc_rx_packet_recv = ccid3_hc_rx_packet_recv,
.ccid_hc_rx_get_info = ccid3_hc_rx_get_info,
.ccid_hc_tx_get_info = ccid3_hc_tx_get_info,
.ccid_hc_rx_getsockopt = ccid3_hc_rx_getsockopt,
.ccid_hc_tx_getsockopt = ccid3_hc_tx_getsockopt,
};
#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
module_param(ccid3_debug, bool, 0644);
MODULE_PARM_DESC(ccid3_debug, "Enable CCID-3 debug messages");
#endif
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_3829_0 |
crossvul-cpp_data_bad_5058_0 | /*
* Timers abstract layer
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/string.h>
#include <sound/core.h>
#include <sound/timer.h>
#include <sound/control.h>
#include <sound/info.h>
#include <sound/minors.h>
#include <sound/initval.h>
#include <linux/kmod.h>
#if IS_ENABLED(CONFIG_SND_HRTIMER)
#define DEFAULT_TIMER_LIMIT 4
#else
#define DEFAULT_TIMER_LIMIT 1
#endif
static int timer_limit = DEFAULT_TIMER_LIMIT;
static int timer_tstamp_monotonic = 1;
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, Takashi Iwai <tiwai@suse.de>");
MODULE_DESCRIPTION("ALSA timer interface");
MODULE_LICENSE("GPL");
module_param(timer_limit, int, 0444);
MODULE_PARM_DESC(timer_limit, "Maximum global timers in system.");
module_param(timer_tstamp_monotonic, int, 0444);
MODULE_PARM_DESC(timer_tstamp_monotonic, "Use posix monotonic clock source for timestamps (default).");
MODULE_ALIAS_CHARDEV(CONFIG_SND_MAJOR, SNDRV_MINOR_TIMER);
MODULE_ALIAS("devname:snd/timer");
struct snd_timer_user {
struct snd_timer_instance *timeri;
int tread; /* enhanced read with timestamps and events */
unsigned long ticks;
unsigned long overrun;
int qhead;
int qtail;
int qused;
int queue_size;
bool disconnected;
struct snd_timer_read *queue;
struct snd_timer_tread *tqueue;
spinlock_t qlock;
unsigned long last_resolution;
unsigned int filter;
struct timespec tstamp; /* trigger tstamp */
wait_queue_head_t qchange_sleep;
struct fasync_struct *fasync;
struct mutex ioctl_lock;
};
/* list of timers */
static LIST_HEAD(snd_timer_list);
/* list of slave instances */
static LIST_HEAD(snd_timer_slave_list);
/* lock for slave active lists */
static DEFINE_SPINLOCK(slave_active_lock);
static DEFINE_MUTEX(register_mutex);
static int snd_timer_free(struct snd_timer *timer);
static int snd_timer_dev_free(struct snd_device *device);
static int snd_timer_dev_register(struct snd_device *device);
static int snd_timer_dev_disconnect(struct snd_device *device);
static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_left);
/*
* create a timer instance with the given owner string.
* when timer is not NULL, increments the module counter
*/
static struct snd_timer_instance *snd_timer_instance_new(char *owner,
struct snd_timer *timer)
{
struct snd_timer_instance *timeri;
timeri = kzalloc(sizeof(*timeri), GFP_KERNEL);
if (timeri == NULL)
return NULL;
timeri->owner = kstrdup(owner, GFP_KERNEL);
if (! timeri->owner) {
kfree(timeri);
return NULL;
}
INIT_LIST_HEAD(&timeri->open_list);
INIT_LIST_HEAD(&timeri->active_list);
INIT_LIST_HEAD(&timeri->ack_list);
INIT_LIST_HEAD(&timeri->slave_list_head);
INIT_LIST_HEAD(&timeri->slave_active_head);
timeri->timer = timer;
if (timer && !try_module_get(timer->module)) {
kfree(timeri->owner);
kfree(timeri);
return NULL;
}
return timeri;
}
/*
* find a timer instance from the given timer id
*/
static struct snd_timer *snd_timer_find(struct snd_timer_id *tid)
{
struct snd_timer *timer = NULL;
list_for_each_entry(timer, &snd_timer_list, device_list) {
if (timer->tmr_class != tid->dev_class)
continue;
if ((timer->tmr_class == SNDRV_TIMER_CLASS_CARD ||
timer->tmr_class == SNDRV_TIMER_CLASS_PCM) &&
(timer->card == NULL ||
timer->card->number != tid->card))
continue;
if (timer->tmr_device != tid->device)
continue;
if (timer->tmr_subdevice != tid->subdevice)
continue;
return timer;
}
return NULL;
}
#ifdef CONFIG_MODULES
static void snd_timer_request(struct snd_timer_id *tid)
{
switch (tid->dev_class) {
case SNDRV_TIMER_CLASS_GLOBAL:
if (tid->device < timer_limit)
request_module("snd-timer-%i", tid->device);
break;
case SNDRV_TIMER_CLASS_CARD:
case SNDRV_TIMER_CLASS_PCM:
if (tid->card < snd_ecards_limit)
request_module("snd-card-%i", tid->card);
break;
default:
break;
}
}
#endif
/*
* look for a master instance matching with the slave id of the given slave.
* when found, relink the open_link of the slave.
*
* call this with register_mutex down.
*/
static void snd_timer_check_slave(struct snd_timer_instance *slave)
{
struct snd_timer *timer;
struct snd_timer_instance *master;
/* FIXME: it's really dumb to look up all entries.. */
list_for_each_entry(timer, &snd_timer_list, device_list) {
list_for_each_entry(master, &timer->open_list_head, open_list) {
if (slave->slave_class == master->slave_class &&
slave->slave_id == master->slave_id) {
list_move_tail(&slave->open_list,
&master->slave_list_head);
spin_lock_irq(&slave_active_lock);
slave->master = master;
slave->timer = master->timer;
spin_unlock_irq(&slave_active_lock);
return;
}
}
}
}
/*
* look for slave instances matching with the slave id of the given master.
* when found, relink the open_link of slaves.
*
* call this with register_mutex down.
*/
static void snd_timer_check_master(struct snd_timer_instance *master)
{
struct snd_timer_instance *slave, *tmp;
/* check all pending slaves */
list_for_each_entry_safe(slave, tmp, &snd_timer_slave_list, open_list) {
if (slave->slave_class == master->slave_class &&
slave->slave_id == master->slave_id) {
list_move_tail(&slave->open_list, &master->slave_list_head);
spin_lock_irq(&slave_active_lock);
spin_lock(&master->timer->lock);
slave->master = master;
slave->timer = master->timer;
if (slave->flags & SNDRV_TIMER_IFLG_RUNNING)
list_add_tail(&slave->active_list,
&master->slave_active_head);
spin_unlock(&master->timer->lock);
spin_unlock_irq(&slave_active_lock);
}
}
}
/*
* open a timer instance
 * when opening a master, the slave id must be given here.
*/
int snd_timer_open(struct snd_timer_instance **ti,
char *owner, struct snd_timer_id *tid,
unsigned int slave_id)
{
struct snd_timer *timer;
struct snd_timer_instance *timeri = NULL;
if (tid->dev_class == SNDRV_TIMER_CLASS_SLAVE) {
/* open a slave instance */
if (tid->dev_sclass <= SNDRV_TIMER_SCLASS_NONE ||
tid->dev_sclass > SNDRV_TIMER_SCLASS_OSS_SEQUENCER) {
pr_debug("ALSA: timer: invalid slave class %i\n",
tid->dev_sclass);
return -EINVAL;
}
mutex_lock(®ister_mutex);
timeri = snd_timer_instance_new(owner, NULL);
if (!timeri) {
mutex_unlock(®ister_mutex);
return -ENOMEM;
}
timeri->slave_class = tid->dev_sclass;
timeri->slave_id = tid->device;
timeri->flags |= SNDRV_TIMER_IFLG_SLAVE;
list_add_tail(&timeri->open_list, &snd_timer_slave_list);
snd_timer_check_slave(timeri);
mutex_unlock(®ister_mutex);
*ti = timeri;
return 0;
}
/* open a master instance */
mutex_lock(®ister_mutex);
timer = snd_timer_find(tid);
#ifdef CONFIG_MODULES
if (!timer) {
mutex_unlock(®ister_mutex);
snd_timer_request(tid);
mutex_lock(®ister_mutex);
timer = snd_timer_find(tid);
}
#endif
if (!timer) {
mutex_unlock(®ister_mutex);
return -ENODEV;
}
if (!list_empty(&timer->open_list_head)) {
timeri = list_entry(timer->open_list_head.next,
struct snd_timer_instance, open_list);
if (timeri->flags & SNDRV_TIMER_IFLG_EXCLUSIVE) {
mutex_unlock(®ister_mutex);
return -EBUSY;
}
}
timeri = snd_timer_instance_new(owner, timer);
if (!timeri) {
mutex_unlock(®ister_mutex);
return -ENOMEM;
}
/* take a card refcount for safe disconnection */
if (timer->card)
get_device(&timer->card->card_dev);
timeri->slave_class = tid->dev_sclass;
timeri->slave_id = slave_id;
if (list_empty(&timer->open_list_head) && timer->hw.open)
timer->hw.open(timer);
list_add_tail(&timeri->open_list, &timer->open_list_head);
snd_timer_check_master(timeri);
mutex_unlock(®ister_mutex);
*ti = timeri;
return 0;
}
/*
* close a timer instance
*/
int snd_timer_close(struct snd_timer_instance *timeri)
{
struct snd_timer *timer = NULL;
struct snd_timer_instance *slave, *tmp;
if (snd_BUG_ON(!timeri))
return -ENXIO;
mutex_lock(®ister_mutex);
list_del(&timeri->open_list);
/* force to stop the timer */
snd_timer_stop(timeri);
timer = timeri->timer;
if (timer) {
/* wait until the active callback is finished */
spin_lock_irq(&timer->lock);
while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) {
spin_unlock_irq(&timer->lock);
udelay(10);
spin_lock_irq(&timer->lock);
}
spin_unlock_irq(&timer->lock);
/* remove slave links */
spin_lock_irq(&slave_active_lock);
spin_lock(&timer->lock);
list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head,
open_list) {
list_move_tail(&slave->open_list, &snd_timer_slave_list);
slave->master = NULL;
slave->timer = NULL;
list_del_init(&slave->ack_list);
list_del_init(&slave->active_list);
}
spin_unlock(&timer->lock);
spin_unlock_irq(&slave_active_lock);
/* slave doesn't need to release timer resources below */
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
timer = NULL;
}
if (timeri->private_free)
timeri->private_free(timeri);
kfree(timeri->owner);
kfree(timeri);
if (timer) {
if (list_empty(&timer->open_list_head) && timer->hw.close)
timer->hw.close(timer);
/* release a card refcount for safe disconnection */
if (timer->card)
put_device(&timer->card->card_dev);
module_put(timer->module);
}
mutex_unlock(®ister_mutex);
return 0;
}
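/* Minimal usage sketch (illustrative, not part of the original file):
 * a kernel client pairs snd_timer_open() with snd_timer_close() and
 * drives the instance in between, e.g.:
 *
 *	struct snd_timer_instance *ti;
 *	struct snd_timer_id tid = {
 *		.dev_class  = SNDRV_TIMER_CLASS_GLOBAL,
 *		.dev_sclass = SNDRV_TIMER_SCLASS_NONE,
 *		.card       = -1,
 *		.device     = SNDRV_TIMER_GLOBAL_SYSTEM,
 *		.subdevice  = 0,
 *	};
 *	if (snd_timer_open(&ti, "example", &tid, 0) == 0) {
 *		snd_timer_start(ti, 10);	(start with 10 ticks)
 *		...
 *		snd_timer_close(ti);	(stops the timer if still running)
 *	}
 */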
unsigned long snd_timer_resolution(struct snd_timer_instance *timeri)
{
struct snd_timer * timer;
if (timeri == NULL)
return 0;
if ((timer = timeri->timer) != NULL) {
if (timer->hw.c_resolution)
return timer->hw.c_resolution(timer);
return timer->hw.resolution;
}
return 0;
}
static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
{
struct snd_timer *timer;
unsigned long resolution = 0;
struct snd_timer_instance *ts;
struct timespec tstamp;
if (timer_tstamp_monotonic)
ktime_get_ts(&tstamp);
else
getnstimeofday(&tstamp);
if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_START ||
event > SNDRV_TIMER_EVENT_PAUSE))
return;
if (event == SNDRV_TIMER_EVENT_START ||
event == SNDRV_TIMER_EVENT_CONTINUE)
resolution = snd_timer_resolution(ti);
if (ti->ccallback)
ti->ccallback(ti, event, &tstamp, resolution);
if (ti->flags & SNDRV_TIMER_IFLG_SLAVE)
return;
timer = ti->timer;
if (timer == NULL)
return;
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
return;
list_for_each_entry(ts, &ti->slave_active_head, active_list)
if (ts->ccallback)
ts->ccallback(ts, event + 100, &tstamp, resolution);
}
/* start/continue a master timer */
static int snd_timer_start1(struct snd_timer_instance *timeri,
bool start, unsigned long ticks)
{
struct snd_timer *timer;
int result;
unsigned long flags;
timer = timeri->timer;
if (!timer)
return -EINVAL;
spin_lock_irqsave(&timer->lock, flags);
if (timer->card && timer->card->shutdown) {
result = -ENODEV;
goto unlock;
}
if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
SNDRV_TIMER_IFLG_START)) {
result = -EBUSY;
goto unlock;
}
if (start)
timeri->ticks = timeri->cticks = ticks;
else if (!timeri->cticks)
timeri->cticks = 1;
timeri->pticks = 0;
list_move_tail(&timeri->active_list, &timer->active_list_head);
if (timer->running) {
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
goto __start_now;
timer->flags |= SNDRV_TIMER_FLG_RESCHED;
timeri->flags |= SNDRV_TIMER_IFLG_START;
result = 1; /* delayed start */
} else {
if (start)
timer->sticks = ticks;
timer->hw.start(timer);
__start_now:
timer->running++;
timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
result = 0;
}
snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START :
SNDRV_TIMER_EVENT_CONTINUE);
unlock:
spin_unlock_irqrestore(&timer->lock, flags);
return result;
}
/* start/continue a slave timer */
static int snd_timer_start_slave(struct snd_timer_instance *timeri,
bool start)
{
unsigned long flags;
spin_lock_irqsave(&slave_active_lock, flags);
if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
spin_unlock_irqrestore(&slave_active_lock, flags);
return -EBUSY;
}
timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
if (timeri->master && timeri->timer) {
spin_lock(&timeri->timer->lock);
list_add_tail(&timeri->active_list,
&timeri->master->slave_active_head);
snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START :
SNDRV_TIMER_EVENT_CONTINUE);
spin_unlock(&timeri->timer->lock);
}
spin_unlock_irqrestore(&slave_active_lock, flags);
return 1; /* delayed start */
}
/* stop/pause a master timer */
static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop)
{
struct snd_timer *timer;
int result = 0;
unsigned long flags;
timer = timeri->timer;
if (!timer)
return -EINVAL;
spin_lock_irqsave(&timer->lock, flags);
if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
SNDRV_TIMER_IFLG_START))) {
result = -EBUSY;
goto unlock;
}
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
if (timer->card && timer->card->shutdown)
goto unlock;
if (stop) {
timeri->cticks = timeri->ticks;
timeri->pticks = 0;
}
if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
!(--timer->running)) {
timer->hw.stop(timer);
if (timer->flags & SNDRV_TIMER_FLG_RESCHED) {
timer->flags &= ~SNDRV_TIMER_FLG_RESCHED;
snd_timer_reschedule(timer, 0);
if (timer->flags & SNDRV_TIMER_FLG_CHANGE) {
timer->flags &= ~SNDRV_TIMER_FLG_CHANGE;
timer->hw.start(timer);
}
}
}
timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
SNDRV_TIMER_EVENT_CONTINUE);
unlock:
spin_unlock_irqrestore(&timer->lock, flags);
return result;
}
/* stop/pause a slave timer */
static int snd_timer_stop_slave(struct snd_timer_instance *timeri, bool stop)
{
unsigned long flags;
spin_lock_irqsave(&slave_active_lock, flags);
if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
spin_unlock_irqrestore(&slave_active_lock, flags);
return -EBUSY;
}
timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
if (timeri->timer) {
spin_lock(&timeri->timer->lock);
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
SNDRV_TIMER_EVENT_CONTINUE);
spin_unlock(&timeri->timer->lock);
}
spin_unlock_irqrestore(&slave_active_lock, flags);
return 0;
}
/*
* start the timer instance
*/
int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
{
if (timeri == NULL || ticks < 1)
return -EINVAL;
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_start_slave(timeri, true);
else
return snd_timer_start1(timeri, true, ticks);
}
/*
* stop the timer instance.
*
* do not call this from the timer callback!
*/
int snd_timer_stop(struct snd_timer_instance *timeri)
{
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_stop_slave(timeri, true);
else
return snd_timer_stop1(timeri, true);
}
/*
 * start again; the remaining ticks are kept.
*/
int snd_timer_continue(struct snd_timer_instance *timeri)
{
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_start_slave(timeri, false);
else
return snd_timer_start1(timeri, false, 0);
}
/*
 * pause; remember the ticks left
*/
int snd_timer_pause(struct snd_timer_instance * timeri)
{
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_stop_slave(timeri, false);
else
return snd_timer_stop1(timeri, false);
}
/*
* reschedule the timer
*
* start pending instances and check the scheduling ticks.
 * when the scheduling ticks value changes, set the CHANGE flag to reprogram the timer.
*/
static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_left)
{
struct snd_timer_instance *ti;
unsigned long ticks = ~0UL;
list_for_each_entry(ti, &timer->active_list_head, active_list) {
if (ti->flags & SNDRV_TIMER_IFLG_START) {
ti->flags &= ~SNDRV_TIMER_IFLG_START;
ti->flags |= SNDRV_TIMER_IFLG_RUNNING;
timer->running++;
}
if (ti->flags & SNDRV_TIMER_IFLG_RUNNING) {
if (ticks > ti->cticks)
ticks = ti->cticks;
}
}
if (ticks == ~0UL) {
timer->flags &= ~SNDRV_TIMER_FLG_RESCHED;
return;
}
if (ticks > timer->hw.ticks)
ticks = timer->hw.ticks;
if (ticks_left != ticks)
timer->flags |= SNDRV_TIMER_FLG_CHANGE;
timer->sticks = ticks;
}
/*
* timer tasklet
*
*/
static void snd_timer_tasklet(unsigned long arg)
{
struct snd_timer *timer = (struct snd_timer *) arg;
struct snd_timer_instance *ti;
struct list_head *p;
unsigned long resolution, ticks;
unsigned long flags;
if (timer->card && timer->card->shutdown)
return;
spin_lock_irqsave(&timer->lock, flags);
/* now process all callbacks */
while (!list_empty(&timer->sack_list_head)) {
p = timer->sack_list_head.next; /* get first item */
ti = list_entry(p, struct snd_timer_instance, ack_list);
/* remove from ack_list and make empty */
list_del_init(p);
ticks = ti->pticks;
ti->pticks = 0;
resolution = ti->resolution;
ti->flags |= SNDRV_TIMER_IFLG_CALLBACK;
spin_unlock(&timer->lock);
if (ti->callback)
ti->callback(ti, resolution, ticks);
spin_lock(&timer->lock);
ti->flags &= ~SNDRV_TIMER_IFLG_CALLBACK;
}
spin_unlock_irqrestore(&timer->lock, flags);
}
/*
* timer interrupt
*
* ticks_left is usually equal to timer->sticks.
*
*/
void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
{
struct snd_timer_instance *ti, *ts, *tmp;
unsigned long resolution, ticks;
struct list_head *p, *ack_list_head;
unsigned long flags;
int use_tasklet = 0;
if (timer == NULL)
return;
if (timer->card && timer->card->shutdown)
return;
spin_lock_irqsave(&timer->lock, flags);
/* remember the current resolution */
if (timer->hw.c_resolution)
resolution = timer->hw.c_resolution(timer);
else
resolution = timer->hw.resolution;
/* loop for all active instances
* Here we cannot use list_for_each_entry because the active_list of a
* processed instance is relinked to done_list_head before the callback
* is called.
*/
list_for_each_entry_safe(ti, tmp, &timer->active_list_head,
active_list) {
if (!(ti->flags & SNDRV_TIMER_IFLG_RUNNING))
continue;
ti->pticks += ticks_left;
ti->resolution = resolution;
if (ti->cticks < ticks_left)
ti->cticks = 0;
else
ti->cticks -= ticks_left;
if (ti->cticks) /* not expired */
continue;
if (ti->flags & SNDRV_TIMER_IFLG_AUTO) {
ti->cticks = ti->ticks;
} else {
ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
--timer->running;
list_del_init(&ti->active_list);
}
if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) ||
(ti->flags & SNDRV_TIMER_IFLG_FAST))
ack_list_head = &timer->ack_list_head;
else
ack_list_head = &timer->sack_list_head;
if (list_empty(&ti->ack_list))
list_add_tail(&ti->ack_list, ack_list_head);
list_for_each_entry(ts, &ti->slave_active_head, active_list) {
ts->pticks = ti->pticks;
ts->resolution = resolution;
if (list_empty(&ts->ack_list))
list_add_tail(&ts->ack_list, ack_list_head);
}
}
if (timer->flags & SNDRV_TIMER_FLG_RESCHED)
snd_timer_reschedule(timer, timer->sticks);
if (timer->running) {
if (timer->hw.flags & SNDRV_TIMER_HW_STOP) {
timer->hw.stop(timer);
timer->flags |= SNDRV_TIMER_FLG_CHANGE;
}
if (!(timer->hw.flags & SNDRV_TIMER_HW_AUTO) ||
(timer->flags & SNDRV_TIMER_FLG_CHANGE)) {
/* restart timer */
timer->flags &= ~SNDRV_TIMER_FLG_CHANGE;
timer->hw.start(timer);
}
} else {
timer->hw.stop(timer);
}
/* now process all fast callbacks */
while (!list_empty(&timer->ack_list_head)) {
p = timer->ack_list_head.next; /* get first item */
ti = list_entry(p, struct snd_timer_instance, ack_list);
/* remove from ack_list and make empty */
list_del_init(p);
ticks = ti->pticks;
ti->pticks = 0;
ti->flags |= SNDRV_TIMER_IFLG_CALLBACK;
spin_unlock(&timer->lock);
if (ti->callback)
ti->callback(ti, resolution, ticks);
spin_lock(&timer->lock);
ti->flags &= ~SNDRV_TIMER_IFLG_CALLBACK;
}
/* do we have any slow callbacks? */
use_tasklet = !list_empty(&timer->sack_list_head);
spin_unlock_irqrestore(&timer->lock, flags);
if (use_tasklet)
tasklet_schedule(&timer->task_queue);
}
/*
*/
int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid,
struct snd_timer **rtimer)
{
struct snd_timer *timer;
int err;
static struct snd_device_ops ops = {
.dev_free = snd_timer_dev_free,
.dev_register = snd_timer_dev_register,
.dev_disconnect = snd_timer_dev_disconnect,
};
if (snd_BUG_ON(!tid))
return -EINVAL;
if (rtimer)
*rtimer = NULL;
timer = kzalloc(sizeof(*timer), GFP_KERNEL);
if (!timer)
return -ENOMEM;
timer->tmr_class = tid->dev_class;
timer->card = card;
timer->tmr_device = tid->device;
timer->tmr_subdevice = tid->subdevice;
if (id)
strlcpy(timer->id, id, sizeof(timer->id));
INIT_LIST_HEAD(&timer->device_list);
INIT_LIST_HEAD(&timer->open_list_head);
INIT_LIST_HEAD(&timer->active_list_head);
INIT_LIST_HEAD(&timer->ack_list_head);
INIT_LIST_HEAD(&timer->sack_list_head);
spin_lock_init(&timer->lock);
tasklet_init(&timer->task_queue, snd_timer_tasklet,
(unsigned long)timer);
if (card != NULL) {
timer->module = card->module;
err = snd_device_new(card, SNDRV_DEV_TIMER, timer, &ops);
if (err < 0) {
snd_timer_free(timer);
return err;
}
}
if (rtimer)
*rtimer = timer;
return 0;
}
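/*
 * Illustrative driver-side sketch, not part of this file: a card driver
 * would typically create a timer roughly like this (the id string,
 * device numbers and my_timer_hw are made-up placeholders; .start and
 * .stop must be provided, as snd_timer_dev_register() checks below).
 *
 *	struct snd_timer *t;
 *	struct snd_timer_id tid = {
 *		.dev_class = SNDRV_TIMER_CLASS_CARD,
 *		.dev_sclass = SNDRV_TIMER_SCLASS_NONE,
 *		.card = card->number,
 *		.device = 0,
 *		.subdevice = 0,
 *	};
 *	int err = snd_timer_new(card, "my-card-timer", &tid, &t);
 *	if (err < 0)
 *		return err;
 *	strcpy(t->name, "my card timer");
 *	t->hw = my_timer_hw;
 */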
static int snd_timer_free(struct snd_timer *timer)
{
if (!timer)
return 0;
mutex_lock(&register_mutex);
if (! list_empty(&timer->open_list_head)) {
struct list_head *p, *n;
struct snd_timer_instance *ti;
pr_warn("ALSA: timer %p is busy?\n", timer);
list_for_each_safe(p, n, &timer->open_list_head) {
list_del_init(p);
ti = list_entry(p, struct snd_timer_instance, open_list);
ti->timer = NULL;
}
}
list_del(&timer->device_list);
mutex_unlock(&register_mutex);
if (timer->private_free)
timer->private_free(timer);
kfree(timer);
return 0;
}
static int snd_timer_dev_free(struct snd_device *device)
{
struct snd_timer *timer = device->device_data;
return snd_timer_free(timer);
}
static int snd_timer_dev_register(struct snd_device *dev)
{
struct snd_timer *timer = dev->device_data;
struct snd_timer *timer1;
if (snd_BUG_ON(!timer || !timer->hw.start || !timer->hw.stop))
return -ENXIO;
if (!(timer->hw.flags & SNDRV_TIMER_HW_SLAVE) &&
!timer->hw.resolution && timer->hw.c_resolution == NULL)
return -EINVAL;
mutex_lock(&register_mutex);
list_for_each_entry(timer1, &snd_timer_list, device_list) {
if (timer1->tmr_class > timer->tmr_class)
break;
if (timer1->tmr_class < timer->tmr_class)
continue;
if (timer1->card && timer->card) {
if (timer1->card->number > timer->card->number)
break;
if (timer1->card->number < timer->card->number)
continue;
}
if (timer1->tmr_device > timer->tmr_device)
break;
if (timer1->tmr_device < timer->tmr_device)
continue;
if (timer1->tmr_subdevice > timer->tmr_subdevice)
break;
if (timer1->tmr_subdevice < timer->tmr_subdevice)
continue;
/* conflicts.. */
mutex_unlock(&register_mutex);
return -EBUSY;
}
list_add_tail(&timer->device_list, &timer1->device_list);
mutex_unlock(&register_mutex);
return 0;
}
static int snd_timer_dev_disconnect(struct snd_device *device)
{
struct snd_timer *timer = device->device_data;
struct snd_timer_instance *ti;
mutex_lock(&register_mutex);
list_del_init(&timer->device_list);
/* wake up pending sleepers */
list_for_each_entry(ti, &timer->open_list_head, open_list) {
if (ti->disconnect)
ti->disconnect(ti);
}
mutex_unlock(&register_mutex);
return 0;
}
void snd_timer_notify(struct snd_timer *timer, int event, struct timespec *tstamp)
{
unsigned long flags;
unsigned long resolution = 0;
struct snd_timer_instance *ti, *ts;
if (timer->card && timer->card->shutdown)
return;
if (! (timer->hw.flags & SNDRV_TIMER_HW_SLAVE))
return;
if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_MSTART ||
event > SNDRV_TIMER_EVENT_MRESUME))
return;
spin_lock_irqsave(&timer->lock, flags);
if (event == SNDRV_TIMER_EVENT_MSTART ||
event == SNDRV_TIMER_EVENT_MCONTINUE ||
event == SNDRV_TIMER_EVENT_MRESUME) {
if (timer->hw.c_resolution)
resolution = timer->hw.c_resolution(timer);
else
resolution = timer->hw.resolution;
}
list_for_each_entry(ti, &timer->active_list_head, active_list) {
if (ti->ccallback)
ti->ccallback(ti, event, tstamp, resolution);
list_for_each_entry(ts, &ti->slave_active_head, active_list)
if (ts->ccallback)
ts->ccallback(ts, event, tstamp, resolution);
}
spin_unlock_irqrestore(&timer->lock, flags);
}
/*
* exported functions for global timers
*/
int snd_timer_global_new(char *id, int device, struct snd_timer **rtimer)
{
struct snd_timer_id tid;
tid.dev_class = SNDRV_TIMER_CLASS_GLOBAL;
tid.dev_sclass = SNDRV_TIMER_SCLASS_NONE;
tid.card = -1;
tid.device = device;
tid.subdevice = 0;
return snd_timer_new(NULL, id, &tid, rtimer);
}
int snd_timer_global_free(struct snd_timer *timer)
{
return snd_timer_free(timer);
}
int snd_timer_global_register(struct snd_timer *timer)
{
struct snd_device dev;
memset(&dev, 0, sizeof(dev));
dev.device_data = timer;
return snd_timer_dev_register(&dev);
}
/*
* System timer
*/
struct snd_timer_system_private {
struct timer_list tlist;
unsigned long last_expires;
unsigned long last_jiffies;
unsigned long correction;
};
static void snd_timer_s_function(unsigned long data)
{
struct snd_timer *timer = (struct snd_timer *)data;
struct snd_timer_system_private *priv = timer->private_data;
unsigned long jiff = jiffies;
if (time_after(jiff, priv->last_expires))
priv->correction += (long)jiff - (long)priv->last_expires;
snd_timer_interrupt(timer, (long)jiff - (long)priv->last_jiffies);
}
static int snd_timer_s_start(struct snd_timer * timer)
{
struct snd_timer_system_private *priv;
unsigned long njiff;
priv = (struct snd_timer_system_private *) timer->private_data;
njiff = (priv->last_jiffies = jiffies);
if (priv->correction > timer->sticks - 1) {
priv->correction -= timer->sticks - 1;
njiff++;
} else {
njiff += timer->sticks - priv->correction;
priv->correction = 0;
}
priv->last_expires = njiff;
mod_timer(&priv->tlist, njiff);
return 0;
}
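/*
 * Worked example of the correction arithmetic above (illustrative
 * numbers): with sticks = 4 and correction = 1, the timer is re-armed
 * at jiffies + (4 - 1) = jiffies + 3 and correction drops to 0; once
 * correction exceeds sticks - 1, only sticks - 1 of it is consumed and
 * the timer fires on the very next jiffy.
 */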
static int snd_timer_s_stop(struct snd_timer * timer)
{
struct snd_timer_system_private *priv;
unsigned long jiff;
priv = (struct snd_timer_system_private *) timer->private_data;
del_timer(&priv->tlist);
jiff = jiffies;
if (time_before(jiff, priv->last_expires))
timer->sticks = priv->last_expires - jiff;
else
timer->sticks = 1;
priv->correction = 0;
return 0;
}
static int snd_timer_s_close(struct snd_timer *timer)
{
struct snd_timer_system_private *priv;
priv = (struct snd_timer_system_private *)timer->private_data;
del_timer_sync(&priv->tlist);
return 0;
}
static struct snd_timer_hardware snd_timer_system =
{
.flags = SNDRV_TIMER_HW_FIRST | SNDRV_TIMER_HW_TASKLET,
.resolution = 1000000000L / HZ,
.ticks = 10000000L,
.close = snd_timer_s_close,
.start = snd_timer_s_start,
.stop = snd_timer_s_stop
};
static void snd_timer_free_system(struct snd_timer *timer)
{
kfree(timer->private_data);
}
static int snd_timer_register_system(void)
{
struct snd_timer *timer;
struct snd_timer_system_private *priv;
int err;
err = snd_timer_global_new("system", SNDRV_TIMER_GLOBAL_SYSTEM, &timer);
if (err < 0)
return err;
strcpy(timer->name, "system timer");
timer->hw = snd_timer_system;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (priv == NULL) {
snd_timer_free(timer);
return -ENOMEM;
}
setup_timer(&priv->tlist, snd_timer_s_function, (unsigned long) timer);
timer->private_data = priv;
timer->private_free = snd_timer_free_system;
return snd_timer_global_register(timer);
}
#ifdef CONFIG_SND_PROC_FS
/*
* Info interface
*/
static void snd_timer_proc_read(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_timer *timer;
struct snd_timer_instance *ti;
mutex_lock(&register_mutex);
list_for_each_entry(timer, &snd_timer_list, device_list) {
if (timer->card && timer->card->shutdown)
continue;
switch (timer->tmr_class) {
case SNDRV_TIMER_CLASS_GLOBAL:
snd_iprintf(buffer, "G%i: ", timer->tmr_device);
break;
case SNDRV_TIMER_CLASS_CARD:
snd_iprintf(buffer, "C%i-%i: ",
timer->card->number, timer->tmr_device);
break;
case SNDRV_TIMER_CLASS_PCM:
snd_iprintf(buffer, "P%i-%i-%i: ", timer->card->number,
timer->tmr_device, timer->tmr_subdevice);
break;
default:
snd_iprintf(buffer, "?%i-%i-%i-%i: ", timer->tmr_class,
timer->card ? timer->card->number : -1,
timer->tmr_device, timer->tmr_subdevice);
}
snd_iprintf(buffer, "%s :", timer->name);
if (timer->hw.resolution)
snd_iprintf(buffer, " %lu.%03luus (%lu ticks)",
timer->hw.resolution / 1000,
timer->hw.resolution % 1000,
timer->hw.ticks);
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
snd_iprintf(buffer, " SLAVE");
snd_iprintf(buffer, "\n");
list_for_each_entry(ti, &timer->open_list_head, open_list)
snd_iprintf(buffer, " Client %s : %s\n",
ti->owner ? ti->owner : "unknown",
ti->flags & (SNDRV_TIMER_IFLG_START |
SNDRV_TIMER_IFLG_RUNNING)
? "running" : "stopped");
}
mutex_unlock(&register_mutex);
}
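/*
 * Illustrative /proc/asound/timers output produced by the format
 * strings above (names and numbers are made up; with HZ = 100 the
 * system timer resolution is 10000000 ns):
 *
 *	G0: system timer : 10000.000us (10000000 ticks)
 *	C0-0: my card timer : SLAVE
 *	  Client application 1234 : running
 */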
static struct snd_info_entry *snd_timer_proc_entry;
static void __init snd_timer_proc_init(void)
{
struct snd_info_entry *entry;
entry = snd_info_create_module_entry(THIS_MODULE, "timers", NULL);
if (entry != NULL) {
entry->c.text.read = snd_timer_proc_read;
if (snd_info_register(entry) < 0) {
snd_info_free_entry(entry);
entry = NULL;
}
}
snd_timer_proc_entry = entry;
}
static void __exit snd_timer_proc_done(void)
{
snd_info_free_entry(snd_timer_proc_entry);
}
#else /* !CONFIG_SND_PROC_FS */
#define snd_timer_proc_init()
#define snd_timer_proc_done()
#endif
/*
* USER SPACE interface
*/
static void snd_timer_user_interrupt(struct snd_timer_instance *timeri,
unsigned long resolution,
unsigned long ticks)
{
struct snd_timer_user *tu = timeri->callback_data;
struct snd_timer_read *r;
int prev;
spin_lock(&tu->qlock);
if (tu->qused > 0) {
prev = tu->qtail == 0 ? tu->queue_size - 1 : tu->qtail - 1;
r = &tu->queue[prev];
if (r->resolution == resolution) {
r->ticks += ticks;
goto __wake;
}
}
if (tu->qused >= tu->queue_size) {
tu->overrun++;
} else {
r = &tu->queue[tu->qtail++];
tu->qtail %= tu->queue_size;
r->resolution = resolution;
r->ticks = ticks;
tu->qused++;
}
__wake:
spin_unlock(&tu->qlock);
kill_fasync(&tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
static void snd_timer_user_append_to_tqueue(struct snd_timer_user *tu,
struct snd_timer_tread *tread)
{
if (tu->qused >= tu->queue_size) {
tu->overrun++;
} else {
memcpy(&tu->tqueue[tu->qtail++], tread, sizeof(*tread));
tu->qtail %= tu->queue_size;
tu->qused++;
}
}
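/*
 * Ring-buffer sketch (illustrative numbers): with queue_size = 4,
 * qhead = 3 and qused = 2, the unread events sit in slots 3 and 0 and
 * qtail = 1; the next append fills slot 1 and advances qtail to 2.
 * When qused reaches queue_size, events are dropped and counted in
 * tu->overrun rather than overwriting unread entries.
 */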
static void snd_timer_user_ccallback(struct snd_timer_instance *timeri,
int event,
struct timespec *tstamp,
unsigned long resolution)
{
struct snd_timer_user *tu = timeri->callback_data;
struct snd_timer_tread r1;
unsigned long flags;
if (event >= SNDRV_TIMER_EVENT_START &&
event <= SNDRV_TIMER_EVENT_PAUSE)
tu->tstamp = *tstamp;
if ((tu->filter & (1 << event)) == 0 || !tu->tread)
return;
r1.event = event;
r1.tstamp = *tstamp;
r1.val = resolution;
spin_lock_irqsave(&tu->qlock, flags);
snd_timer_user_append_to_tqueue(tu, &r1);
spin_unlock_irqrestore(&tu->qlock, flags);
kill_fasync(&tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
static void snd_timer_user_disconnect(struct snd_timer_instance *timeri)
{
struct snd_timer_user *tu = timeri->callback_data;
tu->disconnected = true;
wake_up(&tu->qchange_sleep);
}
static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri,
unsigned long resolution,
unsigned long ticks)
{
struct snd_timer_user *tu = timeri->callback_data;
struct snd_timer_tread *r, r1;
struct timespec tstamp;
int prev, append = 0;
memset(&tstamp, 0, sizeof(tstamp));
spin_lock(&tu->qlock);
if ((tu->filter & ((1 << SNDRV_TIMER_EVENT_RESOLUTION) |
(1 << SNDRV_TIMER_EVENT_TICK))) == 0) {
spin_unlock(&tu->qlock);
return;
}
if (tu->last_resolution != resolution || ticks > 0) {
if (timer_tstamp_monotonic)
ktime_get_ts(&tstamp);
else
getnstimeofday(&tstamp);
}
if ((tu->filter & (1 << SNDRV_TIMER_EVENT_RESOLUTION)) &&
tu->last_resolution != resolution) {
r1.event = SNDRV_TIMER_EVENT_RESOLUTION;
r1.tstamp = tstamp;
r1.val = resolution;
snd_timer_user_append_to_tqueue(tu, &r1);
tu->last_resolution = resolution;
append++;
}
if ((tu->filter & (1 << SNDRV_TIMER_EVENT_TICK)) == 0)
goto __wake;
if (ticks == 0)
goto __wake;
if (tu->qused > 0) {
prev = tu->qtail == 0 ? tu->queue_size - 1 : tu->qtail - 1;
r = &tu->tqueue[prev];
if (r->event == SNDRV_TIMER_EVENT_TICK) {
r->tstamp = tstamp;
r->val += ticks;
append++;
goto __wake;
}
}
r1.event = SNDRV_TIMER_EVENT_TICK;
r1.tstamp = tstamp;
r1.val = ticks;
snd_timer_user_append_to_tqueue(tu, &r1);
append++;
__wake:
spin_unlock(&tu->qlock);
if (append == 0)
return;
kill_fasync(&tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
static int snd_timer_user_open(struct inode *inode, struct file *file)
{
struct snd_timer_user *tu;
int err;
err = nonseekable_open(inode, file);
if (err < 0)
return err;
tu = kzalloc(sizeof(*tu), GFP_KERNEL);
if (tu == NULL)
return -ENOMEM;
spin_lock_init(&tu->qlock);
init_waitqueue_head(&tu->qchange_sleep);
mutex_init(&tu->ioctl_lock);
tu->ticks = 1;
tu->queue_size = 128;
tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
GFP_KERNEL);
if (tu->queue == NULL) {
kfree(tu);
return -ENOMEM;
}
file->private_data = tu;
return 0;
}
static int snd_timer_user_release(struct inode *inode, struct file *file)
{
struct snd_timer_user *tu;
if (file->private_data) {
tu = file->private_data;
file->private_data = NULL;
mutex_lock(&tu->ioctl_lock);
if (tu->timeri)
snd_timer_close(tu->timeri);
mutex_unlock(&tu->ioctl_lock);
kfree(tu->queue);
kfree(tu->tqueue);
kfree(tu);
}
return 0;
}
static void snd_timer_user_zero_id(struct snd_timer_id *id)
{
id->dev_class = SNDRV_TIMER_CLASS_NONE;
id->dev_sclass = SNDRV_TIMER_SCLASS_NONE;
id->card = -1;
id->device = -1;
id->subdevice = -1;
}
static void snd_timer_user_copy_id(struct snd_timer_id *id, struct snd_timer *timer)
{
id->dev_class = timer->tmr_class;
id->dev_sclass = SNDRV_TIMER_SCLASS_NONE;
id->card = timer->card ? timer->card->number : -1;
id->device = timer->tmr_device;
id->subdevice = timer->tmr_subdevice;
}
static int snd_timer_user_next_device(struct snd_timer_id __user *_tid)
{
struct snd_timer_id id;
struct snd_timer *timer;
struct list_head *p;
if (copy_from_user(&id, _tid, sizeof(id)))
return -EFAULT;
mutex_lock(&register_mutex);
if (id.dev_class < 0) { /* first item */
if (list_empty(&snd_timer_list))
snd_timer_user_zero_id(&id);
else {
timer = list_entry(snd_timer_list.next,
struct snd_timer, device_list);
snd_timer_user_copy_id(&id, timer);
}
} else {
switch (id.dev_class) {
case SNDRV_TIMER_CLASS_GLOBAL:
id.device = id.device < 0 ? 0 : id.device + 1;
list_for_each(p, &snd_timer_list) {
timer = list_entry(p, struct snd_timer, device_list);
if (timer->tmr_class > SNDRV_TIMER_CLASS_GLOBAL) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_device >= id.device) {
snd_timer_user_copy_id(&id, timer);
break;
}
}
if (p == &snd_timer_list)
snd_timer_user_zero_id(&id);
break;
case SNDRV_TIMER_CLASS_CARD:
case SNDRV_TIMER_CLASS_PCM:
if (id.card < 0) {
id.card = 0;
} else {
if (id.device < 0) {
id.device = 0;
} else {
if (id.subdevice < 0) {
id.subdevice = 0;
} else {
id.subdevice++;
}
}
}
list_for_each(p, &snd_timer_list) {
timer = list_entry(p, struct snd_timer, device_list);
if (timer->tmr_class > id.dev_class) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_class < id.dev_class)
continue;
if (timer->card->number > id.card) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->card->number < id.card)
continue;
if (timer->tmr_device > id.device) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_device < id.device)
continue;
if (timer->tmr_subdevice > id.subdevice) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_subdevice < id.subdevice)
continue;
snd_timer_user_copy_id(&id, timer);
break;
}
if (p == &snd_timer_list)
snd_timer_user_zero_id(&id);
break;
default:
snd_timer_user_zero_id(&id);
}
}
mutex_unlock(&register_mutex);
if (copy_to_user(_tid, &id, sizeof(*_tid)))
return -EFAULT;
return 0;
}
static int snd_timer_user_ginfo(struct file *file,
struct snd_timer_ginfo __user *_ginfo)
{
struct snd_timer_ginfo *ginfo;
struct snd_timer_id tid;
struct snd_timer *t;
struct list_head *p;
int err = 0;
ginfo = memdup_user(_ginfo, sizeof(*ginfo));
if (IS_ERR(ginfo))
return PTR_ERR(ginfo);
tid = ginfo->tid;
memset(ginfo, 0, sizeof(*ginfo));
ginfo->tid = tid;
mutex_lock(&register_mutex);
t = snd_timer_find(&tid);
if (t != NULL) {
ginfo->card = t->card ? t->card->number : -1;
if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
ginfo->flags |= SNDRV_TIMER_FLG_SLAVE;
strlcpy(ginfo->id, t->id, sizeof(ginfo->id));
strlcpy(ginfo->name, t->name, sizeof(ginfo->name));
ginfo->resolution = t->hw.resolution;
if (t->hw.resolution_min > 0) {
ginfo->resolution_min = t->hw.resolution_min;
ginfo->resolution_max = t->hw.resolution_max;
}
list_for_each(p, &t->open_list_head) {
ginfo->clients++;
}
} else {
err = -ENODEV;
}
mutex_unlock(&register_mutex);
if (err >= 0 && copy_to_user(_ginfo, ginfo, sizeof(*ginfo)))
err = -EFAULT;
kfree(ginfo);
return err;
}
static int timer_set_gparams(struct snd_timer_gparams *gparams)
{
struct snd_timer *t;
int err;
mutex_lock(&register_mutex);
t = snd_timer_find(&gparams->tid);
if (!t) {
err = -ENODEV;
goto _error;
}
if (!list_empty(&t->open_list_head)) {
err = -EBUSY;
goto _error;
}
if (!t->hw.set_period) {
err = -ENOSYS;
goto _error;
}
err = t->hw.set_period(t, gparams->period_num, gparams->period_den);
_error:
mutex_unlock(&register_mutex);
return err;
}
static int snd_timer_user_gparams(struct file *file,
struct snd_timer_gparams __user *_gparams)
{
struct snd_timer_gparams gparams;
if (copy_from_user(&gparams, _gparams, sizeof(gparams)))
return -EFAULT;
return timer_set_gparams(&gparams);
}
static int snd_timer_user_gstatus(struct file *file,
struct snd_timer_gstatus __user *_gstatus)
{
struct snd_timer_gstatus gstatus;
struct snd_timer_id tid;
struct snd_timer *t;
int err = 0;
if (copy_from_user(&gstatus, _gstatus, sizeof(gstatus)))
return -EFAULT;
tid = gstatus.tid;
memset(&gstatus, 0, sizeof(gstatus));
gstatus.tid = tid;
mutex_lock(&register_mutex);
t = snd_timer_find(&tid);
if (t != NULL) {
if (t->hw.c_resolution)
gstatus.resolution = t->hw.c_resolution(t);
else
gstatus.resolution = t->hw.resolution;
if (t->hw.precise_resolution) {
t->hw.precise_resolution(t, &gstatus.resolution_num,
&gstatus.resolution_den);
} else {
gstatus.resolution_num = gstatus.resolution;
gstatus.resolution_den = 1000000000uL;
}
} else {
err = -ENODEV;
}
mutex_unlock(&register_mutex);
if (err >= 0 && copy_to_user(_gstatus, &gstatus, sizeof(gstatus)))
err = -EFAULT;
return err;
}
static int snd_timer_user_tselect(struct file *file,
struct snd_timer_select __user *_tselect)
{
struct snd_timer_user *tu;
struct snd_timer_select tselect;
char str[32];
int err = 0;
tu = file->private_data;
if (tu->timeri) {
snd_timer_close(tu->timeri);
tu->timeri = NULL;
}
if (copy_from_user(&tselect, _tselect, sizeof(tselect))) {
err = -EFAULT;
goto __err;
}
sprintf(str, "application %i", current->pid);
if (tselect.id.dev_class != SNDRV_TIMER_CLASS_SLAVE)
tselect.id.dev_sclass = SNDRV_TIMER_SCLASS_APPLICATION;
err = snd_timer_open(&tu->timeri, str, &tselect.id, current->pid);
if (err < 0)
goto __err;
kfree(tu->queue);
tu->queue = NULL;
kfree(tu->tqueue);
tu->tqueue = NULL;
if (tu->tread) {
tu->tqueue = kmalloc(tu->queue_size * sizeof(struct snd_timer_tread),
GFP_KERNEL);
if (tu->tqueue == NULL)
err = -ENOMEM;
} else {
tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
GFP_KERNEL);
if (tu->queue == NULL)
err = -ENOMEM;
}
if (err < 0) {
snd_timer_close(tu->timeri);
tu->timeri = NULL;
} else {
tu->timeri->flags |= SNDRV_TIMER_IFLG_FAST;
tu->timeri->callback = tu->tread
? snd_timer_user_tinterrupt : snd_timer_user_interrupt;
tu->timeri->ccallback = snd_timer_user_ccallback;
tu->timeri->callback_data = (void *)tu;
tu->timeri->disconnect = snd_timer_user_disconnect;
}
__err:
return err;
}
static int snd_timer_user_info(struct file *file,
struct snd_timer_info __user *_info)
{
struct snd_timer_user *tu;
struct snd_timer_info *info;
struct snd_timer *t;
int err = 0;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
t = tu->timeri->timer;
if (!t)
return -EBADFD;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (! info)
return -ENOMEM;
info->card = t->card ? t->card->number : -1;
if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
info->flags |= SNDRV_TIMER_FLG_SLAVE;
strlcpy(info->id, t->id, sizeof(info->id));
strlcpy(info->name, t->name, sizeof(info->name));
info->resolution = t->hw.resolution;
if (copy_to_user(_info, info, sizeof(*_info)))
err = -EFAULT;
kfree(info);
return err;
}
static int snd_timer_user_params(struct file *file,
struct snd_timer_params __user *_params)
{
struct snd_timer_user *tu;
struct snd_timer_params params;
struct snd_timer *t;
struct snd_timer_read *tr;
struct snd_timer_tread *ttr;
int err;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
t = tu->timeri->timer;
if (!t)
return -EBADFD;
if (copy_from_user(&params, _params, sizeof(params)))
return -EFAULT;
if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE) && params.ticks < 1) {
err = -EINVAL;
goto _end;
}
if (params.queue_size > 0 &&
(params.queue_size < 32 || params.queue_size > 1024)) {
err = -EINVAL;
goto _end;
}
if (params.filter & ~((1<<SNDRV_TIMER_EVENT_RESOLUTION)|
(1<<SNDRV_TIMER_EVENT_TICK)|
(1<<SNDRV_TIMER_EVENT_START)|
(1<<SNDRV_TIMER_EVENT_STOP)|
(1<<SNDRV_TIMER_EVENT_CONTINUE)|
(1<<SNDRV_TIMER_EVENT_PAUSE)|
(1<<SNDRV_TIMER_EVENT_SUSPEND)|
(1<<SNDRV_TIMER_EVENT_RESUME)|
(1<<SNDRV_TIMER_EVENT_MSTART)|
(1<<SNDRV_TIMER_EVENT_MSTOP)|
(1<<SNDRV_TIMER_EVENT_MCONTINUE)|
(1<<SNDRV_TIMER_EVENT_MPAUSE)|
(1<<SNDRV_TIMER_EVENT_MSUSPEND)|
(1<<SNDRV_TIMER_EVENT_MRESUME))) {
err = -EINVAL;
goto _end;
}
snd_timer_stop(tu->timeri);
spin_lock_irq(&t->lock);
tu->timeri->flags &= ~(SNDRV_TIMER_IFLG_AUTO|
SNDRV_TIMER_IFLG_EXCLUSIVE|
SNDRV_TIMER_IFLG_EARLY_EVENT);
if (params.flags & SNDRV_TIMER_PSFLG_AUTO)
tu->timeri->flags |= SNDRV_TIMER_IFLG_AUTO;
if (params.flags & SNDRV_TIMER_PSFLG_EXCLUSIVE)
tu->timeri->flags |= SNDRV_TIMER_IFLG_EXCLUSIVE;
if (params.flags & SNDRV_TIMER_PSFLG_EARLY_EVENT)
tu->timeri->flags |= SNDRV_TIMER_IFLG_EARLY_EVENT;
spin_unlock_irq(&t->lock);
if (params.queue_size > 0 &&
(unsigned int)tu->queue_size != params.queue_size) {
if (tu->tread) {
ttr = kmalloc(params.queue_size * sizeof(*ttr),
GFP_KERNEL);
if (ttr) {
kfree(tu->tqueue);
tu->queue_size = params.queue_size;
tu->tqueue = ttr;
}
} else {
tr = kmalloc(params.queue_size * sizeof(*tr),
GFP_KERNEL);
if (tr) {
kfree(tu->queue);
tu->queue_size = params.queue_size;
tu->queue = tr;
}
}
}
tu->qhead = tu->qtail = tu->qused = 0;
if (tu->timeri->flags & SNDRV_TIMER_IFLG_EARLY_EVENT) {
if (tu->tread) {
struct snd_timer_tread tread;
memset(&tread, 0, sizeof(tread));
tread.event = SNDRV_TIMER_EVENT_EARLY;
tread.tstamp.tv_sec = 0;
tread.tstamp.tv_nsec = 0;
tread.val = 0;
snd_timer_user_append_to_tqueue(tu, &tread);
} else {
struct snd_timer_read *r = &tu->queue[0];
r->resolution = 0;
r->ticks = 0;
tu->qused++;
tu->qtail++;
}
}
tu->filter = params.filter;
tu->ticks = params.ticks;
err = 0;
_end:
if (copy_to_user(_params, &params, sizeof(params)))
return -EFAULT;
return err;
}
static int snd_timer_user_status(struct file *file,
struct snd_timer_status __user *_status)
{
struct snd_timer_user *tu;
struct snd_timer_status status;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
memset(&status, 0, sizeof(status));
status.tstamp = tu->tstamp;
status.resolution = snd_timer_resolution(tu->timeri);
status.lost = tu->timeri->lost;
status.overrun = tu->overrun;
spin_lock_irq(&tu->qlock);
status.queue = tu->qused;
spin_unlock_irq(&tu->qlock);
if (copy_to_user(_status, &status, sizeof(status)))
return -EFAULT;
return 0;
}
static int snd_timer_user_start(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
snd_timer_stop(tu->timeri);
tu->timeri->lost = 0;
tu->last_resolution = 0;
return (err = snd_timer_start(tu->timeri, tu->ticks)) < 0 ? err : 0;
}
static int snd_timer_user_stop(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
return (err = snd_timer_stop(tu->timeri)) < 0 ? err : 0;
}
static int snd_timer_user_continue(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
tu->timeri->lost = 0;
return (err = snd_timer_continue(tu->timeri)) < 0 ? err : 0;
}
static int snd_timer_user_pause(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
return (err = snd_timer_pause(tu->timeri)) < 0 ? err : 0;
}
enum {
SNDRV_TIMER_IOCTL_START_OLD = _IO('T', 0x20),
SNDRV_TIMER_IOCTL_STOP_OLD = _IO('T', 0x21),
SNDRV_TIMER_IOCTL_CONTINUE_OLD = _IO('T', 0x22),
SNDRV_TIMER_IOCTL_PAUSE_OLD = _IO('T', 0x23),
};
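/*
 * Illustrative user-space sketch, not part of this file (assumes the
 * /dev/snd/timer node implied by the devname alias above, and that a
 * timer was already bound with SNDRV_TIMER_IOCTL_SELECT, since START
 * returns -EBADFD otherwise). Both numbers reach snd_timer_user_start():
 *
 *	int fd = open("/dev/snd/timer", O_RDONLY);
 *	ioctl(fd, SNDRV_TIMER_IOCTL_START);      (current number)
 *	ioctl(fd, SNDRV_TIMER_IOCTL_START_OLD);  (legacy alias)
 */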
static long __snd_timer_user_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct snd_timer_user *tu;
void __user *argp = (void __user *)arg;
int __user *p = argp;
tu = file->private_data;
switch (cmd) {
case SNDRV_TIMER_IOCTL_PVERSION:
return put_user(SNDRV_TIMER_VERSION, p) ? -EFAULT : 0;
case SNDRV_TIMER_IOCTL_NEXT_DEVICE:
return snd_timer_user_next_device(argp);
case SNDRV_TIMER_IOCTL_TREAD:
{
int xarg;
if (tu->timeri) /* too late */
return -EBUSY;
if (get_user(xarg, p))
return -EFAULT;
tu->tread = xarg ? 1 : 0;
return 0;
}
case SNDRV_TIMER_IOCTL_GINFO:
return snd_timer_user_ginfo(file, argp);
case SNDRV_TIMER_IOCTL_GPARAMS:
return snd_timer_user_gparams(file, argp);
case SNDRV_TIMER_IOCTL_GSTATUS:
return snd_timer_user_gstatus(file, argp);
case SNDRV_TIMER_IOCTL_SELECT:
return snd_timer_user_tselect(file, argp);
case SNDRV_TIMER_IOCTL_INFO:
return snd_timer_user_info(file, argp);
case SNDRV_TIMER_IOCTL_PARAMS:
return snd_timer_user_params(file, argp);
case SNDRV_TIMER_IOCTL_STATUS:
return snd_timer_user_status(file, argp);
case SNDRV_TIMER_IOCTL_START:
case SNDRV_TIMER_IOCTL_START_OLD:
return snd_timer_user_start(file);
case SNDRV_TIMER_IOCTL_STOP:
case SNDRV_TIMER_IOCTL_STOP_OLD:
return snd_timer_user_stop(file);
case SNDRV_TIMER_IOCTL_CONTINUE:
case SNDRV_TIMER_IOCTL_CONTINUE_OLD:
return snd_timer_user_continue(file);
case SNDRV_TIMER_IOCTL_PAUSE:
case SNDRV_TIMER_IOCTL_PAUSE_OLD:
return snd_timer_user_pause(file);
}
return -ENOTTY;
}
static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct snd_timer_user *tu = file->private_data;
long ret;
mutex_lock(&tu->ioctl_lock);
ret = __snd_timer_user_ioctl(file, cmd, arg);
mutex_unlock(&tu->ioctl_lock);
return ret;
}
static int snd_timer_user_fasync(int fd, struct file * file, int on)
{
struct snd_timer_user *tu;
tu = file->private_data;
return fasync_helper(fd, file, on, &tu->fasync);
}
static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
size_t count, loff_t *offset)
{
struct snd_timer_user *tu;
long result = 0, unit;
int qhead;
int err = 0;
tu = file->private_data;
unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read);
spin_lock_irq(&tu->qlock);
while ((long)count - result >= unit) {
while (!tu->qused) {
wait_queue_t wait;
if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
err = -EAGAIN;
goto _error;
}
set_current_state(TASK_INTERRUPTIBLE);
init_waitqueue_entry(&wait, current);
add_wait_queue(&tu->qchange_sleep, &wait);
spin_unlock_irq(&tu->qlock);
schedule();
spin_lock_irq(&tu->qlock);
remove_wait_queue(&tu->qchange_sleep, &wait);
if (tu->disconnected) {
err = -ENODEV;
goto _error;
}
if (signal_pending(current)) {
err = -ERESTARTSYS;
goto _error;
}
}
qhead = tu->qhead++;
tu->qhead %= tu->queue_size;
spin_unlock_irq(&tu->qlock);
if (tu->tread) {
if (copy_to_user(buffer, &tu->tqueue[qhead],
sizeof(struct snd_timer_tread)))
err = -EFAULT;
} else {
if (copy_to_user(buffer, &tu->queue[qhead],
sizeof(struct snd_timer_read)))
err = -EFAULT;
}
spin_lock_irq(&tu->qlock);
tu->qused--;
if (err < 0)
goto _error;
result += unit;
buffer += unit;
}
_error:
spin_unlock_irq(&tu->qlock);
return result > 0 ? result : err;
}
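/*
 * Illustrative reader sketch, not from this file: in tread mode every
 * unit is one struct snd_timer_tread, so a blocking consumer could
 * loop like this (fd is a descriptor for the timer device and handle()
 * is a hypothetical helper):
 *
 *	struct snd_timer_tread ev;
 *	while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
 *		handle(ev.event, ev.val);
 */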
static unsigned int snd_timer_user_poll(struct file *file, poll_table * wait)
{
unsigned int mask;
struct snd_timer_user *tu;
tu = file->private_data;
poll_wait(file, &tu->qchange_sleep, wait);
mask = 0;
if (tu->qused)
mask |= POLLIN | POLLRDNORM;
if (tu->disconnected)
mask |= POLLERR;
return mask;
}
#ifdef CONFIG_COMPAT
#include "timer_compat.c"
#else
#define snd_timer_user_ioctl_compat NULL
#endif
static const struct file_operations snd_timer_f_ops =
{
.owner = THIS_MODULE,
.read = snd_timer_user_read,
.open = snd_timer_user_open,
.release = snd_timer_user_release,
.llseek = no_llseek,
.poll = snd_timer_user_poll,
.unlocked_ioctl = snd_timer_user_ioctl,
.compat_ioctl = snd_timer_user_ioctl_compat,
.fasync = snd_timer_user_fasync,
};
/* free all registered timers, including the system timer */
static void snd_timer_free_all(void)
{
struct snd_timer *timer, *n;
list_for_each_entry_safe(timer, n, &snd_timer_list, device_list)
snd_timer_free(timer);
}
static struct device timer_dev;
/*
* ENTRY functions
*/
static int __init alsa_timer_init(void)
{
int err;
snd_device_initialize(&timer_dev, NULL);
dev_set_name(&timer_dev, "timer");
#ifdef SNDRV_OSS_INFO_DEV_TIMERS
snd_oss_info_register(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1,
"system timer");
#endif
err = snd_timer_register_system();
if (err < 0) {
pr_err("ALSA: unable to register system timer (%i)\n", err);
put_device(&timer_dev);
return err;
}
err = snd_register_device(SNDRV_DEVICE_TYPE_TIMER, NULL, 0,
&snd_timer_f_ops, NULL, &timer_dev);
if (err < 0) {
pr_err("ALSA: unable to register timer device (%i)\n", err);
snd_timer_free_all();
put_device(&timer_dev);
return err;
}
snd_timer_proc_init();
return 0;
}
static void __exit alsa_timer_exit(void)
{
snd_unregister_device(&timer_dev);
snd_timer_free_all();
put_device(&timer_dev);
snd_timer_proc_done();
#ifdef SNDRV_OSS_INFO_DEV_TIMERS
snd_oss_info_unregister(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1);
#endif
}
module_init(alsa_timer_init)
module_exit(alsa_timer_exit)
EXPORT_SYMBOL(snd_timer_open);
EXPORT_SYMBOL(snd_timer_close);
EXPORT_SYMBOL(snd_timer_resolution);
EXPORT_SYMBOL(snd_timer_start);
EXPORT_SYMBOL(snd_timer_stop);
EXPORT_SYMBOL(snd_timer_continue);
EXPORT_SYMBOL(snd_timer_pause);
EXPORT_SYMBOL(snd_timer_new);
EXPORT_SYMBOL(snd_timer_notify);
EXPORT_SYMBOL(snd_timer_global_new);
EXPORT_SYMBOL(snd_timer_global_free);
EXPORT_SYMBOL(snd_timer_global_register);
EXPORT_SYMBOL(snd_timer_interrupt);
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_5058_0 |
crossvul-cpp_data_good_866_5 | /*
* Copyright (C) 2014-2019 Yubico AB - See COPYING
*/
/* Define which PAM interfaces we provide */
#define PAM_SM_AUTH
/* Include PAM headers */
#include <security/pam_appl.h>
#include <security/pam_modules.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <stdlib.h>
#include <syslog.h>
#include <pwd.h>
#include <string.h>
#include <errno.h>
#include "util.h"
#include "drop_privs.h"
/* If secure_getenv is not defined, define it here */
#ifndef HAVE_SECURE_GETENV
char *secure_getenv(const char *);
char *secure_getenv(const char *name) {
(void)name;
return NULL;
}
#endif
static void parse_cfg(int flags, int argc, const char **argv, cfg_t *cfg) {
struct stat st;
FILE *file = NULL;
int fd = -1;
int i;
memset(cfg, 0, sizeof(cfg_t));
cfg->debug_file = stderr;
for (i = 0; i < argc; i++) {
if (strncmp(argv[i], "max_devices=", 12) == 0)
sscanf(argv[i], "max_devices=%u", &cfg->max_devs);
if (strcmp(argv[i], "manual") == 0)
cfg->manual = 1;
if (strcmp(argv[i], "debug") == 0)
cfg->debug = 1;
if (strcmp(argv[i], "nouserok") == 0)
cfg->nouserok = 1;
if (strcmp(argv[i], "openasuser") == 0)
cfg->openasuser = 1;
if (strcmp(argv[i], "alwaysok") == 0)
cfg->alwaysok = 1;
if (strcmp(argv[i], "interactive") == 0)
cfg->interactive = 1;
if (strcmp(argv[i], "cue") == 0)
cfg->cue = 1;
if (strcmp(argv[i], "nodetect") == 0)
cfg->nodetect = 1;
if (strncmp(argv[i], "authfile=", 9) == 0)
cfg->auth_file = argv[i] + 9;
if (strncmp(argv[i], "authpending_file=", 17) == 0)
cfg->authpending_file = argv[i] + 17;
if (strncmp(argv[i], "origin=", 7) == 0)
cfg->origin = argv[i] + 7;
if (strncmp(argv[i], "appid=", 6) == 0)
cfg->appid = argv[i] + 6;
if (strncmp(argv[i], "prompt=", 7) == 0)
cfg->prompt = argv[i] + 7;
if (strncmp (argv[i], "debug_file=", 11) == 0) {
const char *filename = argv[i] + 11;
if(strncmp (filename, "stdout", 6) == 0) {
cfg->debug_file = stdout;
}
else if(strncmp (filename, "stderr", 6) == 0) {
cfg->debug_file = stderr;
}
else if( strncmp (filename, "syslog", 6) == 0) {
cfg->debug_file = (FILE *)-1;
}
else {
fd = open(filename, O_WRONLY | O_APPEND | O_CLOEXEC | O_NOFOLLOW | O_NOCTTY);
if (fd >= 0 && (fstat(fd, &st) == 0) && S_ISREG(st.st_mode)) {
file = fdopen(fd, "a");
if(file != NULL) {
cfg->debug_file = file;
cfg->is_custom_debug_file = 1;
file = NULL;
fd = -1;
}
}
}
}
}
if (cfg->debug) {
D(cfg->debug_file, "called.");
D(cfg->debug_file, "flags %d argc %d", flags, argc);
for (i = 0; i < argc; i++) {
D(cfg->debug_file, "argv[%d]=%s", i, argv[i]);
}
D(cfg->debug_file, "max_devices=%d", cfg->max_devs);
D(cfg->debug_file, "debug=%d", cfg->debug);
D(cfg->debug_file, "interactive=%d", cfg->interactive);
D(cfg->debug_file, "cue=%d", cfg->cue);
D(cfg->debug_file, "nodetect=%d", cfg->nodetect);
D(cfg->debug_file, "manual=%d", cfg->manual);
D(cfg->debug_file, "nouserok=%d", cfg->nouserok);
D(cfg->debug_file, "openasuser=%d", cfg->openasuser);
D(cfg->debug_file, "alwaysok=%d", cfg->alwaysok);
D(cfg->debug_file, "authfile=%s", cfg->auth_file ? cfg->auth_file : "(null)");
D(cfg->debug_file, "authpending_file=%s", cfg->authpending_file ? cfg->authpending_file : "(null)");
D(cfg->debug_file, "origin=%s", cfg->origin ? cfg->origin : "(null)");
D(cfg->debug_file, "appid=%s", cfg->appid ? cfg->appid : "(null)");
D(cfg->debug_file, "prompt=%s", cfg->prompt ? cfg->prompt : "(null)");
}
if (fd != -1)
close(fd);
if (file != NULL)
fclose(file);
}
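/*
 * Illustrative PAM configuration line exercising the options parsed
 * above (path and values are examples only):
 *
 *	auth required pam_u2f.so debug debug_file=/var/log/pam_u2f.log max_devices=4 cue origin=pam://myhost appid=pam://myhost
 */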
#ifdef DBG
#undef DBG
#endif
#define DBG(...) \
if (cfg->debug) { \
D(cfg->debug_file, __VA_ARGS__); \
}
/* PAM entry point for authentication verification */
int pam_sm_authenticate(pam_handle_t *pamh, int flags, int argc,
const char **argv) {
struct passwd *pw = NULL, pw_s;
const char *user = NULL;
cfg_t cfg_st;
cfg_t *cfg = &cfg_st;
char buffer[BUFSIZE];
char *buf = NULL;
char *authfile_dir;
size_t authfile_dir_len;
int pgu_ret, gpn_ret;
int retval = PAM_IGNORE;
device_t *devices = NULL;
unsigned n_devices = 0;
int openasuser = 0;
int should_free_origin = 0;
int should_free_appid = 0;
int should_free_auth_file = 0;
int should_free_authpending_file = 0;
PAM_MODUTIL_DEF_PRIVS(privs);
parse_cfg(flags, argc, argv, cfg);
if (!cfg->origin) {
strcpy(buffer, DEFAULT_ORIGIN_PREFIX);
if (gethostname(buffer + strlen(DEFAULT_ORIGIN_PREFIX),
BUFSIZE - strlen(DEFAULT_ORIGIN_PREFIX)) == -1) {
DBG("Unable to get host name");
goto done;
}
DBG("Origin not specified, using \"%s\"", buffer);
cfg->origin = strdup(buffer);
if (!cfg->origin) {
DBG("Unable to allocate memory");
goto done;
} else {
should_free_origin = 1;
}
}
if (!cfg->appid) {
DBG("Appid not specified, using the same value of origin (%s)",
cfg->origin);
cfg->appid = strdup(cfg->origin);
if (!cfg->appid) {
DBG("Unable to allocate memory")
goto done;
} else {
should_free_appid = 1;
}
}
if (cfg->max_devs == 0) {
DBG("Maximum devices number not set. Using default (%d)", MAX_DEVS);
cfg->max_devs = MAX_DEVS;
}
devices = malloc(sizeof(device_t) * cfg->max_devs);
if (!devices) {
DBG("Unable to allocate memory");
retval = PAM_IGNORE;
goto done;
}
pgu_ret = pam_get_user(pamh, &user, NULL);
if (pgu_ret != PAM_SUCCESS || user == NULL) {
DBG("Unable to access user %s", user);
retval = PAM_CONV_ERR;
goto done;
}
DBG("Requesting authentication for user %s", user);
gpn_ret = getpwnam_r(user, &pw_s, buffer, sizeof(buffer), &pw);
if (gpn_ret != 0 || pw == NULL || pw->pw_dir == NULL ||
pw->pw_dir[0] != '/') {
DBG("Unable to retrieve credentials for user %s, (%s)", user,
strerror(errno));
retval = PAM_USER_UNKNOWN;
goto done;
}
DBG("Found user %s", user);
DBG("Home directory for %s is %s", user, pw->pw_dir);
if (!cfg->auth_file) {
buf = NULL;
authfile_dir = secure_getenv(DEFAULT_AUTHFILE_DIR_VAR);
if (!authfile_dir) {
DBG("Variable %s is not set. Using default value ($HOME/.config/)",
DEFAULT_AUTHFILE_DIR_VAR);
authfile_dir_len =
strlen(pw->pw_dir) + strlen("/.config") + strlen(DEFAULT_AUTHFILE) + 1;
buf = malloc(sizeof(char) * (authfile_dir_len));
if (!buf) {
DBG("Unable to allocate memory");
retval = PAM_IGNORE;
goto done;
}
/* Opening a file in a user's $HOME, need to drop privs for security */
openasuser = geteuid() == 0 ? 1 : 0;
snprintf(buf, authfile_dir_len,
"%s/.config%s", pw->pw_dir, DEFAULT_AUTHFILE);
} else {
DBG("Variable %s set to %s", DEFAULT_AUTHFILE_DIR_VAR, authfile_dir);
authfile_dir_len = strlen(authfile_dir) + strlen(DEFAULT_AUTHFILE) + 1;
buf = malloc(sizeof(char) * (authfile_dir_len));
if (!buf) {
DBG("Unable to allocate memory");
retval = PAM_IGNORE;
goto done;
}
snprintf(buf, authfile_dir_len,
"%s%s", authfile_dir, DEFAULT_AUTHFILE);
if (!openasuser) {
DBG("WARNING: not dropping privileges when reading %s, please "
"consider setting openasuser=1 in the module configuration", buf);
}
}
DBG("Using authentication file %s", buf);
cfg->auth_file = buf; /* cfg takes ownership */
should_free_auth_file = 1;
buf = NULL;
} else {
DBG("Using authentication file %s", cfg->auth_file);
}
if (!openasuser) {
openasuser = geteuid() == 0 && cfg->openasuser;
}
if (openasuser) {
DBG("Dropping privileges");
if (pam_modutil_drop_priv(pamh, &privs, pw)) {
DBG("Unable to switch user to uid %i", pw->pw_uid);
retval = PAM_IGNORE;
goto done;
}
DBG("Switched to uid %i", pw->pw_uid);
}
retval = get_devices_from_authfile(cfg->auth_file, user, cfg->max_devs,
cfg->debug, cfg->debug_file,
devices, &n_devices);
if (openasuser) {
if (pam_modutil_regain_priv(pamh, &privs)) {
DBG("could not restore privileges");
retval = PAM_IGNORE;
goto done;
}
DBG("Restored privileges");
}
if (retval != 1) {
// for nouserok; make sure errors in get_devices_from_authfile don't
// result in valid devices
n_devices = 0;
}
if (n_devices == 0) {
if (cfg->nouserok) {
DBG("Found no devices but nouserok specified. Skipping authentication");
retval = PAM_SUCCESS;
goto done;
} else if (retval != 1) {
DBG("Unable to get devices from file %s", cfg->auth_file);
retval = PAM_AUTHINFO_UNAVAIL;
goto done;
} else {
DBG("Found no devices. Aborting.");
retval = PAM_AUTHINFO_UNAVAIL;
goto done;
}
}
// Determine the full path for authpending_file in order to emit touch request notifications
if (!cfg->authpending_file) {
int actual_size = snprintf(buffer, BUFSIZE, DEFAULT_AUTHPENDING_FILE_PATH, getuid());
if (actual_size >= 0 && actual_size < BUFSIZE) {
cfg->authpending_file = strdup(buffer);
}
if (!cfg->authpending_file) {
DBG("Unable to allocate memory for the authpending_file, touch request notifications will not be emitted");
} else {
should_free_authpending_file = 1;
}
} else {
if (strlen(cfg->authpending_file) == 0) {
DBG("authpending_file is set to an empty value, touch request notifications will be disabled");
cfg->authpending_file = NULL;
}
}
int authpending_file_descriptor = -1;
if (cfg->authpending_file) {
DBG("Using file '%s' for emitting touch request notifications", cfg->authpending_file);
// Open (or create) the authpending_file to indicate that we start waiting for a touch
authpending_file_descriptor =
open(cfg->authpending_file, O_RDONLY | O_CREAT | O_CLOEXEC | O_NOFOLLOW | O_NOCTTY, 0664);
if (authpending_file_descriptor < 0) {
DBG("Unable to emit 'authentication started' notification by opening the file '%s', (%s)",
cfg->authpending_file, strerror(errno));
}
}
if (cfg->manual == 0) {
if (cfg->interactive) {
converse(pamh, PAM_PROMPT_ECHO_ON,
cfg->prompt != NULL ? cfg->prompt : DEFAULT_PROMPT);
}
retval = do_authentication(cfg, devices, n_devices, pamh);
} else {
retval = do_manual_authentication(cfg, devices, n_devices, pamh);
}
// Close the authpending_file to indicate that we stop waiting for a touch
if (authpending_file_descriptor >= 0) {
if (close(authpending_file_descriptor) < 0) {
DBG("Unable to emit 'authentication stopped' notification by closing the file '%s', (%s)",
cfg->authpending_file, strerror(errno));
}
}
if (retval != 1) {
DBG("do_authentication returned %d", retval);
retval = PAM_AUTH_ERR;
goto done;
}
retval = PAM_SUCCESS;
done:
free_devices(devices, n_devices);
if (buf) {
free(buf);
buf = NULL;
}
if (should_free_origin) {
free((char *) cfg->origin);
cfg->origin = NULL;
}
if (should_free_appid) {
free((char *) cfg->appid);
cfg->appid = NULL;
}
if (should_free_auth_file) {
free((char *) cfg->auth_file);
cfg->auth_file = NULL;
}
if (should_free_authpending_file) {
free((char *) cfg->authpending_file);
cfg->authpending_file = NULL;
}
if (cfg->alwaysok && retval != PAM_SUCCESS) {
DBG("alwaysok needed (otherwise return with %d)", retval);
retval = PAM_SUCCESS;
}
DBG("done. [%s]", pam_strerror(pamh, retval));
if (cfg->is_custom_debug_file) {
fclose(cfg->debug_file);
}
return retval;
}
PAM_EXTERN int pam_sm_setcred(pam_handle_t *pamh, int flags, int argc,
const char **argv) {
(void)pamh;
(void)flags;
(void)argc;
(void)argv;
return PAM_SUCCESS;
}
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_866_5 |
crossvul-cpp_data_bad_603_0 | // SPDX-License-Identifier: GPL-2.0
/* sbuslib.c: Helper library for SBUS framebuffer drivers.
*
* Copyright (C) 2003 David S. Miller (davem@redhat.com)
*/
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/fb.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/of_device.h>
#include <asm/fbio.h>
#include "sbuslib.h"
void sbusfb_fill_var(struct fb_var_screeninfo *var, struct device_node *dp,
int bpp)
{
memset(var, 0, sizeof(*var));
var->xres = of_getintprop_default(dp, "width", 1152);
var->yres = of_getintprop_default(dp, "height", 900);
var->xres_virtual = var->xres;
var->yres_virtual = var->yres;
var->bits_per_pixel = bpp;
}
EXPORT_SYMBOL(sbusfb_fill_var);
static unsigned long sbusfb_mmapsize(long size, unsigned long fbsize)
{
if (size == SBUS_MMAP_EMPTY) return 0;
if (size >= 0) return size;
return fbsize * (-size);
}
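/*
 * Worked example: a map entry with size = -1 spans one full framebuffer
 * (fbsize bytes) and size = -2 spans two; SBUS_MMAP_EMPTY maps nothing,
 * and any non-negative size is used verbatim.
 */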
int sbusfb_mmap_helper(struct sbus_mmap_map *map,
unsigned long physbase,
unsigned long fbsize,
unsigned long iospace,
struct vm_area_struct *vma)
{
unsigned int size, page, r, map_size;
unsigned long map_offset = 0;
unsigned long off;
int i;
if (!(vma->vm_flags & (VM_SHARED | VM_MAYSHARE)))
return -EINVAL;
size = vma->vm_end - vma->vm_start;
if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
return -EINVAL;
off = vma->vm_pgoff << PAGE_SHIFT;
/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
/* Each page, see which map applies */
for (page = 0; page < size; ){
map_size = 0;
for (i = 0; map[i].size; i++)
if (map[i].voff == off+page) {
map_size = sbusfb_mmapsize(map[i].size, fbsize);
#ifdef __sparc_v9__
#define POFF_MASK (PAGE_MASK|0x1UL)
#else
#define POFF_MASK (PAGE_MASK)
#endif
map_offset = (physbase + map[i].poff) & POFF_MASK;
break;
}
if (!map_size) {
page += PAGE_SIZE;
continue;
}
if (page + map_size > size)
map_size = size - page;
r = io_remap_pfn_range(vma,
vma->vm_start + page,
MK_IOSPACE_PFN(iospace,
map_offset >> PAGE_SHIFT),
map_size,
vma->vm_page_prot);
if (r)
return -EAGAIN;
page += map_size;
}
return 0;
}
EXPORT_SYMBOL(sbusfb_mmap_helper);
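/*
 * Illustrative map table, not from this file: callers pass an array
 * terminated by a zero-sized entry, with negative sizes meaning
 * multiples of the framebuffer size as shown above (all values below
 * are made up):
 *
 *	static struct sbus_mmap_map my_map[] = {
 *		{ .voff = 0x00000000, .poff = 0x00000000, .size = -1 },
 *		{ .voff = 0x00400000, .poff = 0x00010000, .size = 0x2000 },
 *		{ .size = 0 },
 *	};
 */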
int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
struct fb_info *info,
int type, int fb_depth, unsigned long fb_size)
{
switch(cmd) {
case FBIOGTYPE: {
struct fbtype __user *f = (struct fbtype __user *) arg;
if (put_user(type, &f->fb_type) ||
__put_user(info->var.yres, &f->fb_height) ||
__put_user(info->var.xres, &f->fb_width) ||
__put_user(fb_depth, &f->fb_depth) ||
__put_user(0, &f->fb_cmsize) ||
__put_user(fb_size, &f->fb_cmsize))
return -EFAULT;
return 0;
}
case FBIOPUTCMAP_SPARC: {
struct fbcmap __user *c = (struct fbcmap __user *) arg;
struct fb_cmap cmap;
u16 red, green, blue;
u8 red8, green8, blue8;
unsigned char __user *ured;
unsigned char __user *ugreen;
unsigned char __user *ublue;
int index, count, i;
if (get_user(index, &c->index) ||
__get_user(count, &c->count) ||
__get_user(ured, &c->red) ||
__get_user(ugreen, &c->green) ||
__get_user(ublue, &c->blue))
return -EFAULT;
cmap.len = 1;
cmap.red = &red;
cmap.green = &green;
cmap.blue = &blue;
cmap.transp = NULL;
for (i = 0; i < count; i++) {
int err;
if (get_user(red8, &ured[i]) ||
get_user(green8, &ugreen[i]) ||
get_user(blue8, &ublue[i]))
return -EFAULT;
red = red8 << 8;
green = green8 << 8;
blue = blue8 << 8;
cmap.start = index + i;
err = fb_set_cmap(&cmap, info);
if (err)
return err;
}
return 0;
}
case FBIOGETCMAP_SPARC: {
struct fbcmap __user *c = (struct fbcmap __user *) arg;
unsigned char __user *ured;
unsigned char __user *ugreen;
unsigned char __user *ublue;
struct fb_cmap *cmap = &info->cmap;
int index, count, i;
u8 red, green, blue;
if (get_user(index, &c->index) ||
__get_user(count, &c->count) ||
__get_user(ured, &c->red) ||
__get_user(ugreen, &c->green) ||
__get_user(ublue, &c->blue))
return -EFAULT;
if (index + count > cmap->len)
return -EINVAL;
for (i = 0; i < count; i++) {
red = cmap->red[index + i] >> 8;
green = cmap->green[index + i] >> 8;
blue = cmap->blue[index + i] >> 8;
if (put_user(red, &ured[i]) ||
put_user(green, &ugreen[i]) ||
put_user(blue, &ublue[i]))
return -EFAULT;
}
return 0;
}
default:
return -EINVAL;
}
}
EXPORT_SYMBOL(sbusfb_ioctl_helper);
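/*
 * Illustrative user-space probe, not from this file (the device path
 * is an example):
 *
 *	struct fbtype ft;
 *	int fd = open("/dev/fb0", O_RDWR);
 *	if (ioctl(fd, FBIOGTYPE, &ft) == 0)
 *		printf("%dx%d, depth %d\n", ft.fb_width, ft.fb_height,
 *		       ft.fb_depth);
 */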
#ifdef CONFIG_COMPAT
static int fbiogetputcmap(struct fb_info *info, unsigned int cmd, unsigned long arg)
{
struct fbcmap32 __user *argp = (void __user *)arg;
struct fbcmap __user *p = compat_alloc_user_space(sizeof(*p));
u32 addr;
int ret;
ret = copy_in_user(p, argp, 2 * sizeof(int));
ret |= get_user(addr, &argp->red);
ret |= put_user(compat_ptr(addr), &p->red);
ret |= get_user(addr, &argp->green);
ret |= put_user(compat_ptr(addr), &p->green);
ret |= get_user(addr, &argp->blue);
ret |= put_user(compat_ptr(addr), &p->blue);
if (ret)
return -EFAULT;
return info->fbops->fb_ioctl(info,
(cmd == FBIOPUTCMAP32) ?
FBIOPUTCMAP_SPARC : FBIOGETCMAP_SPARC,
(unsigned long)p);
}
static int fbiogscursor(struct fb_info *info, unsigned long arg)
{
struct fbcursor __user *p = compat_alloc_user_space(sizeof(*p));
struct fbcursor32 __user *argp = (void __user *)arg;
compat_uptr_t addr;
int ret;
ret = copy_in_user(p, argp,
2 * sizeof (short) + 2 * sizeof(struct fbcurpos));
ret |= copy_in_user(&p->size, &argp->size, sizeof(struct fbcurpos));
ret |= copy_in_user(&p->cmap, &argp->cmap, 2 * sizeof(int));
ret |= get_user(addr, &argp->cmap.red);
ret |= put_user(compat_ptr(addr), &p->cmap.red);
ret |= get_user(addr, &argp->cmap.green);
ret |= put_user(compat_ptr(addr), &p->cmap.green);
ret |= get_user(addr, &argp->cmap.blue);
ret |= put_user(compat_ptr(addr), &p->cmap.blue);
ret |= get_user(addr, &argp->mask);
ret |= put_user(compat_ptr(addr), &p->mask);
ret |= get_user(addr, &argp->image);
ret |= put_user(compat_ptr(addr), &p->image);
if (ret)
return -EFAULT;
return info->fbops->fb_ioctl(info, FBIOSCURSOR, (unsigned long)p);
}
int sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case FBIOGTYPE:
case FBIOSATTR:
case FBIOGATTR:
case FBIOSVIDEO:
case FBIOGVIDEO:
case FBIOGCURSOR32: /* This is not implemented yet.
Later it should be converted... */
case FBIOSCURPOS:
case FBIOGCURPOS:
case FBIOGCURMAX:
return info->fbops->fb_ioctl(info, cmd, arg);
case FBIOPUTCMAP32:
return fbiogetputcmap(info, cmd, arg);
case FBIOGETCMAP32:
return fbiogetputcmap(info, cmd, arg);
case FBIOSCURSOR32:
return fbiogscursor(info, arg);
default:
return -ENOIOCTLCMD;
}
}
EXPORT_SYMBOL(sbusfb_compat_ioctl);
#endif
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_603_0 |
crossvul-cpp_data_bad_5059_0 | /*
* Timers abstract layer
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/string.h>
#include <sound/core.h>
#include <sound/timer.h>
#include <sound/control.h>
#include <sound/info.h>
#include <sound/minors.h>
#include <sound/initval.h>
#include <linux/kmod.h>
#if IS_ENABLED(CONFIG_SND_HRTIMER)
#define DEFAULT_TIMER_LIMIT 4
#else
#define DEFAULT_TIMER_LIMIT 1
#endif
static int timer_limit = DEFAULT_TIMER_LIMIT;
static int timer_tstamp_monotonic = 1;
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, Takashi Iwai <tiwai@suse.de>");
MODULE_DESCRIPTION("ALSA timer interface");
MODULE_LICENSE("GPL");
module_param(timer_limit, int, 0444);
MODULE_PARM_DESC(timer_limit, "Maximum global timers in system.");
module_param(timer_tstamp_monotonic, int, 0444);
MODULE_PARM_DESC(timer_tstamp_monotonic, "Use posix monotonic clock source for timestamps (default).");
MODULE_ALIAS_CHARDEV(CONFIG_SND_MAJOR, SNDRV_MINOR_TIMER);
MODULE_ALIAS("devname:snd/timer");
struct snd_timer_user {
struct snd_timer_instance *timeri;
int tread; /* enhanced read with timestamps and events */
unsigned long ticks;
unsigned long overrun;
int qhead;
int qtail;
int qused;
int queue_size;
bool disconnected;
struct snd_timer_read *queue;
struct snd_timer_tread *tqueue;
spinlock_t qlock;
unsigned long last_resolution;
unsigned int filter;
struct timespec tstamp; /* trigger tstamp */
wait_queue_head_t qchange_sleep;
struct fasync_struct *fasync;
struct mutex ioctl_lock;
};
/* list of timers */
static LIST_HEAD(snd_timer_list);
/* list of slave instances */
static LIST_HEAD(snd_timer_slave_list);
/* lock for slave active lists */
static DEFINE_SPINLOCK(slave_active_lock);
static DEFINE_MUTEX(register_mutex);
static int snd_timer_free(struct snd_timer *timer);
static int snd_timer_dev_free(struct snd_device *device);
static int snd_timer_dev_register(struct snd_device *device);
static int snd_timer_dev_disconnect(struct snd_device *device);
static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_left);
/*
* create a timer instance with the given owner string.
* when timer is not NULL, increments the module counter
*/
static struct snd_timer_instance *snd_timer_instance_new(char *owner,
struct snd_timer *timer)
{
struct snd_timer_instance *timeri;
timeri = kzalloc(sizeof(*timeri), GFP_KERNEL);
if (timeri == NULL)
return NULL;
timeri->owner = kstrdup(owner, GFP_KERNEL);
if (! timeri->owner) {
kfree(timeri);
return NULL;
}
INIT_LIST_HEAD(&timeri->open_list);
INIT_LIST_HEAD(&timeri->active_list);
INIT_LIST_HEAD(&timeri->ack_list);
INIT_LIST_HEAD(&timeri->slave_list_head);
INIT_LIST_HEAD(&timeri->slave_active_head);
timeri->timer = timer;
if (timer && !try_module_get(timer->module)) {
kfree(timeri->owner);
kfree(timeri);
return NULL;
}
return timeri;
}
/*
* find a timer instance from the given timer id
*/
static struct snd_timer *snd_timer_find(struct snd_timer_id *tid)
{
struct snd_timer *timer = NULL;
list_for_each_entry(timer, &snd_timer_list, device_list) {
if (timer->tmr_class != tid->dev_class)
continue;
if ((timer->tmr_class == SNDRV_TIMER_CLASS_CARD ||
timer->tmr_class == SNDRV_TIMER_CLASS_PCM) &&
(timer->card == NULL ||
timer->card->number != tid->card))
continue;
if (timer->tmr_device != tid->device)
continue;
if (timer->tmr_subdevice != tid->subdevice)
continue;
return timer;
}
return NULL;
}
#ifdef CONFIG_MODULES
static void snd_timer_request(struct snd_timer_id *tid)
{
switch (tid->dev_class) {
case SNDRV_TIMER_CLASS_GLOBAL:
if (tid->device < timer_limit)
request_module("snd-timer-%i", tid->device);
break;
case SNDRV_TIMER_CLASS_CARD:
case SNDRV_TIMER_CLASS_PCM:
if (tid->card < snd_ecards_limit)
request_module("snd-card-%i", tid->card);
break;
default:
break;
}
}
#endif
/*
* look for a master instance matching with the slave id of the given slave.
* when found, relink the open_link of the slave.
*
* call this with register_mutex down.
*/
static void snd_timer_check_slave(struct snd_timer_instance *slave)
{
struct snd_timer *timer;
struct snd_timer_instance *master;
/* FIXME: it's really dumb to look up all entries.. */
list_for_each_entry(timer, &snd_timer_list, device_list) {
list_for_each_entry(master, &timer->open_list_head, open_list) {
if (slave->slave_class == master->slave_class &&
slave->slave_id == master->slave_id) {
list_move_tail(&slave->open_list,
&master->slave_list_head);
spin_lock_irq(&slave_active_lock);
slave->master = master;
slave->timer = master->timer;
spin_unlock_irq(&slave_active_lock);
return;
}
}
}
}
/*
* look for slave instances matching with the slave id of the given master.
* when found, relink the open_link of slaves.
*
* call this with register_mutex down.
*/
static void snd_timer_check_master(struct snd_timer_instance *master)
{
struct snd_timer_instance *slave, *tmp;
/* check all pending slaves */
list_for_each_entry_safe(slave, tmp, &snd_timer_slave_list, open_list) {
if (slave->slave_class == master->slave_class &&
slave->slave_id == master->slave_id) {
list_move_tail(&slave->open_list, &master->slave_list_head);
spin_lock_irq(&slave_active_lock);
spin_lock(&master->timer->lock);
slave->master = master;
slave->timer = master->timer;
if (slave->flags & SNDRV_TIMER_IFLG_RUNNING)
list_add_tail(&slave->active_list,
&master->slave_active_head);
spin_unlock(&master->timer->lock);
spin_unlock_irq(&slave_active_lock);
}
}
}
/*
* open a timer instance
* when opening a master, the slave id must be here given.
*/
int snd_timer_open(struct snd_timer_instance **ti,
char *owner, struct snd_timer_id *tid,
unsigned int slave_id)
{
struct snd_timer *timer;
struct snd_timer_instance *timeri = NULL;
if (tid->dev_class == SNDRV_TIMER_CLASS_SLAVE) {
/* open a slave instance */
if (tid->dev_sclass <= SNDRV_TIMER_SCLASS_NONE ||
tid->dev_sclass > SNDRV_TIMER_SCLASS_OSS_SEQUENCER) {
pr_debug("ALSA: timer: invalid slave class %i\n",
tid->dev_sclass);
return -EINVAL;
}
mutex_lock(&register_mutex);
timeri = snd_timer_instance_new(owner, NULL);
if (!timeri) {
mutex_unlock(&register_mutex);
return -ENOMEM;
}
timeri->slave_class = tid->dev_sclass;
timeri->slave_id = tid->device;
timeri->flags |= SNDRV_TIMER_IFLG_SLAVE;
list_add_tail(&timeri->open_list, &snd_timer_slave_list);
snd_timer_check_slave(timeri);
mutex_unlock(&register_mutex);
*ti = timeri;
return 0;
}
/* open a master instance */
mutex_lock(&register_mutex);
timer = snd_timer_find(tid);
#ifdef CONFIG_MODULES
if (!timer) {
mutex_unlock(&register_mutex);
snd_timer_request(tid);
mutex_lock(&register_mutex);
timer = snd_timer_find(tid);
}
#endif
if (!timer) {
mutex_unlock(&register_mutex);
return -ENODEV;
}
if (!list_empty(&timer->open_list_head)) {
timeri = list_entry(timer->open_list_head.next,
struct snd_timer_instance, open_list);
if (timeri->flags & SNDRV_TIMER_IFLG_EXCLUSIVE) {
mutex_unlock(&register_mutex);
return -EBUSY;
}
}
timeri = snd_timer_instance_new(owner, timer);
if (!timeri) {
mutex_unlock(&register_mutex);
return -ENOMEM;
}
/* take a card refcount for safe disconnection */
if (timer->card)
get_device(&timer->card->card_dev);
timeri->slave_class = tid->dev_sclass;
timeri->slave_id = slave_id;
if (list_empty(&timer->open_list_head) && timer->hw.open)
timer->hw.open(timer);
list_add_tail(&timeri->open_list, &timer->open_list_head);
snd_timer_check_master(timeri);
mutex_unlock(&register_mutex);
*ti = timeri;
return 0;
}
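/*
 * Illustrative in-kernel caller sketch, not from this file (the owner
 * string and my_callback are placeholders):
 *
 *	struct snd_timer_instance *ti;
 *	struct snd_timer_id tid = {
 *		.dev_class = SNDRV_TIMER_CLASS_GLOBAL,
 *		.dev_sclass = SNDRV_TIMER_SCLASS_APPLICATION,
 *		.card = -1,
 *		.device = SNDRV_TIMER_GLOBAL_SYSTEM,
 *		.subdevice = 0,
 *	};
 *	if (snd_timer_open(&ti, "my-client", &tid, 0) == 0) {
 *		ti->callback = my_callback;
 *		snd_timer_start(ti, 100);
 *	}
 */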
/*
* close a timer instance
*/
int snd_timer_close(struct snd_timer_instance *timeri)
{
struct snd_timer *timer = NULL;
struct snd_timer_instance *slave, *tmp;
if (snd_BUG_ON(!timeri))
return -ENXIO;
mutex_lock(&register_mutex);
list_del(&timeri->open_list);
/* force to stop the timer */
snd_timer_stop(timeri);
timer = timeri->timer;
if (timer) {
/* wait, until the active callback is finished */
spin_lock_irq(&timer->lock);
while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) {
spin_unlock_irq(&timer->lock);
udelay(10);
spin_lock_irq(&timer->lock);
}
spin_unlock_irq(&timer->lock);
/* remove slave links */
spin_lock_irq(&slave_active_lock);
spin_lock(&timer->lock);
list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head,
open_list) {
list_move_tail(&slave->open_list, &snd_timer_slave_list);
slave->master = NULL;
slave->timer = NULL;
list_del_init(&slave->ack_list);
list_del_init(&slave->active_list);
}
spin_unlock(&timer->lock);
spin_unlock_irq(&slave_active_lock);
/* slave doesn't need to release timer resources below */
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
timer = NULL;
}
if (timeri->private_free)
timeri->private_free(timeri);
kfree(timeri->owner);
kfree(timeri);
if (timer) {
if (list_empty(&timer->open_list_head) && timer->hw.close)
timer->hw.close(timer);
/* release a card refcount for safe disconnection */
if (timer->card)
put_device(&timer->card->card_dev);
module_put(timer->module);
}
mutex_unlock(&register_mutex);
return 0;
}
unsigned long snd_timer_resolution(struct snd_timer_instance *timeri)
{
struct snd_timer * timer;
if (timeri == NULL)
return 0;
if ((timer = timeri->timer) != NULL) {
if (timer->hw.c_resolution)
return timer->hw.c_resolution(timer);
return timer->hw.resolution;
}
return 0;
}
static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
{
struct snd_timer *timer;
unsigned long resolution = 0;
struct snd_timer_instance *ts;
struct timespec tstamp;
if (timer_tstamp_monotonic)
ktime_get_ts(&tstamp);
else
getnstimeofday(&tstamp);
if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_START ||
event > SNDRV_TIMER_EVENT_PAUSE))
return;
if (event == SNDRV_TIMER_EVENT_START ||
event == SNDRV_TIMER_EVENT_CONTINUE)
resolution = snd_timer_resolution(ti);
if (ti->ccallback)
ti->ccallback(ti, event, &tstamp, resolution);
if (ti->flags & SNDRV_TIMER_IFLG_SLAVE)
return;
timer = ti->timer;
if (timer == NULL)
return;
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
return;
list_for_each_entry(ts, &ti->slave_active_head, active_list)
if (ts->ccallback)
ts->ccallback(ts, event + 100, &tstamp, resolution);
}
/* start/continue a master timer */
static int snd_timer_start1(struct snd_timer_instance *timeri,
bool start, unsigned long ticks)
{
struct snd_timer *timer;
int result;
unsigned long flags;
timer = timeri->timer;
if (!timer)
return -EINVAL;
spin_lock_irqsave(&timer->lock, flags);
if (timer->card && timer->card->shutdown) {
result = -ENODEV;
goto unlock;
}
if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
SNDRV_TIMER_IFLG_START)) {
result = -EBUSY;
goto unlock;
}
if (start)
timeri->ticks = timeri->cticks = ticks;
else if (!timeri->cticks)
timeri->cticks = 1;
timeri->pticks = 0;
list_move_tail(&timeri->active_list, &timer->active_list_head);
if (timer->running) {
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
goto __start_now;
timer->flags |= SNDRV_TIMER_FLG_RESCHED;
timeri->flags |= SNDRV_TIMER_IFLG_START;
result = 1; /* delayed start */
} else {
if (start)
timer->sticks = ticks;
timer->hw.start(timer);
__start_now:
timer->running++;
timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
result = 0;
}
snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START :
SNDRV_TIMER_EVENT_CONTINUE);
unlock:
spin_unlock_irqrestore(&timer->lock, flags);
return result;
}
/* start/continue a slave timer */
static int snd_timer_start_slave(struct snd_timer_instance *timeri,
bool start)
{
unsigned long flags;
spin_lock_irqsave(&slave_active_lock, flags);
if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
spin_unlock_irqrestore(&slave_active_lock, flags);
return -EBUSY;
}
timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
if (timeri->master && timeri->timer) {
spin_lock(&timeri->timer->lock);
list_add_tail(&timeri->active_list,
&timeri->master->slave_active_head);
snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START :
SNDRV_TIMER_EVENT_CONTINUE);
spin_unlock(&timeri->timer->lock);
}
spin_unlock_irqrestore(&slave_active_lock, flags);
return 1; /* delayed start */
}
/* stop/pause a master timer */
static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop)
{
struct snd_timer *timer;
int result = 0;
unsigned long flags;
timer = timeri->timer;
if (!timer)
return -EINVAL;
spin_lock_irqsave(&timer->lock, flags);
if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
SNDRV_TIMER_IFLG_START))) {
result = -EBUSY;
goto unlock;
}
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
if (timer->card && timer->card->shutdown)
goto unlock;
if (stop) {
timeri->cticks = timeri->ticks;
timeri->pticks = 0;
}
if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
!(--timer->running)) {
timer->hw.stop(timer);
if (timer->flags & SNDRV_TIMER_FLG_RESCHED) {
timer->flags &= ~SNDRV_TIMER_FLG_RESCHED;
snd_timer_reschedule(timer, 0);
if (timer->flags & SNDRV_TIMER_FLG_CHANGE) {
timer->flags &= ~SNDRV_TIMER_FLG_CHANGE;
timer->hw.start(timer);
}
}
}
timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
	snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
			  SNDRV_TIMER_EVENT_PAUSE);
unlock:
spin_unlock_irqrestore(&timer->lock, flags);
return result;
}
/* stop/pause a slave timer */
static int snd_timer_stop_slave(struct snd_timer_instance *timeri, bool stop)
{
unsigned long flags;
spin_lock_irqsave(&slave_active_lock, flags);
if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
spin_unlock_irqrestore(&slave_active_lock, flags);
return -EBUSY;
}
timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
if (timeri->timer) {
spin_lock(&timeri->timer->lock);
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
		snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
				  SNDRV_TIMER_EVENT_PAUSE);
spin_unlock(&timeri->timer->lock);
}
spin_unlock_irqrestore(&slave_active_lock, flags);
return 0;
}
/*
* start the timer instance
*/
int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
{
if (timeri == NULL || ticks < 1)
return -EINVAL;
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_start_slave(timeri, true);
else
return snd_timer_start1(timeri, true, ticks);
}
/*
* stop the timer instance.
*
* do not call this from the timer callback!
*/
int snd_timer_stop(struct snd_timer_instance *timeri)
{
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_stop_slave(timeri, true);
else
return snd_timer_stop1(timeri, true);
}
/*
 * start again; the remaining ticks are kept
 */
int snd_timer_continue(struct snd_timer_instance *timeri)
{
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_start_slave(timeri, false);
else
return snd_timer_start1(timeri, false, 0);
}
/*
 * pause; remember the ticks left
 */
int snd_timer_pause(struct snd_timer_instance * timeri)
{
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_stop_slave(timeri, false);
else
return snd_timer_stop1(timeri, false);
}
/*
* reschedule the timer
*
* start pending instances and check the scheduling ticks.
* when the scheduling ticks is changed set CHANGE flag to reprogram the timer.
*/
static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_left)
{
struct snd_timer_instance *ti;
unsigned long ticks = ~0UL;
list_for_each_entry(ti, &timer->active_list_head, active_list) {
if (ti->flags & SNDRV_TIMER_IFLG_START) {
ti->flags &= ~SNDRV_TIMER_IFLG_START;
ti->flags |= SNDRV_TIMER_IFLG_RUNNING;
timer->running++;
}
if (ti->flags & SNDRV_TIMER_IFLG_RUNNING) {
if (ticks > ti->cticks)
ticks = ti->cticks;
}
}
if (ticks == ~0UL) {
timer->flags &= ~SNDRV_TIMER_FLG_RESCHED;
return;
}
if (ticks > timer->hw.ticks)
ticks = timer->hw.ticks;
if (ticks_left != ticks)
timer->flags |= SNDRV_TIMER_FLG_CHANGE;
timer->sticks = ticks;
}
/*
 * timer tasklet
 */
static void snd_timer_tasklet(unsigned long arg)
{
struct snd_timer *timer = (struct snd_timer *) arg;
struct snd_timer_instance *ti;
struct list_head *p;
unsigned long resolution, ticks;
unsigned long flags;
if (timer->card && timer->card->shutdown)
return;
spin_lock_irqsave(&timer->lock, flags);
/* now process all callbacks */
while (!list_empty(&timer->sack_list_head)) {
p = timer->sack_list_head.next; /* get first item */
ti = list_entry(p, struct snd_timer_instance, ack_list);
/* remove from ack_list and make empty */
list_del_init(p);
ticks = ti->pticks;
ti->pticks = 0;
resolution = ti->resolution;
ti->flags |= SNDRV_TIMER_IFLG_CALLBACK;
spin_unlock(&timer->lock);
if (ti->callback)
ti->callback(ti, resolution, ticks);
spin_lock(&timer->lock);
ti->flags &= ~SNDRV_TIMER_IFLG_CALLBACK;
}
spin_unlock_irqrestore(&timer->lock, flags);
}
/*
 * timer interrupt
 *
 * ticks_left is usually equal to timer->sticks.
 */
void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
{
struct snd_timer_instance *ti, *ts, *tmp;
unsigned long resolution, ticks;
struct list_head *p, *ack_list_head;
unsigned long flags;
int use_tasklet = 0;
if (timer == NULL)
return;
if (timer->card && timer->card->shutdown)
return;
spin_lock_irqsave(&timer->lock, flags);
/* remember the current resolution */
if (timer->hw.c_resolution)
resolution = timer->hw.c_resolution(timer);
else
resolution = timer->hw.resolution;
	/* loop for all active instances
	 * Here we cannot use list_for_each_entry because the active_list of a
	 * processed instance may be unlinked before the callback is called,
	 * hence the _safe iterator.
	 */
list_for_each_entry_safe(ti, tmp, &timer->active_list_head,
active_list) {
if (!(ti->flags & SNDRV_TIMER_IFLG_RUNNING))
continue;
ti->pticks += ticks_left;
ti->resolution = resolution;
if (ti->cticks < ticks_left)
ti->cticks = 0;
else
ti->cticks -= ticks_left;
if (ti->cticks) /* not expired */
continue;
if (ti->flags & SNDRV_TIMER_IFLG_AUTO) {
ti->cticks = ti->ticks;
} else {
ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
--timer->running;
list_del_init(&ti->active_list);
}
if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) ||
(ti->flags & SNDRV_TIMER_IFLG_FAST))
ack_list_head = &timer->ack_list_head;
else
ack_list_head = &timer->sack_list_head;
if (list_empty(&ti->ack_list))
list_add_tail(&ti->ack_list, ack_list_head);
list_for_each_entry(ts, &ti->slave_active_head, active_list) {
ts->pticks = ti->pticks;
ts->resolution = resolution;
if (list_empty(&ts->ack_list))
list_add_tail(&ts->ack_list, ack_list_head);
}
}
if (timer->flags & SNDRV_TIMER_FLG_RESCHED)
snd_timer_reschedule(timer, timer->sticks);
if (timer->running) {
if (timer->hw.flags & SNDRV_TIMER_HW_STOP) {
timer->hw.stop(timer);
timer->flags |= SNDRV_TIMER_FLG_CHANGE;
}
if (!(timer->hw.flags & SNDRV_TIMER_HW_AUTO) ||
(timer->flags & SNDRV_TIMER_FLG_CHANGE)) {
/* restart timer */
timer->flags &= ~SNDRV_TIMER_FLG_CHANGE;
timer->hw.start(timer);
}
} else {
timer->hw.stop(timer);
}
/* now process all fast callbacks */
while (!list_empty(&timer->ack_list_head)) {
p = timer->ack_list_head.next; /* get first item */
ti = list_entry(p, struct snd_timer_instance, ack_list);
/* remove from ack_list and make empty */
list_del_init(p);
ticks = ti->pticks;
ti->pticks = 0;
ti->flags |= SNDRV_TIMER_IFLG_CALLBACK;
spin_unlock(&timer->lock);
if (ti->callback)
ti->callback(ti, resolution, ticks);
spin_lock(&timer->lock);
ti->flags &= ~SNDRV_TIMER_IFLG_CALLBACK;
}
/* do we have any slow callbacks? */
use_tasklet = !list_empty(&timer->sack_list_head);
spin_unlock_irqrestore(&timer->lock, flags);
if (use_tasklet)
tasklet_schedule(&timer->task_queue);
}
/*
 * create a new timer device; when a card is given, it is registered as
 * a device of that card
 */
int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid,
struct snd_timer **rtimer)
{
struct snd_timer *timer;
int err;
static struct snd_device_ops ops = {
.dev_free = snd_timer_dev_free,
.dev_register = snd_timer_dev_register,
.dev_disconnect = snd_timer_dev_disconnect,
};
if (snd_BUG_ON(!tid))
return -EINVAL;
if (rtimer)
*rtimer = NULL;
timer = kzalloc(sizeof(*timer), GFP_KERNEL);
if (!timer)
return -ENOMEM;
timer->tmr_class = tid->dev_class;
timer->card = card;
timer->tmr_device = tid->device;
timer->tmr_subdevice = tid->subdevice;
if (id)
strlcpy(timer->id, id, sizeof(timer->id));
INIT_LIST_HEAD(&timer->device_list);
INIT_LIST_HEAD(&timer->open_list_head);
INIT_LIST_HEAD(&timer->active_list_head);
INIT_LIST_HEAD(&timer->ack_list_head);
INIT_LIST_HEAD(&timer->sack_list_head);
spin_lock_init(&timer->lock);
tasklet_init(&timer->task_queue, snd_timer_tasklet,
(unsigned long)timer);
if (card != NULL) {
timer->module = card->module;
err = snd_device_new(card, SNDRV_DEV_TIMER, timer, &ops);
if (err < 0) {
snd_timer_free(timer);
return err;
}
}
if (rtimer)
*rtimer = timer;
return 0;
}
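/* release the timer; any instances still open are detached first */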
static int snd_timer_free(struct snd_timer *timer)
{
if (!timer)
return 0;
mutex_lock(®ister_mutex);
if (! list_empty(&timer->open_list_head)) {
struct list_head *p, *n;
struct snd_timer_instance *ti;
pr_warn("ALSA: timer %p is busy?\n", timer);
list_for_each_safe(p, n, &timer->open_list_head) {
list_del_init(p);
ti = list_entry(p, struct snd_timer_instance, open_list);
ti->timer = NULL;
}
}
list_del(&timer->device_list);
mutex_unlock(®ister_mutex);
if (timer->private_free)
timer->private_free(timer);
kfree(timer);
return 0;
}
static int snd_timer_dev_free(struct snd_device *device)
{
struct snd_timer *timer = device->device_data;
return snd_timer_free(timer);
}
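/*
 * register the timer into the sorted global list; returns -EBUSY if a
 * timer with the same class/card/device/subdevice tuple already exists
 */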
static int snd_timer_dev_register(struct snd_device *dev)
{
struct snd_timer *timer = dev->device_data;
struct snd_timer *timer1;
if (snd_BUG_ON(!timer || !timer->hw.start || !timer->hw.stop))
return -ENXIO;
if (!(timer->hw.flags & SNDRV_TIMER_HW_SLAVE) &&
!timer->hw.resolution && timer->hw.c_resolution == NULL)
return -EINVAL;
mutex_lock(®ister_mutex);
list_for_each_entry(timer1, &snd_timer_list, device_list) {
if (timer1->tmr_class > timer->tmr_class)
break;
if (timer1->tmr_class < timer->tmr_class)
continue;
if (timer1->card && timer->card) {
if (timer1->card->number > timer->card->number)
break;
if (timer1->card->number < timer->card->number)
continue;
}
if (timer1->tmr_device > timer->tmr_device)
break;
if (timer1->tmr_device < timer->tmr_device)
continue;
if (timer1->tmr_subdevice > timer->tmr_subdevice)
break;
if (timer1->tmr_subdevice < timer->tmr_subdevice)
continue;
/* conflicts.. */
mutex_unlock(®ister_mutex);
return -EBUSY;
}
list_add_tail(&timer->device_list, &timer1->device_list);
mutex_unlock(®ister_mutex);
return 0;
}
static int snd_timer_dev_disconnect(struct snd_device *device)
{
struct snd_timer *timer = device->device_data;
struct snd_timer_instance *ti;
mutex_lock(®ister_mutex);
list_del_init(&timer->device_list);
/* wake up pending sleepers */
list_for_each_entry(ti, &timer->open_list_head, open_list) {
if (ti->disconnect)
ti->disconnect(ti);
}
mutex_unlock(®ister_mutex);
return 0;
}
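/*
 * snd_timer_notify - broadcast a master timer event (MSTART/MSTOP/...)
 * to all active instances of a slave-capable hardware timer
 */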
void snd_timer_notify(struct snd_timer *timer, int event, struct timespec *tstamp)
{
unsigned long flags;
unsigned long resolution = 0;
struct snd_timer_instance *ti, *ts;
if (timer->card && timer->card->shutdown)
return;
if (! (timer->hw.flags & SNDRV_TIMER_HW_SLAVE))
return;
if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_MSTART ||
event > SNDRV_TIMER_EVENT_MRESUME))
return;
spin_lock_irqsave(&timer->lock, flags);
if (event == SNDRV_TIMER_EVENT_MSTART ||
event == SNDRV_TIMER_EVENT_MCONTINUE ||
event == SNDRV_TIMER_EVENT_MRESUME) {
if (timer->hw.c_resolution)
resolution = timer->hw.c_resolution(timer);
else
resolution = timer->hw.resolution;
}
list_for_each_entry(ti, &timer->active_list_head, active_list) {
if (ti->ccallback)
ti->ccallback(ti, event, tstamp, resolution);
list_for_each_entry(ts, &ti->slave_active_head, active_list)
if (ts->ccallback)
ts->ccallback(ts, event, tstamp, resolution);
}
spin_unlock_irqrestore(&timer->lock, flags);
}
/*
* exported functions for global timers
*/
int snd_timer_global_new(char *id, int device, struct snd_timer **rtimer)
{
struct snd_timer_id tid;
tid.dev_class = SNDRV_TIMER_CLASS_GLOBAL;
tid.dev_sclass = SNDRV_TIMER_SCLASS_NONE;
tid.card = -1;
tid.device = device;
tid.subdevice = 0;
return snd_timer_new(NULL, id, &tid, rtimer);
}
int snd_timer_global_free(struct snd_timer *timer)
{
return snd_timer_free(timer);
}
int snd_timer_global_register(struct snd_timer *timer)
{
struct snd_device dev;
memset(&dev, 0, sizeof(dev));
dev.device_data = timer;
return snd_timer_dev_register(&dev);
}
/*
* System timer
*/
struct snd_timer_system_private {
struct timer_list tlist;
unsigned long last_expires;
unsigned long last_jiffies;
unsigned long correction;
};
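/* timer_list callback of the system timer: account for the drift and
 * feed the elapsed jiffies into the generic timer interrupt handler */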
static void snd_timer_s_function(unsigned long data)
{
struct snd_timer *timer = (struct snd_timer *)data;
struct snd_timer_system_private *priv = timer->private_data;
unsigned long jiff = jiffies;
if (time_after(jiff, priv->last_expires))
priv->correction += (long)jiff - (long)priv->last_expires;
snd_timer_interrupt(timer, (long)jiff - (long)priv->last_jiffies);
}
static int snd_timer_s_start(struct snd_timer * timer)
{
struct snd_timer_system_private *priv;
unsigned long njiff;
priv = (struct snd_timer_system_private *) timer->private_data;
njiff = (priv->last_jiffies = jiffies);
if (priv->correction > timer->sticks - 1) {
priv->correction -= timer->sticks - 1;
njiff++;
} else {
njiff += timer->sticks - priv->correction;
priv->correction = 0;
}
priv->last_expires = njiff;
mod_timer(&priv->tlist, njiff);
return 0;
}
static int snd_timer_s_stop(struct snd_timer * timer)
{
struct snd_timer_system_private *priv;
unsigned long jiff;
priv = (struct snd_timer_system_private *) timer->private_data;
del_timer(&priv->tlist);
jiff = jiffies;
if (time_before(jiff, priv->last_expires))
timer->sticks = priv->last_expires - jiff;
else
timer->sticks = 1;
priv->correction = 0;
return 0;
}
static int snd_timer_s_close(struct snd_timer *timer)
{
struct snd_timer_system_private *priv;
priv = (struct snd_timer_system_private *)timer->private_data;
del_timer_sync(&priv->tlist);
return 0;
}
static struct snd_timer_hardware snd_timer_system =
{
.flags = SNDRV_TIMER_HW_FIRST | SNDRV_TIMER_HW_TASKLET,
.resolution = 1000000000L / HZ,
.ticks = 10000000L,
.close = snd_timer_s_close,
.start = snd_timer_s_start,
.stop = snd_timer_s_stop
};
static void snd_timer_free_system(struct snd_timer *timer)
{
kfree(timer->private_data);
}
static int snd_timer_register_system(void)
{
struct snd_timer *timer;
struct snd_timer_system_private *priv;
int err;
err = snd_timer_global_new("system", SNDRV_TIMER_GLOBAL_SYSTEM, &timer);
if (err < 0)
return err;
strcpy(timer->name, "system timer");
timer->hw = snd_timer_system;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (priv == NULL) {
snd_timer_free(timer);
return -ENOMEM;
}
setup_timer(&priv->tlist, snd_timer_s_function, (unsigned long) timer);
timer->private_data = priv;
timer->private_free = snd_timer_free_system;
return snd_timer_global_register(timer);
}
#ifdef CONFIG_SND_PROC_FS
/*
* Info interface
*/
static void snd_timer_proc_read(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_timer *timer;
struct snd_timer_instance *ti;
mutex_lock(®ister_mutex);
list_for_each_entry(timer, &snd_timer_list, device_list) {
if (timer->card && timer->card->shutdown)
continue;
switch (timer->tmr_class) {
case SNDRV_TIMER_CLASS_GLOBAL:
snd_iprintf(buffer, "G%i: ", timer->tmr_device);
break;
case SNDRV_TIMER_CLASS_CARD:
snd_iprintf(buffer, "C%i-%i: ",
timer->card->number, timer->tmr_device);
break;
case SNDRV_TIMER_CLASS_PCM:
snd_iprintf(buffer, "P%i-%i-%i: ", timer->card->number,
timer->tmr_device, timer->tmr_subdevice);
break;
default:
snd_iprintf(buffer, "?%i-%i-%i-%i: ", timer->tmr_class,
timer->card ? timer->card->number : -1,
timer->tmr_device, timer->tmr_subdevice);
}
snd_iprintf(buffer, "%s :", timer->name);
if (timer->hw.resolution)
snd_iprintf(buffer, " %lu.%03luus (%lu ticks)",
timer->hw.resolution / 1000,
timer->hw.resolution % 1000,
timer->hw.ticks);
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
snd_iprintf(buffer, " SLAVE");
snd_iprintf(buffer, "\n");
list_for_each_entry(ti, &timer->open_list_head, open_list)
snd_iprintf(buffer, " Client %s : %s\n",
ti->owner ? ti->owner : "unknown",
ti->flags & (SNDRV_TIMER_IFLG_START |
SNDRV_TIMER_IFLG_RUNNING)
? "running" : "stopped");
}
mutex_unlock(®ister_mutex);
}
static struct snd_info_entry *snd_timer_proc_entry;
static void __init snd_timer_proc_init(void)
{
struct snd_info_entry *entry;
entry = snd_info_create_module_entry(THIS_MODULE, "timers", NULL);
if (entry != NULL) {
entry->c.text.read = snd_timer_proc_read;
if (snd_info_register(entry) < 0) {
snd_info_free_entry(entry);
entry = NULL;
}
}
snd_timer_proc_entry = entry;
}
static void __exit snd_timer_proc_done(void)
{
snd_info_free_entry(snd_timer_proc_entry);
}
#else /* !CONFIG_SND_PROC_FS */
#define snd_timer_proc_init()
#define snd_timer_proc_done()
#endif
/*
* USER SPACE interface
*/
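/*
 * tick callback for the classic read queue; consecutive ticks with the
 * same resolution are merged into the previous queue entry
 */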
static void snd_timer_user_interrupt(struct snd_timer_instance *timeri,
unsigned long resolution,
unsigned long ticks)
{
struct snd_timer_user *tu = timeri->callback_data;
struct snd_timer_read *r;
int prev;
spin_lock(&tu->qlock);
if (tu->qused > 0) {
prev = tu->qtail == 0 ? tu->queue_size - 1 : tu->qtail - 1;
r = &tu->queue[prev];
if (r->resolution == resolution) {
r->ticks += ticks;
goto __wake;
}
}
if (tu->qused >= tu->queue_size) {
tu->overrun++;
} else {
r = &tu->queue[tu->qtail++];
tu->qtail %= tu->queue_size;
r->resolution = resolution;
r->ticks = ticks;
tu->qused++;
}
__wake:
spin_unlock(&tu->qlock);
kill_fasync(&tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
static void snd_timer_user_append_to_tqueue(struct snd_timer_user *tu,
struct snd_timer_tread *tread)
{
if (tu->qused >= tu->queue_size) {
tu->overrun++;
} else {
memcpy(&tu->tqueue[tu->qtail++], tread, sizeof(*tread));
tu->qtail %= tu->queue_size;
tu->qused++;
}
}
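/*
 * event callback: record the event timestamp and, in tread mode, queue
 * a filtered snd_timer_tread record
 */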
static void snd_timer_user_ccallback(struct snd_timer_instance *timeri,
int event,
struct timespec *tstamp,
unsigned long resolution)
{
struct snd_timer_user *tu = timeri->callback_data;
struct snd_timer_tread r1;
unsigned long flags;
if (event >= SNDRV_TIMER_EVENT_START &&
event <= SNDRV_TIMER_EVENT_PAUSE)
tu->tstamp = *tstamp;
if ((tu->filter & (1 << event)) == 0 || !tu->tread)
return;
memset(&r1, 0, sizeof(r1));
r1.event = event;
r1.tstamp = *tstamp;
r1.val = resolution;
spin_lock_irqsave(&tu->qlock, flags);
snd_timer_user_append_to_tqueue(tu, &r1);
spin_unlock_irqrestore(&tu->qlock, flags);
kill_fasync(&tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
static void snd_timer_user_disconnect(struct snd_timer_instance *timeri)
{
struct snd_timer_user *tu = timeri->callback_data;
tu->disconnected = true;
wake_up(&tu->qchange_sleep);
}
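/*
 * tick callback for the extended (tread) queue; also emits
 * resolution-change events when the resolution differs from the last one
 */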
static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri,
unsigned long resolution,
unsigned long ticks)
{
struct snd_timer_user *tu = timeri->callback_data;
struct snd_timer_tread *r, r1;
struct timespec tstamp;
int prev, append = 0;
memset(&tstamp, 0, sizeof(tstamp));
spin_lock(&tu->qlock);
if ((tu->filter & ((1 << SNDRV_TIMER_EVENT_RESOLUTION) |
(1 << SNDRV_TIMER_EVENT_TICK))) == 0) {
spin_unlock(&tu->qlock);
return;
}
if (tu->last_resolution != resolution || ticks > 0) {
if (timer_tstamp_monotonic)
ktime_get_ts(&tstamp);
else
getnstimeofday(&tstamp);
}
if ((tu->filter & (1 << SNDRV_TIMER_EVENT_RESOLUTION)) &&
tu->last_resolution != resolution) {
		memset(&r1, 0, sizeof(r1)); /* zero-fill; don't leak stack data to user space */
		r1.event = SNDRV_TIMER_EVENT_RESOLUTION;
r1.tstamp = tstamp;
r1.val = resolution;
snd_timer_user_append_to_tqueue(tu, &r1);
tu->last_resolution = resolution;
append++;
}
if ((tu->filter & (1 << SNDRV_TIMER_EVENT_TICK)) == 0)
goto __wake;
if (ticks == 0)
goto __wake;
if (tu->qused > 0) {
prev = tu->qtail == 0 ? tu->queue_size - 1 : tu->qtail - 1;
r = &tu->tqueue[prev];
if (r->event == SNDRV_TIMER_EVENT_TICK) {
r->tstamp = tstamp;
r->val += ticks;
append++;
goto __wake;
}
}
	memset(&r1, 0, sizeof(r1)); /* zero-fill; don't leak stack data to user space */
	r1.event = SNDRV_TIMER_EVENT_TICK;
r1.tstamp = tstamp;
r1.val = ticks;
snd_timer_user_append_to_tqueue(tu, &r1);
append++;
__wake:
spin_unlock(&tu->qlock);
if (append == 0)
return;
kill_fasync(&tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
static int snd_timer_user_open(struct inode *inode, struct file *file)
{
struct snd_timer_user *tu;
int err;
err = nonseekable_open(inode, file);
if (err < 0)
return err;
tu = kzalloc(sizeof(*tu), GFP_KERNEL);
if (tu == NULL)
return -ENOMEM;
spin_lock_init(&tu->qlock);
init_waitqueue_head(&tu->qchange_sleep);
mutex_init(&tu->ioctl_lock);
tu->ticks = 1;
tu->queue_size = 128;
tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
GFP_KERNEL);
if (tu->queue == NULL) {
kfree(tu);
return -ENOMEM;
}
file->private_data = tu;
return 0;
}
static int snd_timer_user_release(struct inode *inode, struct file *file)
{
struct snd_timer_user *tu;
if (file->private_data) {
tu = file->private_data;
file->private_data = NULL;
mutex_lock(&tu->ioctl_lock);
if (tu->timeri)
snd_timer_close(tu->timeri);
mutex_unlock(&tu->ioctl_lock);
kfree(tu->queue);
kfree(tu->tqueue);
kfree(tu);
}
return 0;
}
static void snd_timer_user_zero_id(struct snd_timer_id *id)
{
id->dev_class = SNDRV_TIMER_CLASS_NONE;
id->dev_sclass = SNDRV_TIMER_SCLASS_NONE;
id->card = -1;
id->device = -1;
id->subdevice = -1;
}
static void snd_timer_user_copy_id(struct snd_timer_id *id, struct snd_timer *timer)
{
id->dev_class = timer->tmr_class;
id->dev_sclass = SNDRV_TIMER_SCLASS_NONE;
id->card = timer->card ? timer->card->number : -1;
id->device = timer->tmr_device;
id->subdevice = timer->tmr_subdevice;
}
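/*
 * SNDRV_TIMER_IOCTL_NEXT_DEVICE handler: look up the timer following the
 * given id in the global list and return its id to user space
 */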
static int snd_timer_user_next_device(struct snd_timer_id __user *_tid)
{
struct snd_timer_id id;
struct snd_timer *timer;
struct list_head *p;
if (copy_from_user(&id, _tid, sizeof(id)))
return -EFAULT;
mutex_lock(®ister_mutex);
if (id.dev_class < 0) { /* first item */
if (list_empty(&snd_timer_list))
snd_timer_user_zero_id(&id);
else {
timer = list_entry(snd_timer_list.next,
struct snd_timer, device_list);
snd_timer_user_copy_id(&id, timer);
}
} else {
switch (id.dev_class) {
case SNDRV_TIMER_CLASS_GLOBAL:
id.device = id.device < 0 ? 0 : id.device + 1;
list_for_each(p, &snd_timer_list) {
timer = list_entry(p, struct snd_timer, device_list);
if (timer->tmr_class > SNDRV_TIMER_CLASS_GLOBAL) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_device >= id.device) {
snd_timer_user_copy_id(&id, timer);
break;
}
}
if (p == &snd_timer_list)
snd_timer_user_zero_id(&id);
break;
case SNDRV_TIMER_CLASS_CARD:
case SNDRV_TIMER_CLASS_PCM:
			if (id.card < 0) {
				id.card = 0;
			} else {
				if (id.device < 0) {
					id.device = 0;
				} else {
					if (id.subdevice < 0)
						id.subdevice = 0;
					else
						id.subdevice++;
				}
			}
list_for_each(p, &snd_timer_list) {
timer = list_entry(p, struct snd_timer, device_list);
if (timer->tmr_class > id.dev_class) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_class < id.dev_class)
continue;
if (timer->card->number > id.card) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->card->number < id.card)
continue;
if (timer->tmr_device > id.device) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_device < id.device)
continue;
if (timer->tmr_subdevice > id.subdevice) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_subdevice < id.subdevice)
continue;
snd_timer_user_copy_id(&id, timer);
break;
}
if (p == &snd_timer_list)
snd_timer_user_zero_id(&id);
break;
default:
snd_timer_user_zero_id(&id);
}
}
mutex_unlock(®ister_mutex);
if (copy_to_user(_tid, &id, sizeof(*_tid)))
return -EFAULT;
return 0;
}
static int snd_timer_user_ginfo(struct file *file,
struct snd_timer_ginfo __user *_ginfo)
{
struct snd_timer_ginfo *ginfo;
struct snd_timer_id tid;
struct snd_timer *t;
struct list_head *p;
int err = 0;
ginfo = memdup_user(_ginfo, sizeof(*ginfo));
if (IS_ERR(ginfo))
return PTR_ERR(ginfo);
tid = ginfo->tid;
memset(ginfo, 0, sizeof(*ginfo));
ginfo->tid = tid;
mutex_lock(®ister_mutex);
t = snd_timer_find(&tid);
if (t != NULL) {
ginfo->card = t->card ? t->card->number : -1;
if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
ginfo->flags |= SNDRV_TIMER_FLG_SLAVE;
strlcpy(ginfo->id, t->id, sizeof(ginfo->id));
strlcpy(ginfo->name, t->name, sizeof(ginfo->name));
ginfo->resolution = t->hw.resolution;
if (t->hw.resolution_min > 0) {
ginfo->resolution_min = t->hw.resolution_min;
ginfo->resolution_max = t->hw.resolution_max;
}
list_for_each(p, &t->open_list_head) {
ginfo->clients++;
}
} else {
err = -ENODEV;
}
mutex_unlock(®ister_mutex);
if (err >= 0 && copy_to_user(_ginfo, ginfo, sizeof(*ginfo)))
err = -EFAULT;
kfree(ginfo);
return err;
}
static int timer_set_gparams(struct snd_timer_gparams *gparams)
{
struct snd_timer *t;
int err;
mutex_lock(®ister_mutex);
t = snd_timer_find(&gparams->tid);
if (!t) {
err = -ENODEV;
goto _error;
}
if (!list_empty(&t->open_list_head)) {
err = -EBUSY;
goto _error;
}
if (!t->hw.set_period) {
err = -ENOSYS;
goto _error;
}
err = t->hw.set_period(t, gparams->period_num, gparams->period_den);
_error:
mutex_unlock(®ister_mutex);
return err;
}
static int snd_timer_user_gparams(struct file *file,
struct snd_timer_gparams __user *_gparams)
{
struct snd_timer_gparams gparams;
if (copy_from_user(&gparams, _gparams, sizeof(gparams)))
return -EFAULT;
return timer_set_gparams(&gparams);
}
static int snd_timer_user_gstatus(struct file *file,
struct snd_timer_gstatus __user *_gstatus)
{
struct snd_timer_gstatus gstatus;
struct snd_timer_id tid;
struct snd_timer *t;
int err = 0;
if (copy_from_user(&gstatus, _gstatus, sizeof(gstatus)))
return -EFAULT;
tid = gstatus.tid;
memset(&gstatus, 0, sizeof(gstatus));
gstatus.tid = tid;
mutex_lock(®ister_mutex);
t = snd_timer_find(&tid);
if (t != NULL) {
if (t->hw.c_resolution)
gstatus.resolution = t->hw.c_resolution(t);
else
gstatus.resolution = t->hw.resolution;
if (t->hw.precise_resolution) {
t->hw.precise_resolution(t, &gstatus.resolution_num,
&gstatus.resolution_den);
} else {
gstatus.resolution_num = gstatus.resolution;
gstatus.resolution_den = 1000000000uL;
}
} else {
err = -ENODEV;
}
mutex_unlock(®ister_mutex);
if (err >= 0 && copy_to_user(_gstatus, &gstatus, sizeof(gstatus)))
err = -EFAULT;
return err;
}
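/*
 * SNDRV_TIMER_IOCTL_SELECT handler: bind this file to the selected timer
 * and (re)allocate the event queue to match the read mode
 */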
static int snd_timer_user_tselect(struct file *file,
struct snd_timer_select __user *_tselect)
{
struct snd_timer_user *tu;
struct snd_timer_select tselect;
char str[32];
int err = 0;
tu = file->private_data;
if (tu->timeri) {
snd_timer_close(tu->timeri);
tu->timeri = NULL;
}
if (copy_from_user(&tselect, _tselect, sizeof(tselect))) {
err = -EFAULT;
goto __err;
}
sprintf(str, "application %i", current->pid);
if (tselect.id.dev_class != SNDRV_TIMER_CLASS_SLAVE)
tselect.id.dev_sclass = SNDRV_TIMER_SCLASS_APPLICATION;
err = snd_timer_open(&tu->timeri, str, &tselect.id, current->pid);
if (err < 0)
goto __err;
kfree(tu->queue);
tu->queue = NULL;
kfree(tu->tqueue);
tu->tqueue = NULL;
if (tu->tread) {
tu->tqueue = kmalloc(tu->queue_size * sizeof(struct snd_timer_tread),
GFP_KERNEL);
if (tu->tqueue == NULL)
err = -ENOMEM;
} else {
tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
GFP_KERNEL);
if (tu->queue == NULL)
err = -ENOMEM;
}
if (err < 0) {
snd_timer_close(tu->timeri);
tu->timeri = NULL;
} else {
tu->timeri->flags |= SNDRV_TIMER_IFLG_FAST;
tu->timeri->callback = tu->tread
? snd_timer_user_tinterrupt : snd_timer_user_interrupt;
tu->timeri->ccallback = snd_timer_user_ccallback;
tu->timeri->callback_data = (void *)tu;
tu->timeri->disconnect = snd_timer_user_disconnect;
}
__err:
return err;
}
static int snd_timer_user_info(struct file *file,
struct snd_timer_info __user *_info)
{
struct snd_timer_user *tu;
struct snd_timer_info *info;
struct snd_timer *t;
int err = 0;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
t = tu->timeri->timer;
if (!t)
return -EBADFD;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (! info)
return -ENOMEM;
info->card = t->card ? t->card->number : -1;
if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
info->flags |= SNDRV_TIMER_FLG_SLAVE;
strlcpy(info->id, t->id, sizeof(info->id));
strlcpy(info->name, t->name, sizeof(info->name));
info->resolution = t->hw.resolution;
if (copy_to_user(_info, info, sizeof(*_info)))
err = -EFAULT;
kfree(info);
return err;
}
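/* SNDRV_TIMER_IOCTL_PARAMS handler: validate and apply the instance parameters */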
static int snd_timer_user_params(struct file *file,
struct snd_timer_params __user *_params)
{
struct snd_timer_user *tu;
struct snd_timer_params params;
struct snd_timer *t;
struct snd_timer_read *tr;
struct snd_timer_tread *ttr;
int err;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
t = tu->timeri->timer;
if (!t)
return -EBADFD;
if (copy_from_user(¶ms, _params, sizeof(params)))
return -EFAULT;
if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE) && params.ticks < 1) {
err = -EINVAL;
goto _end;
}
if (params.queue_size > 0 &&
(params.queue_size < 32 || params.queue_size > 1024)) {
err = -EINVAL;
goto _end;
}
if (params.filter & ~((1<<SNDRV_TIMER_EVENT_RESOLUTION)|
(1<<SNDRV_TIMER_EVENT_TICK)|
(1<<SNDRV_TIMER_EVENT_START)|
(1<<SNDRV_TIMER_EVENT_STOP)|
(1<<SNDRV_TIMER_EVENT_CONTINUE)|
(1<<SNDRV_TIMER_EVENT_PAUSE)|
(1<<SNDRV_TIMER_EVENT_SUSPEND)|
(1<<SNDRV_TIMER_EVENT_RESUME)|
(1<<SNDRV_TIMER_EVENT_MSTART)|
(1<<SNDRV_TIMER_EVENT_MSTOP)|
(1<<SNDRV_TIMER_EVENT_MCONTINUE)|
(1<<SNDRV_TIMER_EVENT_MPAUSE)|
(1<<SNDRV_TIMER_EVENT_MSUSPEND)|
(1<<SNDRV_TIMER_EVENT_MRESUME))) {
err = -EINVAL;
goto _end;
}
snd_timer_stop(tu->timeri);
spin_lock_irq(&t->lock);
tu->timeri->flags &= ~(SNDRV_TIMER_IFLG_AUTO|
SNDRV_TIMER_IFLG_EXCLUSIVE|
SNDRV_TIMER_IFLG_EARLY_EVENT);
if (params.flags & SNDRV_TIMER_PSFLG_AUTO)
tu->timeri->flags |= SNDRV_TIMER_IFLG_AUTO;
if (params.flags & SNDRV_TIMER_PSFLG_EXCLUSIVE)
tu->timeri->flags |= SNDRV_TIMER_IFLG_EXCLUSIVE;
if (params.flags & SNDRV_TIMER_PSFLG_EARLY_EVENT)
tu->timeri->flags |= SNDRV_TIMER_IFLG_EARLY_EVENT;
spin_unlock_irq(&t->lock);
if (params.queue_size > 0 &&
(unsigned int)tu->queue_size != params.queue_size) {
if (tu->tread) {
ttr = kmalloc(params.queue_size * sizeof(*ttr),
GFP_KERNEL);
if (ttr) {
kfree(tu->tqueue);
tu->queue_size = params.queue_size;
tu->tqueue = ttr;
}
} else {
tr = kmalloc(params.queue_size * sizeof(*tr),
GFP_KERNEL);
if (tr) {
kfree(tu->queue);
tu->queue_size = params.queue_size;
tu->queue = tr;
}
}
}
tu->qhead = tu->qtail = tu->qused = 0;
if (tu->timeri->flags & SNDRV_TIMER_IFLG_EARLY_EVENT) {
if (tu->tread) {
struct snd_timer_tread tread;
memset(&tread, 0, sizeof(tread));
tread.event = SNDRV_TIMER_EVENT_EARLY;
tread.tstamp.tv_sec = 0;
tread.tstamp.tv_nsec = 0;
tread.val = 0;
snd_timer_user_append_to_tqueue(tu, &tread);
} else {
struct snd_timer_read *r = &tu->queue[0];
r->resolution = 0;
r->ticks = 0;
tu->qused++;
tu->qtail++;
}
}
tu->filter = params.filter;
tu->ticks = params.ticks;
err = 0;
_end:
if (copy_to_user(_params, ¶ms, sizeof(params)))
return -EFAULT;
return err;
}
static int snd_timer_user_status(struct file *file,
struct snd_timer_status __user *_status)
{
struct snd_timer_user *tu;
struct snd_timer_status status;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
memset(&status, 0, sizeof(status));
status.tstamp = tu->tstamp;
status.resolution = snd_timer_resolution(tu->timeri);
status.lost = tu->timeri->lost;
status.overrun = tu->overrun;
spin_lock_irq(&tu->qlock);
status.queue = tu->qused;
spin_unlock_irq(&tu->qlock);
if (copy_to_user(_status, &status, sizeof(status)))
return -EFAULT;
return 0;
}
static int snd_timer_user_start(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
snd_timer_stop(tu->timeri);
tu->timeri->lost = 0;
tu->last_resolution = 0;
return (err = snd_timer_start(tu->timeri, tu->ticks)) < 0 ? err : 0;
}
static int snd_timer_user_stop(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
return (err = snd_timer_stop(tu->timeri)) < 0 ? err : 0;
}
static int snd_timer_user_continue(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
tu->timeri->lost = 0;
return (err = snd_timer_continue(tu->timeri)) < 0 ? err : 0;
}
static int snd_timer_user_pause(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
return (err = snd_timer_pause(tu->timeri)) < 0 ? err : 0;
}
enum {
SNDRV_TIMER_IOCTL_START_OLD = _IO('T', 0x20),
SNDRV_TIMER_IOCTL_STOP_OLD = _IO('T', 0x21),
SNDRV_TIMER_IOCTL_CONTINUE_OLD = _IO('T', 0x22),
SNDRV_TIMER_IOCTL_PAUSE_OLD = _IO('T', 0x23),
};
static long __snd_timer_user_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct snd_timer_user *tu;
void __user *argp = (void __user *)arg;
int __user *p = argp;
tu = file->private_data;
switch (cmd) {
case SNDRV_TIMER_IOCTL_PVERSION:
return put_user(SNDRV_TIMER_VERSION, p) ? -EFAULT : 0;
case SNDRV_TIMER_IOCTL_NEXT_DEVICE:
return snd_timer_user_next_device(argp);
case SNDRV_TIMER_IOCTL_TREAD:
{
int xarg;
if (tu->timeri) /* too late */
return -EBUSY;
if (get_user(xarg, p))
return -EFAULT;
tu->tread = xarg ? 1 : 0;
return 0;
}
case SNDRV_TIMER_IOCTL_GINFO:
return snd_timer_user_ginfo(file, argp);
case SNDRV_TIMER_IOCTL_GPARAMS:
return snd_timer_user_gparams(file, argp);
case SNDRV_TIMER_IOCTL_GSTATUS:
return snd_timer_user_gstatus(file, argp);
case SNDRV_TIMER_IOCTL_SELECT:
return snd_timer_user_tselect(file, argp);
case SNDRV_TIMER_IOCTL_INFO:
return snd_timer_user_info(file, argp);
case SNDRV_TIMER_IOCTL_PARAMS:
return snd_timer_user_params(file, argp);
case SNDRV_TIMER_IOCTL_STATUS:
return snd_timer_user_status(file, argp);
case SNDRV_TIMER_IOCTL_START:
case SNDRV_TIMER_IOCTL_START_OLD:
return snd_timer_user_start(file);
case SNDRV_TIMER_IOCTL_STOP:
case SNDRV_TIMER_IOCTL_STOP_OLD:
return snd_timer_user_stop(file);
case SNDRV_TIMER_IOCTL_CONTINUE:
case SNDRV_TIMER_IOCTL_CONTINUE_OLD:
return snd_timer_user_continue(file);
case SNDRV_TIMER_IOCTL_PAUSE:
case SNDRV_TIMER_IOCTL_PAUSE_OLD:
return snd_timer_user_pause(file);
}
return -ENOTTY;
}
static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct snd_timer_user *tu = file->private_data;
long ret;
mutex_lock(&tu->ioctl_lock);
ret = __snd_timer_user_ioctl(file, cmd, arg);
mutex_unlock(&tu->ioctl_lock);
return ret;
}
static int snd_timer_user_fasync(int fd, struct file * file, int on)
{
struct snd_timer_user *tu;
tu = file->private_data;
return fasync_helper(fd, file, on, &tu->fasync);
}
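/* read queued timer events; blocks until data arrives unless O_NONBLOCK is set */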
static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
size_t count, loff_t *offset)
{
struct snd_timer_user *tu;
long result = 0, unit;
int qhead;
int err = 0;
tu = file->private_data;
unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read);
spin_lock_irq(&tu->qlock);
while ((long)count - result >= unit) {
while (!tu->qused) {
wait_queue_t wait;
if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
err = -EAGAIN;
goto _error;
}
set_current_state(TASK_INTERRUPTIBLE);
init_waitqueue_entry(&wait, current);
add_wait_queue(&tu->qchange_sleep, &wait);
spin_unlock_irq(&tu->qlock);
schedule();
spin_lock_irq(&tu->qlock);
remove_wait_queue(&tu->qchange_sleep, &wait);
if (tu->disconnected) {
err = -ENODEV;
goto _error;
}
if (signal_pending(current)) {
err = -ERESTARTSYS;
goto _error;
}
}
qhead = tu->qhead++;
tu->qhead %= tu->queue_size;
spin_unlock_irq(&tu->qlock);
if (tu->tread) {
if (copy_to_user(buffer, &tu->tqueue[qhead],
sizeof(struct snd_timer_tread)))
err = -EFAULT;
} else {
if (copy_to_user(buffer, &tu->queue[qhead],
sizeof(struct snd_timer_read)))
err = -EFAULT;
}
spin_lock_irq(&tu->qlock);
tu->qused--;
if (err < 0)
goto _error;
result += unit;
buffer += unit;
}
_error:
spin_unlock_irq(&tu->qlock);
return result > 0 ? result : err;
}
static unsigned int snd_timer_user_poll(struct file *file, poll_table * wait)
{
unsigned int mask;
struct snd_timer_user *tu;
tu = file->private_data;
poll_wait(file, &tu->qchange_sleep, wait);
mask = 0;
if (tu->qused)
mask |= POLLIN | POLLRDNORM;
if (tu->disconnected)
mask |= POLLERR;
return mask;
}
#ifdef CONFIG_COMPAT
#include "timer_compat.c"
#else
#define snd_timer_user_ioctl_compat NULL
#endif
static const struct file_operations snd_timer_f_ops =
{
.owner = THIS_MODULE,
.read = snd_timer_user_read,
.open = snd_timer_user_open,
.release = snd_timer_user_release,
.llseek = no_llseek,
.poll = snd_timer_user_poll,
.unlocked_ioctl = snd_timer_user_ioctl,
.compat_ioctl = snd_timer_user_ioctl_compat,
.fasync = snd_timer_user_fasync,
};
/* free all registered timers */
static void snd_timer_free_all(void)
{
struct snd_timer *timer, *n;
list_for_each_entry_safe(timer, n, &snd_timer_list, device_list)
snd_timer_free(timer);
}
static struct device timer_dev;
/*
* ENTRY functions
*/
static int __init alsa_timer_init(void)
{
int err;
snd_device_initialize(&timer_dev, NULL);
dev_set_name(&timer_dev, "timer");
#ifdef SNDRV_OSS_INFO_DEV_TIMERS
snd_oss_info_register(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1,
"system timer");
#endif
err = snd_timer_register_system();
if (err < 0) {
pr_err("ALSA: unable to register system timer (%i)\n", err);
put_device(&timer_dev);
return err;
}
err = snd_register_device(SNDRV_DEVICE_TYPE_TIMER, NULL, 0,
&snd_timer_f_ops, NULL, &timer_dev);
if (err < 0) {
pr_err("ALSA: unable to register timer device (%i)\n", err);
snd_timer_free_all();
put_device(&timer_dev);
return err;
}
snd_timer_proc_init();
return 0;
}
static void __exit alsa_timer_exit(void)
{
snd_unregister_device(&timer_dev);
snd_timer_free_all();
put_device(&timer_dev);
snd_timer_proc_done();
#ifdef SNDRV_OSS_INFO_DEV_TIMERS
snd_oss_info_unregister(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1);
#endif
}
module_init(alsa_timer_init)
module_exit(alsa_timer_exit)
EXPORT_SYMBOL(snd_timer_open);
EXPORT_SYMBOL(snd_timer_close);
EXPORT_SYMBOL(snd_timer_resolution);
EXPORT_SYMBOL(snd_timer_start);
EXPORT_SYMBOL(snd_timer_stop);
EXPORT_SYMBOL(snd_timer_continue);
EXPORT_SYMBOL(snd_timer_pause);
EXPORT_SYMBOL(snd_timer_new);
EXPORT_SYMBOL(snd_timer_notify);
EXPORT_SYMBOL(snd_timer_global_new);
EXPORT_SYMBOL(snd_timer_global_free);
EXPORT_SYMBOL(snd_timer_global_register);
EXPORT_SYMBOL(snd_timer_interrupt);
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_5059_0 |
crossvul-cpp_data_good_5686_0 | /*
* Copyright (C) ST-Ericsson AB 2010
* Author: Sjur Brendeland sjur.brandeland@stericsson.com
* License terms: GNU General Public License (GPL) version 2
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/caif/caif_socket.h>
#include <linux/pkt_sched.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_dev.h>
#include <net/caif/cfpkt.h>
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(AF_CAIF);
/*
* CAIF state is re-using the TCP socket states.
* caif_states stored in sk_state reflect the state as reported by
* the CAIF stack, while sk_socket->state is the state of the socket.
*/
enum caif_states {
CAIF_CONNECTED = TCP_ESTABLISHED,
CAIF_CONNECTING = TCP_SYN_SENT,
CAIF_DISCONNECTED = TCP_CLOSE
};
#define TX_FLOW_ON_BIT 1
#define RX_FLOW_ON_BIT 2
struct caifsock {
struct sock sk; /* must be first member */
struct cflayer layer;
u32 flow_state;
struct caif_connect_request conn_req;
struct mutex readlock;
struct dentry *debugfs_socket_dir;
int headroom, tailroom, maxframe;
};
static int rx_flow_is_on(struct caifsock *cf_sk)
{
return test_bit(RX_FLOW_ON_BIT,
(void *) &cf_sk->flow_state);
}
static int tx_flow_is_on(struct caifsock *cf_sk)
{
return test_bit(TX_FLOW_ON_BIT,
(void *) &cf_sk->flow_state);
}
static void set_rx_flow_off(struct caifsock *cf_sk)
{
clear_bit(RX_FLOW_ON_BIT,
(void *) &cf_sk->flow_state);
}
static void set_rx_flow_on(struct caifsock *cf_sk)
{
set_bit(RX_FLOW_ON_BIT,
(void *) &cf_sk->flow_state);
}
static void set_tx_flow_off(struct caifsock *cf_sk)
{
clear_bit(TX_FLOW_ON_BIT,
(void *) &cf_sk->flow_state);
}
static void set_tx_flow_on(struct caifsock *cf_sk)
{
set_bit(TX_FLOW_ON_BIT,
(void *) &cf_sk->flow_state);
}
static void caif_read_lock(struct sock *sk)
{
struct caifsock *cf_sk;
cf_sk = container_of(sk, struct caifsock, sk);
mutex_lock(&cf_sk->readlock);
}
static void caif_read_unlock(struct sock *sk)
{
struct caifsock *cf_sk;
cf_sk = container_of(sk, struct caifsock, sk);
mutex_unlock(&cf_sk->readlock);
}
static int sk_rcvbuf_lowwater(struct caifsock *cf_sk)
{
	/* A quarter of the full buffer is used as the low water mark */
return cf_sk->sk.sk_rcvbuf / 4;
}
static void caif_flow_ctrl(struct sock *sk, int mode)
{
struct caifsock *cf_sk;
cf_sk = container_of(sk, struct caifsock, sk);
if (cf_sk->layer.dn && cf_sk->layer.dn->modemcmd)
cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode);
}
/*
 * Copied from sock.c:sock_queue_rcv_skb(), but changed so that packets are
 * not dropped; CAIF sends flow-off instead.
 */
static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
int err;
int skb_len;
unsigned long flags;
struct sk_buff_head *list = &sk->sk_receive_queue;
struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
(unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
net_dbg_ratelimited("sending flow OFF (queue len = %d %d)\n",
atomic_read(&cf_sk->sk.sk_rmem_alloc),
sk_rcvbuf_lowwater(cf_sk));
set_rx_flow_off(cf_sk);
caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
}
err = sk_filter(sk, skb);
if (err)
return err;
if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) {
set_rx_flow_off(cf_sk);
net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n");
caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
}
skb->dev = NULL;
skb_set_owner_r(skb, sk);
/* Cache the SKB length before we tack it onto the receive
* queue. Once it is added it no longer belongs to us and
* may be freed by other threads of control pulling packets
* from the queue.
*/
skb_len = skb->len;
spin_lock_irqsave(&list->lock, flags);
if (!sock_flag(sk, SOCK_DEAD))
__skb_queue_tail(list, skb);
spin_unlock_irqrestore(&list->lock, flags);
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk, skb_len);
else
kfree_skb(skb);
return 0;
}
/* Packet Receive Callback function called from CAIF Stack */
static int caif_sktrecv_cb(struct cflayer *layr, struct cfpkt *pkt)
{
struct caifsock *cf_sk;
struct sk_buff *skb;
cf_sk = container_of(layr, struct caifsock, layer);
skb = cfpkt_tonative(pkt);
if (unlikely(cf_sk->sk.sk_state != CAIF_CONNECTED)) {
kfree_skb(skb);
return 0;
}
caif_queue_rcv_skb(&cf_sk->sk, skb);
return 0;
}
static void cfsk_hold(struct cflayer *layr)
{
struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
sock_hold(&cf_sk->sk);
}
static void cfsk_put(struct cflayer *layr)
{
struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
sock_put(&cf_sk->sk);
}
/* Packet Control Callback function called from CAIF */
static void caif_ctrl_cb(struct cflayer *layr,
enum caif_ctrlcmd flow,
int phyid)
{
struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
switch (flow) {
case CAIF_CTRLCMD_FLOW_ON_IND:
/* OK from modem to start sending again */
set_tx_flow_on(cf_sk);
cf_sk->sk.sk_state_change(&cf_sk->sk);
break;
case CAIF_CTRLCMD_FLOW_OFF_IND:
/* Modem asks us to shut up */
set_tx_flow_off(cf_sk);
cf_sk->sk.sk_state_change(&cf_sk->sk);
break;
case CAIF_CTRLCMD_INIT_RSP:
/* We're now connected */
caif_client_register_refcnt(&cf_sk->layer,
cfsk_hold, cfsk_put);
cf_sk->sk.sk_state = CAIF_CONNECTED;
set_tx_flow_on(cf_sk);
cf_sk->sk.sk_shutdown = 0;
cf_sk->sk.sk_state_change(&cf_sk->sk);
break;
case CAIF_CTRLCMD_DEINIT_RSP:
/* We're now disconnected */
cf_sk->sk.sk_state = CAIF_DISCONNECTED;
cf_sk->sk.sk_state_change(&cf_sk->sk);
break;
case CAIF_CTRLCMD_INIT_FAIL_RSP:
/* Connect request failed */
cf_sk->sk.sk_err = ECONNREFUSED;
cf_sk->sk.sk_state = CAIF_DISCONNECTED;
cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
/*
* Socket "standards" seems to require POLLOUT to
* be set at connect failure.
*/
set_tx_flow_on(cf_sk);
cf_sk->sk.sk_state_change(&cf_sk->sk);
break;
case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
/* Modem has closed this connection, or device is down. */
cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
cf_sk->sk.sk_err = ECONNRESET;
set_rx_flow_on(cf_sk);
cf_sk->sk.sk_error_report(&cf_sk->sk);
break;
default:
pr_debug("Unexpected flow command %d\n", flow);
}
}
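/* send flow-on to the modem once the receive queue drains below the low water mark */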
static void caif_check_flow_release(struct sock *sk)
{
struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
if (rx_flow_is_on(cf_sk))
return;
if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
set_rx_flow_on(cf_sk);
caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
}
}
/*
* Copied from unix_dgram_recvmsg, but removed credit checks,
* changed locking, address handling and added MSG_TRUNC.
*/
static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t len, int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
int ret;
int copylen;
ret = -EOPNOTSUPP;
if (m->msg_flags&MSG_OOB)
goto read_error;
m->msg_namelen = 0;
	skb = skb_recv_datagram(sk, flags, 0, &ret);
if (!skb)
goto read_error;
copylen = skb->len;
if (len < copylen) {
m->msg_flags |= MSG_TRUNC;
copylen = len;
}
ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen);
if (ret)
goto out_free;
ret = (flags & MSG_TRUNC) ? skb->len : copylen;
out_free:
skb_free_datagram(sk, skb);
caif_check_flow_release(sk);
return ret;
read_error:
return ret;
}
/* Copied from unix_stream_wait_data, identical except for lock call. */
static long caif_stream_data_wait(struct sock *sk, long timeo)
{
DEFINE_WAIT(wait);
lock_sock(sk);
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (!skb_queue_empty(&sk->sk_receive_queue) ||
sk->sk_err ||
sk->sk_state != CAIF_CONNECTED ||
sock_flag(sk, SOCK_DEAD) ||
(sk->sk_shutdown & RCV_SHUTDOWN) ||
signal_pending(current) ||
!timeo)
break;
set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
}
finish_wait(sk_sleep(sk), &wait);
release_sock(sk);
return timeo;
}
/*
* Copied from unix_stream_recvmsg, but removed credit checks,
* changed locking calls, changed address handling.
*/
static int caif_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size,
int flags)
{
struct sock *sk = sock->sk;
int copied = 0;
int target;
int err = 0;
long timeo;
err = -EOPNOTSUPP;
if (flags&MSG_OOB)
goto out;
msg->msg_namelen = 0;
	/*
	 * Lock the socket to prevent queue disordering
	 * while we sleep in memcpy_toiovec
	 */
err = -EAGAIN;
if (sk->sk_state == CAIF_CONNECTING)
goto out;
caif_read_lock(sk);
target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
do {
int chunk;
struct sk_buff *skb;
lock_sock(sk);
skb = skb_dequeue(&sk->sk_receive_queue);
caif_check_flow_release(sk);
if (skb == NULL) {
if (copied >= target)
goto unlock;
/*
* POSIX 1003.1g mandates this order.
*/
err = sock_error(sk);
if (err)
goto unlock;
err = -ECONNRESET;
if (sk->sk_shutdown & RCV_SHUTDOWN)
goto unlock;
err = -EPIPE;
if (sk->sk_state != CAIF_CONNECTED)
goto unlock;
if (sock_flag(sk, SOCK_DEAD))
goto unlock;
release_sock(sk);
err = -EAGAIN;
if (!timeo)
break;
caif_read_unlock(sk);
timeo = caif_stream_data_wait(sk, timeo);
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
goto out;
}
caif_read_lock(sk);
continue;
unlock:
release_sock(sk);
break;
}
release_sock(sk);
chunk = min_t(unsigned int, skb->len, size);
if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
skb_queue_head(&sk->sk_receive_queue, skb);
if (copied == 0)
copied = -EFAULT;
break;
}
copied += chunk;
size -= chunk;
/* Mark read part of skb as used */
if (!(flags & MSG_PEEK)) {
skb_pull(skb, chunk);
/* put the skb back if we didn't use it up. */
if (skb->len) {
skb_queue_head(&sk->sk_receive_queue, skb);
break;
}
kfree_skb(skb);
} else {
/*
* It is questionable, see note in unix_dgram_recvmsg.
*/
/* put message back and return */
skb_queue_head(&sk->sk_receive_queue, skb);
break;
}
} while (size);
caif_read_unlock(sk);
out:
return copied ? : err;
}
/*
 * Copied from sock.c:sock_wait_for_wmem, but changed to wait for
 * CAIF flow-on and sock_writeable.
 */
static long caif_wait_for_flow_on(struct caifsock *cf_sk,
int wait_writeable, long timeo, int *err)
{
struct sock *sk = &cf_sk->sk;
DEFINE_WAIT(wait);
for (;;) {
*err = 0;
if (tx_flow_is_on(cf_sk) &&
(!wait_writeable || sock_writeable(&cf_sk->sk)))
break;
*err = -ETIMEDOUT;
if (!timeo)
break;
*err = -ERESTARTSYS;
if (signal_pending(current))
break;
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
*err = -ECONNRESET;
if (sk->sk_shutdown & SHUTDOWN_MASK)
break;
*err = -sk->sk_err;
if (sk->sk_err)
break;
*err = -EPIPE;
if (cf_sk->sk.sk_state != CAIF_CONNECTED)
break;
timeo = schedule_timeout(timeo);
}
finish_wait(sk_sleep(sk), &wait);
return timeo;
}
/*
* Transmit a SKB. The device may temporarily request re-transmission
* by returning EAGAIN.
*/
static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
int noblock, long timeo)
{
struct cfpkt *pkt;
pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
memset(skb->cb, 0, sizeof(struct caif_payload_info));
cfpkt_set_prio(pkt, cf_sk->sk.sk_priority);
if (cf_sk->layer.dn == NULL) {
kfree_skb(skb);
return -EINVAL;
}
return cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
}
/* Copied from af_unix:unix_dgram_sendmsg, and adapted to CAIF */
static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
int buffer_size;
int ret = 0;
struct sk_buff *skb = NULL;
int noblock;
long timeo;
caif_assert(cf_sk);
ret = sock_error(sk);
if (ret)
goto err;
ret = -EOPNOTSUPP;
if (msg->msg_flags&MSG_OOB)
goto err;
ret = -EOPNOTSUPP;
if (msg->msg_namelen)
goto err;
ret = -EINVAL;
if (unlikely(msg->msg_iov->iov_base == NULL))
goto err;
noblock = msg->msg_flags & MSG_DONTWAIT;
timeo = sock_sndtimeo(sk, noblock);
timeo = caif_wait_for_flow_on(container_of(sk, struct caifsock, sk),
1, timeo, &ret);
if (ret)
goto err;
ret = -EPIPE;
if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
sock_flag(sk, SOCK_DEAD) ||
(sk->sk_shutdown & RCV_SHUTDOWN))
goto err;
/* Error if trying to write more than maximum frame size. */
ret = -EMSGSIZE;
if (len > cf_sk->maxframe && cf_sk->sk.sk_protocol != CAIFPROTO_RFM)
goto err;
buffer_size = len + cf_sk->headroom + cf_sk->tailroom;
ret = -ENOMEM;
skb = sock_alloc_send_skb(sk, buffer_size, noblock, &ret);
if (!skb || skb_tailroom(skb) < buffer_size)
goto err;
skb_reserve(skb, cf_sk->headroom);
ret = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
if (ret)
goto err;
ret = transmit_skb(skb, cf_sk, noblock, timeo);
if (ret < 0)
/* skb is already freed */
return ret;
return len;
err:
kfree_skb(skb);
return ret;
}
/*
 * Copied from unix_stream_sendmsg and adapted to CAIF:
 * removed permission handling, added waiting for flow-on,
 * and other minor adaptations.
 */
static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
int err, size;
struct sk_buff *skb;
int sent = 0;
long timeo;
err = -EOPNOTSUPP;
if (unlikely(msg->msg_flags&MSG_OOB))
goto out_err;
if (unlikely(msg->msg_namelen))
goto out_err;
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
timeo = caif_wait_for_flow_on(cf_sk, 1, timeo, &err);
if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN))
goto pipe_err;
while (sent < len) {
size = len-sent;
if (size > cf_sk->maxframe)
size = cf_sk->maxframe;
/* If size is more than half of sndbuf, chop up message */
if (size > ((sk->sk_sndbuf >> 1) - 64))
size = (sk->sk_sndbuf >> 1) - 64;
if (size > SKB_MAX_ALLOC)
size = SKB_MAX_ALLOC;
skb = sock_alloc_send_skb(sk,
size + cf_sk->headroom +
cf_sk->tailroom,
msg->msg_flags&MSG_DONTWAIT,
&err);
if (skb == NULL)
goto out_err;
skb_reserve(skb, cf_sk->headroom);
/*
* If you pass two values to the sock_alloc_send_skb
* it tries to grab the large buffer with GFP_NOFS
* (which can fail easily), and if it fails grab the
* fallback size buffer which is under a page and will
* succeed. [Alan]
*/
size = min_t(int, size, skb_tailroom(skb));
err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
if (err) {
kfree_skb(skb);
goto out_err;
}
err = transmit_skb(skb, cf_sk,
msg->msg_flags&MSG_DONTWAIT, timeo);
if (err < 0)
/* skb is already freed */
goto pipe_err;
sent += size;
}
return sent;
pipe_err:
if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
send_sig(SIGPIPE, current, 0);
err = -EPIPE;
out_err:
return sent ? : err;
}
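/* handle SOL_CAIF socket options; only permitted while the socket is unconnected */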
static int setsockopt(struct socket *sock,
int lvl, int opt, char __user *ov, unsigned int ol)
{
struct sock *sk = sock->sk;
struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
int linksel;
if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED)
return -ENOPROTOOPT;
switch (opt) {
case CAIFSO_LINK_SELECT:
if (ol < sizeof(int))
return -EINVAL;
if (lvl != SOL_CAIF)
goto bad_sol;
if (copy_from_user(&linksel, ov, sizeof(int)))
return -EINVAL;
lock_sock(&(cf_sk->sk));
cf_sk->conn_req.link_selector = linksel;
release_sock(&cf_sk->sk);
return 0;
case CAIFSO_REQ_PARAM:
if (lvl != SOL_CAIF)
goto bad_sol;
if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL)
return -ENOPROTOOPT;
lock_sock(&(cf_sk->sk));
if (ol > sizeof(cf_sk->conn_req.param.data) ||
copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) {
release_sock(&cf_sk->sk);
return -EINVAL;
}
cf_sk->conn_req.param.size = ol;
release_sock(&cf_sk->sk);
return 0;
default:
return -ENOPROTOOPT;
}
return 0;
bad_sol:
return -ENOPROTOOPT;
}
/*
* caif_connect() - Connect a CAIF Socket
* Copied and modified af_irda.c:irda_connect().
*
* Note : by consulting "errno", the user space caller may learn the cause
* of the failure. Most of them are visible in the function, others may come
* from subroutines called and are listed here :
* o -EAFNOSUPPORT: bad socket family or type.
* o -ESOCKTNOSUPPORT: bad socket type or protocol
* o -EINVAL: bad socket address, or CAIF link type
* o -ECONNREFUSED: remote end refused the connection.
* o -EINPROGRESS: connect request sent but timed out (or non-blocking)
* o -EISCONN: already connected.
* o -ETIMEDOUT: Connection timed out (send timeout)
* o -ENODEV: No link layer to send request
* o -ECONNRESET: Received Shutdown indication or lost link layer
* o -ENOMEM: Out of memory
*
* State Strategy:
* o sk_state: holds the CAIF_* protocol state, it's updated by
* caif_ctrl_cb.
* o sock->state: holds the SS_* socket state and is updated by connect and
* disconnect.
*/
static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags)
{
struct sock *sk = sock->sk;
struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
long timeo;
int err;
int ifindex, headroom, tailroom;
unsigned int mtu;
struct net_device *dev;
lock_sock(sk);
err = -EAFNOSUPPORT;
if (uaddr->sa_family != AF_CAIF)
goto out;
switch (sock->state) {
case SS_UNCONNECTED:
/* Normal case, a fresh connect */
caif_assert(sk->sk_state == CAIF_DISCONNECTED);
break;
case SS_CONNECTING:
switch (sk->sk_state) {
case CAIF_CONNECTED:
sock->state = SS_CONNECTED;
err = -EISCONN;
goto out;
case CAIF_DISCONNECTED:
/* Reconnect allowed */
break;
case CAIF_CONNECTING:
err = -EALREADY;
if (flags & O_NONBLOCK)
goto out;
goto wait_connect;
}
break;
case SS_CONNECTED:
caif_assert(sk->sk_state == CAIF_CONNECTED ||
sk->sk_state == CAIF_DISCONNECTED);
if (sk->sk_shutdown & SHUTDOWN_MASK) {
/* Allow re-connect after SHUTDOWN_IND */
caif_disconnect_client(sock_net(sk), &cf_sk->layer);
caif_free_client(&cf_sk->layer);
break;
}
/* No reconnect on a seqpacket socket */
err = -EISCONN;
goto out;
case SS_DISCONNECTING:
case SS_FREE:
		caif_assert(0); /* should never happen */
break;
}
sk->sk_state = CAIF_DISCONNECTED;
sock->state = SS_UNCONNECTED;
sk_stream_kill_queues(&cf_sk->sk);
err = -EINVAL;
if (addr_len != sizeof(struct sockaddr_caif))
goto out;
memcpy(&cf_sk->conn_req.sockaddr, uaddr,
sizeof(struct sockaddr_caif));
/* Move to connecting socket, start sending Connect Requests */
sock->state = SS_CONNECTING;
sk->sk_state = CAIF_CONNECTING;
	/* Check priority value coming from socket */
	/* if priority value is out of range it will be adjusted */
if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX)
cf_sk->conn_req.priority = CAIF_PRIO_MAX;
else if (cf_sk->sk.sk_priority < CAIF_PRIO_MIN)
cf_sk->conn_req.priority = CAIF_PRIO_MIN;
else
cf_sk->conn_req.priority = cf_sk->sk.sk_priority;
	/* ifindex = id of the interface. */
cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
cf_sk->layer.receive = caif_sktrecv_cb;
err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
&cf_sk->layer, &ifindex, &headroom, &tailroom);
if (err < 0) {
cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
cf_sk->sk.sk_state = CAIF_DISCONNECTED;
goto out;
}
err = -ENODEV;
rcu_read_lock();
dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
if (!dev) {
rcu_read_unlock();
goto out;
}
cf_sk->headroom = LL_RESERVED_SPACE_EXTRA(dev, headroom);
mtu = dev->mtu;
rcu_read_unlock();
cf_sk->tailroom = tailroom;
cf_sk->maxframe = mtu - (headroom + tailroom);
if (cf_sk->maxframe < 1) {
		/* use the cached mtu: dev must not be touched after rcu_read_unlock() */
		pr_warn("CAIF Interface MTU too small (%u)\n", mtu);
err = -ENODEV;
goto out;
}
err = -EINPROGRESS;
wait_connect:
if (sk->sk_state != CAIF_CONNECTED && (flags & O_NONBLOCK))
goto out;
timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
release_sock(sk);
err = -ERESTARTSYS;
timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
sk->sk_state != CAIF_CONNECTING,
timeo);
lock_sock(sk);
if (timeo < 0)
goto out; /* -ERESTARTSYS */
err = -ETIMEDOUT;
if (timeo == 0 && sk->sk_state != CAIF_CONNECTED)
goto out;
if (sk->sk_state != CAIF_CONNECTED) {
sock->state = SS_UNCONNECTED;
err = sock_error(sk);
if (!err)
err = -ECONNREFUSED;
goto out;
}
sock->state = SS_CONNECTED;
err = 0;
out:
release_sock(sk);
return err;
}
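/*
 * A minimal userspace sketch of calling connect() on a CAIF socket, as
 * handled by caif_connect() above. The service name "psock_test" is
 * only a placeholder; struct sockaddr_caif comes from
 * <linux/caif/caif_socket.h>, and addr_len must be exactly
 * sizeof(struct sockaddr_caif) or the kernel returns -EINVAL.
 */
#if 0 /* userspace example, not compiled with the kernel */
#include <string.h>
#include <sys/socket.h>
#include <linux/caif/caif_socket.h>

static int caif_connect_util(int fd)
{
	struct sockaddr_caif addr;

	memset(&addr, 0, sizeof(addr));
	addr.family = AF_CAIF;
	strncpy(addr.u.util.service, "psock_test",
		sizeof(addr.u.util.service) - 1);
	return connect(fd, (struct sockaddr *)&addr, sizeof(addr));
}
#endif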
/*
* caif_release() - Disconnect a CAIF Socket
* Copied and modified af_irda.c:irda_release().
*/
static int caif_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
if (!sk)
return 0;
set_tx_flow_off(cf_sk);
/*
* Ensure that packets are not queued after this point in time.
	 * caif_queue_rcv_skb checks SOCK_DEAD while holding the queue lock,
	 * which guarantees that no packets are queued once the sock is dead.
*/
spin_lock_bh(&sk->sk_receive_queue.lock);
sock_set_flag(sk, SOCK_DEAD);
spin_unlock_bh(&sk->sk_receive_queue.lock);
sock->sk = NULL;
WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
if (cf_sk->debugfs_socket_dir != NULL)
debugfs_remove_recursive(cf_sk->debugfs_socket_dir);
lock_sock(&(cf_sk->sk));
sk->sk_state = CAIF_DISCONNECTED;
sk->sk_shutdown = SHUTDOWN_MASK;
caif_disconnect_client(sock_net(sk), &cf_sk->layer);
cf_sk->sk.sk_socket->state = SS_DISCONNECTING;
wake_up_interruptible_poll(sk_sleep(sk), POLLERR|POLLHUP);
sock_orphan(sk);
sk_stream_kill_queues(&cf_sk->sk);
release_sock(sk);
sock_put(sk);
return 0;
}
/* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */
static unsigned int caif_poll(struct file *file,
struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
unsigned int mask;
struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
/* exceptional events? */
if (sk->sk_err)
mask |= POLLERR;
if (sk->sk_shutdown == SHUTDOWN_MASK)
mask |= POLLHUP;
if (sk->sk_shutdown & RCV_SHUTDOWN)
mask |= POLLRDHUP;
/* readable? */
if (!skb_queue_empty(&sk->sk_receive_queue) ||
(sk->sk_shutdown & RCV_SHUTDOWN))
mask |= POLLIN | POLLRDNORM;
/*
* we set writable also when the other side has shut down the
* connection. This prevents stuck sockets.
*/
if (sock_writeable(sk) && tx_flow_is_on(cf_sk))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
return mask;
}
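/*
 * Sketch of the userspace counterpart: after a flow-off, caif_poll()
 * above stops reporting POLLOUT until the tx flow is turned back on,
 * so a writer can block in poll() instead of spinning on failed sends.
 * Illustrative only, not part of this file.
 */
#if 0 /* userspace example, not compiled with the kernel */
#include <poll.h>

static int caif_wait_writable(int fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLOUT };

	return poll(&pfd, 1, timeout_ms);
}
#endif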
static const struct proto_ops caif_seqpacket_ops = {
.family = PF_CAIF,
.owner = THIS_MODULE,
.release = caif_release,
.bind = sock_no_bind,
.connect = caif_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = sock_no_getname,
.poll = caif_poll,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = setsockopt,
.getsockopt = sock_no_getsockopt,
.sendmsg = caif_seqpkt_sendmsg,
.recvmsg = caif_seqpkt_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
};
static const struct proto_ops caif_stream_ops = {
.family = PF_CAIF,
.owner = THIS_MODULE,
.release = caif_release,
.bind = sock_no_bind,
.connect = caif_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = sock_no_getname,
.poll = caif_poll,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = setsockopt,
.getsockopt = sock_no_getsockopt,
.sendmsg = caif_stream_sendmsg,
.recvmsg = caif_stream_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
};
/* This function is called when a socket is finally destroyed. */
static void caif_sock_destructor(struct sock *sk)
{
struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
caif_assert(!atomic_read(&sk->sk_wmem_alloc));
caif_assert(sk_unhashed(sk));
caif_assert(!sk->sk_socket);
if (!sock_flag(sk, SOCK_DEAD)) {
pr_debug("Attempt to release alive CAIF socket: %p\n", sk);
return;
}
sk_stream_kill_queues(&cf_sk->sk);
caif_free_client(&cf_sk->layer);
}
static int caif_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk = NULL;
struct caifsock *cf_sk = NULL;
static struct proto prot = {.name = "PF_CAIF",
.owner = THIS_MODULE,
.obj_size = sizeof(struct caifsock),
};
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_NET_ADMIN))
return -EPERM;
/*
* The sock->type specifies the socket type to use.
* The CAIF socket is a packet stream in the sense
* that it is packet based. CAIF trusts the reliability
	 * of the link; no resending is implemented.
*/
if (sock->type == SOCK_SEQPACKET)
sock->ops = &caif_seqpacket_ops;
else if (sock->type == SOCK_STREAM)
sock->ops = &caif_stream_ops;
else
return -ESOCKTNOSUPPORT;
if (protocol < 0 || protocol >= CAIFPROTO_MAX)
return -EPROTONOSUPPORT;
/*
* Set the socket state to unconnected. The socket state
	 * is not really used at all in net/core or socket.c, but the
	 * initialization makes sure that sock->state does not start out
	 * uninitialized.
*/
sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot);
if (!sk)
return -ENOMEM;
cf_sk = container_of(sk, struct caifsock, sk);
/* Store the protocol */
sk->sk_protocol = (unsigned char) protocol;
/* Initialize default priority for well-known cases */
switch (protocol) {
case CAIFPROTO_AT:
sk->sk_priority = TC_PRIO_CONTROL;
break;
case CAIFPROTO_RFM:
sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
break;
default:
sk->sk_priority = TC_PRIO_BESTEFFORT;
}
/*
* Lock in order to try to stop someone from opening the socket
* too early.
*/
lock_sock(&(cf_sk->sk));
	/* Initialize the nonzero default sock structure data. */
sock_init_data(sock, sk);
sk->sk_destruct = caif_sock_destructor;
mutex_init(&cf_sk->readlock); /* single task reading lock */
cf_sk->layer.ctrlcmd = caif_ctrl_cb;
cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
cf_sk->sk.sk_state = CAIF_DISCONNECTED;
set_tx_flow_off(cf_sk);
set_rx_flow_on(cf_sk);
/* Set default options on configuration */
cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
cf_sk->conn_req.protocol = protocol;
release_sock(&cf_sk->sk);
return 0;
}
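/*
 * Userspace sketch of creating the socket handled by caif_create()
 * above. Only SOCK_SEQPACKET and SOCK_STREAM are accepted, the protocol
 * must be one of the CAIFPROTO_* values, and the caller needs
 * CAP_SYS_ADMIN or CAP_NET_ADMIN, per the check at the top of the
 * function. Illustrative only, not part of this file.
 */
#if 0 /* userspace example, not compiled with the kernel */
#include <sys/socket.h>
#include <linux/caif/caif_socket.h>

static int caif_open_util(void)
{
	return socket(AF_CAIF, SOCK_SEQPACKET, CAIFPROTO_UTIL);
}
#endif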
static struct net_proto_family caif_family_ops = {
.family = PF_CAIF,
.create = caif_create,
.owner = THIS_MODULE,
};
static int __init caif_sktinit_module(void)
{
	return sock_register(&caif_family_ops);
}
static void __exit caif_sktexit_module(void)
{
sock_unregister(PF_CAIF);
}
module_init(caif_sktinit_module);
module_exit(caif_sktexit_module);
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_5686_0 |
crossvul-cpp_data_good_3842_0 | /*
* fs/isofs/export.c
*
* (C) 2004 Paul Serice - The new inode scheme requires switching
* from iget() to iget5_locked() which means
* the NFS export operations have to be hand
* coded because the default routines rely on
* iget().
*
* The following files are helpful:
*
* Documentation/filesystems/nfs/Exporting
* fs/exportfs/expfs.c.
*/
#include "isofs.h"
static struct dentry *
isofs_export_iget(struct super_block *sb,
unsigned long block,
unsigned long offset,
__u32 generation)
{
struct inode *inode;
if (block == 0)
return ERR_PTR(-ESTALE);
inode = isofs_iget(sb, block, offset);
if (IS_ERR(inode))
return ERR_CAST(inode);
if (generation && inode->i_generation != generation) {
iput(inode);
return ERR_PTR(-ESTALE);
}
return d_obtain_alias(inode);
}
/* This function is surprisingly simple. The trick is understanding
* that "child" is always a directory. So, to find its parent, you
* simply need to find its ".." entry, normalize its block and offset,
* and return the underlying inode. See the comments for
* isofs_normalize_block_and_offset(). */
static struct dentry *isofs_export_get_parent(struct dentry *child)
{
unsigned long parent_block = 0;
unsigned long parent_offset = 0;
struct inode *child_inode = child->d_inode;
struct iso_inode_info *e_child_inode = ISOFS_I(child_inode);
struct iso_directory_record *de = NULL;
struct buffer_head * bh = NULL;
struct dentry *rv = NULL;
/* "child" must always be a directory. */
if (!S_ISDIR(child_inode->i_mode)) {
printk(KERN_ERR "isofs: isofs_export_get_parent(): "
"child is not a directory!\n");
rv = ERR_PTR(-EACCES);
goto out;
}
/* It is an invariant that the directory offset is zero. If
* it is not zero, it means the directory failed to be
* normalized for some reason. */
if (e_child_inode->i_iget5_offset != 0) {
printk(KERN_ERR "isofs: isofs_export_get_parent(): "
"child directory not normalized!\n");
rv = ERR_PTR(-EACCES);
goto out;
}
/* The child inode has been normalized such that its
* i_iget5_block value points to the "." entry. Fortunately,
* the ".." entry is located in the same block. */
parent_block = e_child_inode->i_iget5_block;
/* Get the block in question. */
bh = sb_bread(child_inode->i_sb, parent_block);
if (bh == NULL) {
rv = ERR_PTR(-EACCES);
goto out;
}
/* This is the "." entry. */
de = (struct iso_directory_record*)bh->b_data;
/* The ".." entry is always the second entry. */
parent_offset = (unsigned long)isonum_711(de->length);
de = (struct iso_directory_record*)(bh->b_data + parent_offset);
/* Verify it is in fact the ".." entry. */
if ((isonum_711(de->name_len) != 1) || (de->name[0] != 1)) {
printk(KERN_ERR "isofs: Unable to find the \"..\" "
"directory for NFS.\n");
rv = ERR_PTR(-EACCES);
goto out;
}
/* Normalize */
isofs_normalize_block_and_offset(de, &parent_block, &parent_offset);
rv = d_obtain_alias(isofs_iget(child_inode->i_sb, parent_block,
parent_offset));
out:
if (bh)
brelse(bh);
return rv;
}
static int
isofs_export_encode_fh(struct inode *inode,
__u32 *fh32,
int *max_len,
struct inode *parent)
{
struct iso_inode_info * ei = ISOFS_I(inode);
int len = *max_len;
int type = 1;
__u16 *fh16 = (__u16*)fh32;
/*
* WARNING: max_len is 5 for NFSv2. Because of this
* limitation, we use the lower 16 bits of fh32[1] to hold the
* offset of the inode and the upper 16 bits of fh32[1] to
* hold the offset of the parent.
*/
if (parent && (len < 5)) {
*max_len = 5;
return 255;
} else if (len < 3) {
*max_len = 3;
return 255;
}
len = 3;
fh32[0] = ei->i_iget5_block;
fh16[2] = (__u16)ei->i_iget5_offset; /* fh16 [sic] */
fh16[3] = 0; /* avoid leaking uninitialized data */
fh32[2] = inode->i_generation;
if (parent) {
struct iso_inode_info *eparent;
eparent = ISOFS_I(parent);
fh32[3] = eparent->i_iget5_block;
fh16[3] = (__u16)eparent->i_iget5_offset; /* fh16 [sic] */
fh32[4] = parent->i_generation;
len = 5;
type = 2;
}
*max_len = len;
return type;
}
struct isofs_fid {
u32 block;
u16 offset;
u16 parent_offset;
u32 generation;
u32 parent_block;
u32 parent_generation;
};
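/*
 * How the raw handle words written by isofs_export_encode_fh() line up
 * with struct isofs_fid above (an illustration of the packing described
 * in that function, not code the filesystem uses):
 *
 *   fh32[0] -> block            fh16[2] -> offset
 *   fh16[3] -> parent_offset    fh32[2] -> generation
 *   fh32[3] -> parent_block     fh32[4] -> parent_generation
 */
#if 0 /* sanity sketch; would need to live inside a function */
	BUILD_BUG_ON(sizeof(struct isofs_fid) != 5 * sizeof(__u32));
#endif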
static struct dentry *isofs_fh_to_dentry(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type)
{
struct isofs_fid *ifid = (struct isofs_fid *)fid;
if (fh_len < 3 || fh_type > 2)
return NULL;
return isofs_export_iget(sb, ifid->block, ifid->offset,
ifid->generation);
}
static struct dentry *isofs_fh_to_parent(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type)
{
struct isofs_fid *ifid = (struct isofs_fid *)fid;
if (fh_type != 2)
return NULL;
return isofs_export_iget(sb,
fh_len > 2 ? ifid->parent_block : 0,
ifid->parent_offset,
fh_len > 4 ? ifid->parent_generation : 0);
}
const struct export_operations isofs_export_ops = {
.encode_fh = isofs_export_encode_fh,
.fh_to_dentry = isofs_fh_to_dentry,
.fh_to_parent = isofs_fh_to_parent,
.get_parent = isofs_export_get_parent,
};
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_3842_0 |
crossvul-cpp_data_good_1508_6 | /*
Copyright (C) 2011 ABRT team
Copyright (C) 2011 RedHat Inc
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Authors:
Anton Arapov <anton@redhat.com>
Arjan van de Ven <arjan@linux.intel.com>
*/
#include <syslog.h>
#include "libabrt.h"
/* How many problem dirs to create at most?
* Also causes cooldown sleep with -t if exceeded -
* useful when called from a log watcher.
*/
#define MAX_DUMPED_DD_COUNT 5
static bool world_readable_dump = false;
static bool throttle_dd_creation = false;
static const char *debug_dumps_dir = ".";
#define MAX_SCAN_BLOCK (4*1024*1024)
#define READ_AHEAD (10*1024)
static void scan_syslog_file(GList **oops_list, int fd)
{
struct stat st;
struct stat *statbuf = &st;
/* Try to not allocate an absurd amount of memory */
int sz = MAX_SCAN_BLOCK - READ_AHEAD;
/* If it's a real file, estimate size after cur pos */
off_t cur_pos = lseek(fd, 0, SEEK_CUR);
if (cur_pos >= 0 && fstat(fd, statbuf) == 0 && S_ISREG(statbuf->st_mode))
{
off_t size_to_read = statbuf->st_size - cur_pos;
if (size_to_read >= 0 && sz > size_to_read)
sz = size_to_read;
}
/*
* In theory we have a race here, since someone can spew
* to /var/log/messages before we read it in...
* We try to deal with it by reading READ_AHEAD extra.
*/
sz += READ_AHEAD;
char *buffer = xzalloc(sz);
for (;;)
{
int r = full_read(fd, buffer, sz-1);
if (r <= 0)
break;
log_debug("Read %u bytes", r);
koops_extract_oopses(oops_list, buffer, r);
//TODO: rewind to last newline?
}
free(buffer);
}
static char *list_of_tainted_modules(const char *proc_modules)
{
struct strbuf *result = strbuf_new();
const char *p = proc_modules;
for (;;)
{
const char *end = strchrnul(p, '\n');
const char *paren = strchrnul(p, '(');
/* We look for a line with this format:
* "kvm_intel 126289 0 - Live 0xf829e000 (taint_flags)"
* where taint_flags have letters
* (flags '+' and '-' indicate (un)loading, we must ignore them).
*/
while (++paren < end)
{
if ((unsigned)(toupper(*paren) - 'A') <= 'Z'-'A')
{
strbuf_append_strf(result, result->len == 0 ? "%.*s" : ",%.*s",
(int)(strchrnul(p,' ') - p), p
);
break;
}
if (*paren == ')')
break;
}
if (*end == '\0')
break;
p = end + 1;
}
if (result->len == 0)
{
strbuf_free(result);
return NULL;
}
return strbuf_free_nobuf(result);
}
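/*
 * Usage sketch for list_of_tainted_modules(), assuming a /proc/modules
 * line of the shape documented above; the module name and taint flags
 * below are made up for illustration.
 */
#if 0
static void tainted_modules_example(void)
{
	char *mods = list_of_tainted_modules(
			"kvm_intel 126289 0 - Live 0xf829e000 (PO)\n");
	if (mods) {			/* yields "kvm_intel" */
		log("Tainted modules: %s", mods);
		free(mods);
	}
}
#endif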
static void save_oops_data_in_dump_dir(struct dump_dir *dd, char *oops, const char *proc_modules)
{
char *first_line = oops;
char *second_line = (char*)strchr(first_line, '\n'); /* never NULL */
*second_line++ = '\0';
if (first_line[0])
dd_save_text(dd, FILENAME_KERNEL, first_line);
dd_save_text(dd, FILENAME_BACKTRACE, second_line);
/* check if trace doesn't have line: 'Your BIOS is broken' */
if (strstr(second_line, "Your BIOS is broken"))
dd_save_text(dd, FILENAME_NOT_REPORTABLE,
_("A kernel problem occurred because of broken BIOS. "
"Unfortunately, such problems are not fixable by kernel maintainers."));
/* check if trace doesn't have line: 'Your hardware is unsupported' */
else if (strstr(second_line, "Your hardware is unsupported"))
dd_save_text(dd, FILENAME_NOT_REPORTABLE,
_("A kernel problem occurred, but your hardware is unsupported, "
"therefore kernel maintainers are unable to fix this problem."));
else
{
char *tainted_short = kernel_tainted_short(second_line);
if (tainted_short)
{
log_notice("Kernel is tainted '%s'", tainted_short);
dd_save_text(dd, FILENAME_TAINTED_SHORT, tainted_short);
char *tnt_long = kernel_tainted_long(tainted_short);
dd_save_text(dd, FILENAME_TAINTED_LONG, tnt_long);
free(tnt_long);
struct strbuf *reason = strbuf_new();
const char *fmt = _("A kernel problem occurred, but your kernel has been "
"tainted (flags:%s). Kernel maintainers are unable to "
"diagnose tainted reports.");
strbuf_append_strf(reason, fmt, tainted_short);
char *modlist = !proc_modules ? NULL : list_of_tainted_modules(proc_modules);
if (modlist)
{
strbuf_append_strf(reason, _(" Tainted modules: %s."), modlist);
free(modlist);
}
dd_save_text(dd, FILENAME_NOT_REPORTABLE, reason->buf);
strbuf_free(reason);
free(tainted_short);
}
}
// TODO: add "Kernel oops: " prefix, so that all oopses have recognizable FILENAME_REASON?
// kernel oops 1st line may look quite puzzling otherwise...
strchrnul(second_line, '\n')[0] = '\0';
dd_save_text(dd, FILENAME_REASON, second_line);
}
/* returns number of errors */
static unsigned create_oops_dump_dirs(GList *oops_list, unsigned oops_cnt)
{
unsigned countdown = MAX_DUMPED_DD_COUNT; /* do not report hundreds of oopses */
log_notice("Saving %u oopses as problem dirs", oops_cnt >= countdown ? countdown : oops_cnt);
char *cmdline_str = xmalloc_fopen_fgetline_fclose("/proc/cmdline");
char *fips_enabled = xmalloc_fopen_fgetline_fclose("/proc/sys/crypto/fips_enabled");
char *proc_modules = xmalloc_open_read_close("/proc/modules", /*maxsize:*/ NULL);
char *suspend_stats = xmalloc_open_read_close("/sys/kernel/debug/suspend_stats", /*maxsize:*/ NULL);
time_t t = time(NULL);
const char *iso_date = iso_date_string(&t);
/* dump should be readable by all if we're run with -x */
uid_t my_euid = (uid_t)-1L;
mode_t mode = DEFAULT_DUMP_DIR_MODE | S_IROTH;
/* and readable only for the owner otherwise */
if (!world_readable_dump)
{
mode = DEFAULT_DUMP_DIR_MODE;
my_euid = geteuid();
}
if (g_settings_privatereports)
{
if (world_readable_dump)
log("Not going to make dump directories world readable because PrivateReports is on");
mode = DEFAULT_DUMP_DIR_MODE;
my_euid = 0;
}
pid_t my_pid = getpid();
unsigned idx = 0;
unsigned errors = 0;
while (idx < oops_cnt)
{
char base[sizeof("oops-YYYY-MM-DD-hh:mm:ss-%lu-%lu") + 2 * sizeof(long)*3];
sprintf(base, "oops-%s-%lu-%lu", iso_date, (long)my_pid, (long)idx);
char *path = concat_path_file(debug_dumps_dir, base);
struct dump_dir *dd = dd_create(path, /*uid:*/ my_euid, mode);
if (dd)
{
dd_create_basic_files(dd, /*uid:*/ my_euid, NULL);
save_oops_data_in_dump_dir(dd, (char*)g_list_nth_data(oops_list, idx++), proc_modules);
dd_save_text(dd, FILENAME_ABRT_VERSION, VERSION);
dd_save_text(dd, FILENAME_ANALYZER, "Kerneloops");
dd_save_text(dd, FILENAME_TYPE, "Kerneloops");
if (cmdline_str)
dd_save_text(dd, FILENAME_CMDLINE, cmdline_str);
if (proc_modules)
dd_save_text(dd, "proc_modules", proc_modules);
if (fips_enabled && strcmp(fips_enabled, "0") != 0)
dd_save_text(dd, "fips_enabled", fips_enabled);
if (suspend_stats)
dd_save_text(dd, "suspend_stats", suspend_stats);
dd_close(dd);
notify_new_path(path);
}
else
errors++;
free(path);
if (--countdown == 0)
break;
if (dd && throttle_dd_creation)
sleep(1);
}
free(cmdline_str);
free(proc_modules);
free(fips_enabled);
free(suspend_stats);
return errors;
}
int main(int argc, char **argv)
{
/* I18n */
setlocale(LC_ALL, "");
#if ENABLE_NLS
bindtextdomain(PACKAGE, LOCALEDIR);
textdomain(PACKAGE);
#endif
abrt_init(argv);
/* Can't keep these strings/structs static: _() doesn't support that */
    const char *program_usage_string = _(
        "& [-vusoxtm] [-d DIR]/[-D] [FILE]\n"
"\n"
"Extract oops from FILE (or standard input)"
);
enum {
OPT_v = 1 << 0,
OPT_s = 1 << 1,
OPT_o = 1 << 2,
OPT_d = 1 << 3,
OPT_D = 1 << 4,
OPT_u = 1 << 5,
OPT_x = 1 << 6,
OPT_t = 1 << 7,
OPT_m = 1 << 8,
};
char *problem_dir = NULL;
/* Keep enum above and order of options below in sync! */
struct options program_options[] = {
OPT__VERBOSE(&g_verbose),
OPT_BOOL( 's', NULL, NULL, _("Log to syslog")),
OPT_BOOL( 'o', NULL, NULL, _("Print found oopses on standard output")),
/* oopses don't contain any sensitive info, and even
* the old koops app was showing the oopses to all users
*/
OPT_STRING('d', NULL, &debug_dumps_dir, "DIR", _("Create new problem directory in DIR for every oops found")),
OPT_BOOL( 'D', NULL, NULL, _("Same as -d DumpLocation, DumpLocation is specified in abrt.conf")),
OPT_STRING('u', NULL, &problem_dir, "PROBLEM", _("Save the extracted information in PROBLEM")),
OPT_BOOL( 'x', NULL, NULL, _("Make the problem directory world readable")),
OPT_BOOL( 't', NULL, NULL, _("Throttle problem directory creation to 1 per second")),
OPT_BOOL( 'm', NULL, NULL, _("Print search string(s) to stdout and exit")),
OPT_END()
};
unsigned opts = parse_opts(argc, argv, program_options, program_usage_string);
export_abrt_envvars(0);
msg_prefix = g_progname;
if ((opts & OPT_s) || getenv("ABRT_SYSLOG"))
{
logmode = LOGMODE_JOURNAL;
}
if (opts & OPT_m)
{
map_string_t *settings = new_map_string();
load_abrt_plugin_conf_file("oops.conf", settings);
int only_fatal_mce = 1;
try_get_map_string_item_as_bool(settings, "OnlyFatalMCE", &only_fatal_mce);
free_map_string(settings);
if (only_fatal_mce)
{
regex_t mce_re;
if (regcomp(&mce_re, "^Machine .*$", REG_NOSUB) != 0)
perror_msg_and_die(_("Failed to compile regex"));
const regex_t *filter[] = { &mce_re, NULL };
koops_print_suspicious_strings_filtered(filter);
regfree(&mce_re);
}
else
koops_print_suspicious_strings();
return 1;
}
if (opts & OPT_D)
{
if (opts & OPT_d)
show_usage_and_die(program_usage_string, program_options);
load_abrt_conf();
debug_dumps_dir = g_settings_dump_location;
g_settings_dump_location = NULL;
free_abrt_conf_data();
}
argv += optind;
if (argv[0])
xmove_fd(xopen(argv[0], O_RDONLY), STDIN_FILENO);
world_readable_dump = (opts & OPT_x);
throttle_dd_creation = (opts & OPT_t);
unsigned errors = 0;
GList *oops_list = NULL;
scan_syslog_file(&oops_list, STDIN_FILENO);
int oops_cnt = g_list_length(oops_list);
if (oops_cnt != 0)
{
log("Found oopses: %d", oops_cnt);
if (opts & OPT_o)
{
int i = 0;
while (i < oops_cnt)
{
char *kernel_bt = (char*)g_list_nth_data(oops_list, i++);
char *tainted_short = kernel_tainted_short(kernel_bt);
if (tainted_short)
log("Kernel is tainted '%s'", tainted_short);
free(tainted_short);
printf("\nVersion: %s", kernel_bt);
}
}
if (opts & (OPT_d|OPT_D))
{
if (opts & OPT_D)
{
load_abrt_conf();
debug_dumps_dir = g_settings_dump_location;
}
log("Creating problem directories");
errors = create_oops_dump_dirs(oops_list, oops_cnt);
if (errors)
log("%d errors while dumping oopses", errors);
/*
* This marker in syslog file prevents us from
* re-parsing old oopses. The only problem is that we
* can't be sure here that the file we are watching
* is the same file where syslog(xxx) stuff ends up.
*/
syslog(LOG_WARNING,
"Reported %u kernel oopses to Abrt",
oops_cnt
);
}
if (opts & OPT_u)
{
log("Updating problem directory");
switch (oops_cnt)
{
case 1:
{
struct dump_dir *dd = dd_opendir(problem_dir, /*open for writing*/0);
if (dd)
{
save_oops_data_in_dump_dir(dd, (char *)oops_list->data, /*no proc modules*/NULL);
dd_close(dd);
}
}
break;
default:
error_msg(_("Can't update the problem: more than one oops found"));
break;
}
}
}
list_free_with_free(oops_list);
//oops_list = NULL;
/* If we are run by a log watcher, this delays log rescan
* (because log watcher waits to us to terminate)
* and possibly prevents dreaded "abrt storm".
*/
int unreported_cnt = oops_cnt - MAX_DUMPED_DD_COUNT;
if (unreported_cnt > 0 && throttle_dd_creation)
{
/* Quadratic throttle time growth, but careful to not overflow in "n*n" */
int n = unreported_cnt > 30 ? 30 : unreported_cnt;
n = n * n;
if (n > 9)
log(_("Sleeping for %d seconds"), n);
sleep(n); /* max 15 mins */
}
return errors;
}
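/*
 * Arithmetic sketch of the cooldown above (a hypothetical helper, not
 * used by the program): growth is quadratic and capped at n = 30,
 * i.e. at most a 900-second (15-minute) sleep.
 */
#if 0
static unsigned cooldown_seconds(int unreported_cnt)
{
	int n = unreported_cnt > 30 ? 30 : unreported_cnt;
	return n * n;	/* 1 -> 1s, 10 -> 100s, >= 30 -> 900s */
}
#endif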
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_1508_6 |
crossvul-cpp_data_good_5500_0 | /******************************************************************************
* emulate.c
*
* Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
*
* Copyright (c) 2005 Keir Fraser
*
* Linux coding style, mod r/m decoder, segment base fixes, real-mode
* privileged instructions:
*
* Copyright (C) 2006 Qumranet
* Copyright 2010 Red Hat, Inc. and/or its affiliates.
*
* Avi Kivity <avi@qumranet.com>
* Yaniv Kamay <yaniv@qumranet.com>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*
* From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
*/
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include <asm/debugreg.h>
#include "x86.h"
#include "tss.h"
/*
* Operand types
*/
#define OpNone 0ull
#define OpImplicit 1ull /* No generic decode */
#define OpReg 2ull /* Register */
#define OpMem 3ull /* Memory */
#define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */
#define OpDI 5ull /* ES:DI/EDI/RDI */
#define OpMem64 6ull /* Memory, 64-bit */
#define OpImmUByte 7ull /* Zero-extended 8-bit immediate */
#define OpDX 8ull /* DX register */
#define OpCL 9ull /* CL register (for shifts) */
#define OpImmByte 10ull /* 8-bit sign extended immediate */
#define OpOne 11ull /* Implied 1 */
#define OpImm 12ull /* Sign extended up to 32-bit immediate */
#define OpMem16 13ull /* Memory operand (16-bit). */
#define OpMem32 14ull /* Memory operand (32-bit). */
#define OpImmU 15ull /* Immediate operand, zero extended */
#define OpSI 16ull /* SI/ESI/RSI */
#define OpImmFAddr 17ull /* Immediate far address */
#define OpMemFAddr 18ull /* Far address in memory */
#define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */
#define OpES 20ull /* ES */
#define OpCS 21ull /* CS */
#define OpSS 22ull /* SS */
#define OpDS 23ull /* DS */
#define OpFS 24ull /* FS */
#define OpGS 25ull /* GS */
#define OpMem8 26ull /* 8-bit zero extended memory operand */
#define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */
#define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo 29ull /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */
#define OpBits 5 /* Width of operand field */
#define OpMask ((1ull << OpBits) - 1)
/*
* Opcode effective-address decode tables.
* Note that we only emulate instructions that have at least one memory
* operand (excluding implicit stack references). We assume that stack
* references and instruction fetches will never occur in special memory
* areas that require emulation. So, for example, 'mov <imm>,<reg>' need
* not be handled.
*/
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp (1<<0) /* 8-bit operands. */
/* Destination operand type. */
#define DstShift 1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg (OpReg << DstShift)
#define DstMem (OpMem << DstShift)
#define DstAcc (OpAcc << DstShift)
#define DstDI (OpDI << DstShift)
#define DstMem64 (OpMem64 << DstShift)
#define DstMem16 (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX (OpDX << DstShift)
#define DstAccLo (OpAccLo << DstShift)
#define DstMask (OpMask << DstShift)
/* Source operand type. */
#define SrcShift 6
#define SrcNone (OpNone << SrcShift)
#define SrcReg (OpReg << SrcShift)
#define SrcMem (OpMem << SrcShift)
#define SrcMem16 (OpMem16 << SrcShift)
#define SrcMem32 (OpMem32 << SrcShift)
#define SrcImm (OpImm << SrcShift)
#define SrcImmByte (OpImmByte << SrcShift)
#define SrcOne (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU (OpImmU << SrcShift)
#define SrcSI (OpSI << SrcShift)
#define SrcXLat (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc (OpAcc << SrcShift)
#define SrcImmU16 (OpImmU16 << SrcShift)
#define SrcImm64 (OpImm64 << SrcShift)
#define SrcDX (OpDX << SrcShift)
#define SrcMem8 (OpMem8 << SrcShift)
#define SrcAccHi (OpAccHi << SrcShift)
#define SrcMask (OpMask << SrcShift)
#define BitOp (1<<11)
#define MemAbs (1<<12) /* Memory operand is absolute displacement */
#define String (1<<13) /* String instruction (rep capable) */
#define Stack (1<<14) /* Stack instruction (push/pop) */
#define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
#define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
#define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
#define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape (5<<15) /* Escape to coprocessor instruction */
#define InstrDual (6<<15) /* Alternate instruction decoding of mod == 3 */
#define ModeDual (7<<15) /* Different instruction for 32/64 bit */
#define Sse (1<<18) /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM (1<<19)
/* Destination is only written; never read. */
#define Mov (1<<20)
/* Misc flags */
#define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined (1<<25) /* No Such Instruction */
#define Lock (1<<26) /* lock prefix is allowed for the instruction */
#define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64 (1<<28)
#define PageTable (1 << 29) /* instruction used to write page table */
#define NotImpl (1 << 30) /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift (31)
#define Src2None (OpNone << Src2Shift)
#define Src2Mem (OpMem << Src2Shift)
#define Src2CL (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One (OpOne << Src2Shift)
#define Src2Imm (OpImm << Src2Shift)
#define Src2ES (OpES << Src2Shift)
#define Src2CS (OpCS << Src2Shift)
#define Src2SS (OpSS << Src2Shift)
#define Src2DS (OpDS << Src2Shift)
#define Src2FS (OpFS << Src2Shift)
#define Src2GS (OpGS << Src2Shift)
#define Src2Mask (OpMask << Src2Shift)
#define Mmx ((u64)1 << 40) /* MMX Vector instruction */
#define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx ((u64)1 << 43) /* Advanced Vector Extensions */
#define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
#define NoWrite ((u64)1 << 45) /* No writeback */
#define SrcWrite ((u64)1 << 46) /* Write back src operand */
#define NoMod ((u64)1 << 47) /* Mod field is ignored */
#define Intercept ((u64)1 << 48) /* Has valid intercept field */
#define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */
#define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */
#define NearBranch ((u64)1 << 52) /* Near branches */
#define No16 ((u64)1 << 53) /* No 16 bit operand */
#define IncSP ((u64)1 << 54) /* SP is incremented before ModRM calc */
#define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8
/*
* fastop functions have a special calling convention:
*
* dst: rax (in/out)
* src: rdx (in/out)
* src2: rcx (in)
* flags: rflags (in/out)
* ex: rsi (in:fastop pointer, out:zero if exception)
*
* Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
* different operand sizes can be reached by calculation, rather than a jump
* table (which would be bigger than the code).
*
* fastop functions are declared as taking a never-defined fastop parameter,
* so they can't be called from C directly.
*/
struct fastop;
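/*
 * A simplified sketch of the address arithmetic the comment above
 * describes (it mirrors the dispatch in fastop(), defined later in this
 * file): because every entry is padded to FASTOP_SIZE bytes, the handler
 * for a given operand size is reached by offsetting from the base symbol
 * instead of indexing a jump table.
 */
#if 0 /* hypothetical helper, not used by the emulator */
static void *fastop_for_size(void (*fop)(struct fastop *), unsigned bytes)
{
	/* bytes is 1, 2, 4 or 8, so __ffs(bytes) selects entry 0..3 */
	return (void *)fop + __ffs(bytes) * FASTOP_SIZE;
}
#endif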
struct opcode {
u64 flags : 56;
u64 intercept : 8;
union {
int (*execute)(struct x86_emulate_ctxt *ctxt);
const struct opcode *group;
const struct group_dual *gdual;
const struct gprefix *gprefix;
const struct escape *esc;
const struct instr_dual *idual;
const struct mode_dual *mdual;
void (*fastop)(struct fastop *fake);
} u;
int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};
struct group_dual {
struct opcode mod012[8];
struct opcode mod3[8];
};
struct gprefix {
struct opcode pfx_no;
struct opcode pfx_66;
struct opcode pfx_f2;
struct opcode pfx_f3;
};
struct escape {
struct opcode op[8];
struct opcode high[64];
};
struct instr_dual {
struct opcode mod012;
struct opcode mod3;
};
struct mode_dual {
struct opcode mode32;
struct opcode mode64;
};
#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
enum x86_transfer_type {
X86_TRANSFER_NONE,
X86_TRANSFER_CALL_JMP,
X86_TRANSFER_RET,
X86_TRANSFER_TASK_SWITCH,
};
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
if (!(ctxt->regs_valid & (1 << nr))) {
ctxt->regs_valid |= 1 << nr;
ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
}
return ctxt->_regs[nr];
}
static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
ctxt->regs_valid |= 1 << nr;
ctxt->regs_dirty |= 1 << nr;
return &ctxt->_regs[nr];
}
static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
reg_read(ctxt, nr);
return reg_write(ctxt, nr);
}
static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
unsigned reg;
for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}
static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
ctxt->regs_dirty = 0;
ctxt->regs_valid = 0;
}
/*
* These EFLAGS bits are restored from saved value during emulation, and
* any changes are written back to the saved value after emulation.
*/
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
X86_EFLAGS_PF|X86_EFLAGS_CF)
#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
#define FOP_FUNC(name) \
".align " __stringify(FASTOP_SIZE) " \n\t" \
".type " name ", @function \n\t" \
name ":\n\t"
#define FOP_RET "ret \n\t"
#define FOP_START(op) \
extern void em_##op(struct fastop *fake); \
asm(".pushsection .text, \"ax\" \n\t" \
".global em_" #op " \n\t" \
FOP_FUNC("em_" #op)
#define FOP_END \
".popsection")
#define FOPNOP() \
FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \
FOP_RET
#define FOP1E(op, dst) \
FOP_FUNC(#op "_" #dst) \
"10: " #op " %" #dst " \n\t" FOP_RET
#define FOP1EEX(op, dst) \
FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
#define FASTOP1(op) \
FOP_START(op) \
FOP1E(op##b, al) \
FOP1E(op##w, ax) \
FOP1E(op##l, eax) \
ON64(FOP1E(op##q, rax)) \
FOP_END
/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
FOP_START(name) \
FOP1E(op, cl) \
FOP1E(op, cx) \
FOP1E(op, ecx) \
ON64(FOP1E(op, rcx)) \
FOP_END
/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
FOP_START(name) \
FOP1EEX(op, cl) \
FOP1EEX(op, cx) \
FOP1EEX(op, ecx) \
ON64(FOP1EEX(op, rcx)) \
FOP_END
#define FOP2E(op, dst, src) \
FOP_FUNC(#op "_" #dst "_" #src) \
#op " %" #src ", %" #dst " \n\t" FOP_RET
#define FASTOP2(op) \
FOP_START(op) \
FOP2E(op##b, al, dl) \
FOP2E(op##w, ax, dx) \
FOP2E(op##l, eax, edx) \
ON64(FOP2E(op##q, rax, rdx)) \
FOP_END
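/*
 * For example, FASTOP2(add) expands (roughly) to an asm blob defining
 * em_add with four FASTOP_SIZE-aligned entry points, one per operand
 * size:
 *
 *   em_add + 0:   addb %dl,  %al    ; ret
 *   em_add + 8:   addw %dx,  %ax    ; ret
 *   em_add + 16:  addl %edx, %eax   ; ret
 *   em_add + 24:  addq %rdx, %rax   ; ret   (64-bit builds only)
 *
 * which is what makes the size-indexed dispatch sketched above work.
 */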
/* 2 operand, word only */
#define FASTOP2W(op) \
FOP_START(op) \
FOPNOP() \
FOP2E(op##w, ax, dx) \
FOP2E(op##l, eax, edx) \
ON64(FOP2E(op##q, rax, rdx)) \
FOP_END
/* 2 operand, src is CL */
#define FASTOP2CL(op) \
FOP_START(op) \
FOP2E(op##b, al, cl) \
FOP2E(op##w, ax, cl) \
FOP2E(op##l, eax, cl) \
ON64(FOP2E(op##q, rax, cl)) \
FOP_END
/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
FOP_START(name) \
FOP2E(op##b, dl, al) \
FOP2E(op##w, dx, ax) \
FOP2E(op##l, edx, eax) \
ON64(FOP2E(op##q, rdx, rax)) \
FOP_END
#define FOP3E(op, dst, src, src2) \
FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
#op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
FOP_START(op) \
FOPNOP() \
FOP3E(op##w, ax, dx, cl) \
FOP3E(op##l, eax, edx, cl) \
ON64(FOP3E(op##q, rax, rdx, cl)) \
FOP_END
/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) \
".align 4 \n\t" \
".type " #op ", @function \n\t" \
#op ": \n\t" \
#op " %al \n\t" \
FOP_RET
asm(".global kvm_fastop_exception \n"
"kvm_fastop_exception: xor %esi, %esi; ret");
FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;
FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
enum x86_intercept intercept,
enum x86_intercept_stage stage)
{
struct x86_instruction_info info = {
.intercept = intercept,
.rep_prefix = ctxt->rep_prefix,
.modrm_mod = ctxt->modrm_mod,
.modrm_reg = ctxt->modrm_reg,
.modrm_rm = ctxt->modrm_rm,
.src_val = ctxt->src.val64,
.dst_val = ctxt->dst.val64,
.src_bytes = ctxt->src.bytes,
.dst_bytes = ctxt->dst.bytes,
.ad_bytes = ctxt->ad_bytes,
.next_rip = ctxt->eip,
};
return ctxt->ops->intercept(ctxt, &info, stage);
}
static void assign_masked(ulong *dest, ulong src, ulong mask)
{
*dest = (*dest & ~mask) | (src & mask);
}
static void assign_register(unsigned long *reg, u64 val, int bytes)
{
/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
switch (bytes) {
case 1:
*(u8 *)reg = (u8)val;
break;
case 2:
*(u16 *)reg = (u16)val;
break;
case 4:
*reg = (u32)val;
break; /* 64b: zero-extend */
case 8:
*reg = val;
break;
}
}
static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
return (1UL << (ctxt->ad_bytes << 3)) - 1;
}
static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
u16 sel;
struct desc_struct ss;
if (ctxt->mode == X86EMUL_MODE_PROT64)
return ~0UL;
ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */
}
static int stack_size(struct x86_emulate_ctxt *ctxt)
{
return (__fls(stack_mask(ctxt)) + 1) >> 3;
}
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
if (ctxt->ad_bytes == sizeof(unsigned long))
return reg;
else
return reg & ad_mask(ctxt);
}
static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
return address_mask(ctxt, reg_read(ctxt, reg));
}
static void masked_increment(ulong *reg, ulong mask, int inc)
{
assign_masked(reg, *reg + inc, mask);
}
static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
ulong *preg = reg_rmw(ctxt, reg);
assign_register(preg, *preg + inc, ctxt->ad_bytes);
}
static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}
static u32 desc_limit_scaled(struct desc_struct *desc)
{
u32 limit = get_desc_limit(desc);
return desc->g ? (limit << 12) | 0xfff : limit;
}
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
return 0;
return ctxt->ops->get_cached_segment_base(ctxt, seg);
}
static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
u32 error, bool valid)
{
WARN_ON(vec > 0x1f);
ctxt->exception.vector = vec;
ctxt->exception.error_code = error;
ctxt->exception.error_code_valid = valid;
return X86EMUL_PROPAGATE_FAULT;
}
static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
return emulate_exception(ctxt, DB_VECTOR, 0, false);
}
static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
return emulate_exception(ctxt, GP_VECTOR, err, true);
}
static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
return emulate_exception(ctxt, SS_VECTOR, err, true);
}
static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
return emulate_exception(ctxt, UD_VECTOR, 0, false);
}
static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
return emulate_exception(ctxt, TS_VECTOR, err, true);
}
static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
return emulate_exception(ctxt, DE_VECTOR, 0, false);
}
static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
return emulate_exception(ctxt, NM_VECTOR, 0, false);
}
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
u16 selector;
struct desc_struct desc;
ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
return selector;
}
static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
unsigned seg)
{
u16 dummy;
u32 base3;
struct desc_struct desc;
ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}
/*
* x86 defines three classes of vector instructions: explicitly
* aligned, explicitly unaligned, and the rest, which change behaviour
* depending on whether they're AVX encoded or not.
*
* Also included is CMPXCHG16B which is not a vector instruction, yet it is
* subject to the same check.
*/
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
if (likely(size < 16))
return false;
if (ctxt->d & Aligned)
return true;
else if (ctxt->d & Unaligned)
return false;
else if (ctxt->d & Avx)
return false;
else
return true;
}
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
unsigned *max_size, unsigned size,
bool write, bool fetch,
enum x86emul_mode mode, ulong *linear)
{
struct desc_struct desc;
bool usable;
ulong la;
u32 lim;
u16 sel;
la = seg_base(ctxt, addr.seg) + addr.ea;
*max_size = 0;
switch (mode) {
case X86EMUL_MODE_PROT64:
*linear = la;
if (is_noncanonical_address(la))
goto bad;
*max_size = min_t(u64, ~0u, (1ull << 48) - la);
if (size > *max_size)
goto bad;
break;
default:
*linear = la = (u32)la;
usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
addr.seg);
if (!usable)
goto bad;
/* code segment in protected mode or read-only data segment */
if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
|| !(desc.type & 2)) && write)
goto bad;
/* unreadable code segment */
if (!fetch && (desc.type & 8) && !(desc.type & 2))
goto bad;
lim = desc_limit_scaled(&desc);
if (!(desc.type & 8) && (desc.type & 4)) {
/* expand-down segment */
if (addr.ea <= lim)
goto bad;
lim = desc.d ? 0xffffffff : 0xffff;
}
if (addr.ea > lim)
goto bad;
if (lim == 0xffffffff)
*max_size = ~0u;
else {
*max_size = (u64)lim + 1 - addr.ea;
if (size > *max_size)
goto bad;
}
break;
}
if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
bad:
if (addr.seg == VCPU_SREG_SS)
return emulate_ss(ctxt, 0);
else
return emulate_gp(ctxt, 0);
}
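/*
 * A hypothetical helper (not used by the emulator) naming the alignment
 * rule enforced at the end of __linearize() above: a vector access
 * faults with #GP(0) when the instruction class requires alignment and
 * the linear address is not a multiple of the operand size.
 */
#if 0
static bool would_fault_gp_on_alignment(struct x86_emulate_ctxt *ctxt,
					ulong la, unsigned size)
{
	return insn_aligned(ctxt, size) && (la & (size - 1)) != 0;
}
#endif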
static int linearize(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
unsigned size, bool write,
ulong *linear)
{
unsigned max_size;
return __linearize(ctxt, addr, &max_size, size, write, false,
ctxt->mode, linear);
}
static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
enum x86emul_mode mode)
{
ulong linear;
int rc;
unsigned max_size;
struct segmented_address addr = { .seg = VCPU_SREG_CS,
.ea = dst };
if (ctxt->op_bytes != sizeof(unsigned long))
addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
if (rc == X86EMUL_CONTINUE)
ctxt->_eip = addr.ea;
return rc;
}
static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
return assign_eip(ctxt, dst, ctxt->mode);
}
static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
const struct desc_struct *cs_desc)
{
enum x86emul_mode mode = ctxt->mode;
int rc;
#ifdef CONFIG_X86_64
if (ctxt->mode >= X86EMUL_MODE_PROT16) {
if (cs_desc->l) {
u64 efer = 0;
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (efer & EFER_LMA)
mode = X86EMUL_MODE_PROT64;
} else
mode = X86EMUL_MODE_PROT32; /* temporary value */
}
#endif
if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
rc = assign_eip(ctxt, dst, mode);
if (rc == X86EMUL_CONTINUE)
ctxt->mode = mode;
return rc;
}
static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
return assign_eip_near(ctxt, ctxt->_eip + rel);
}
static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
void *data,
unsigned size)
{
int rc;
ulong linear;
rc = linearize(ctxt, addr, size, false, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}
/*
* Prefetch the remaining bytes of the instruction without crossing page
* boundary if they are not in fetch_cache yet.
*/
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
int rc;
unsigned size, max_size;
unsigned long linear;
int cur_size = ctxt->fetch.end - ctxt->fetch.data;
struct segmented_address addr = { .seg = VCPU_SREG_CS,
.ea = ctxt->eip + cur_size };
/*
* We do not know exactly how many bytes will be needed, and
* __linearize is expensive, so fetch as much as possible. We
* just have to avoid going beyond the 15 byte limit, the end
* of the segment, or the end of the page.
*
* __linearize is called with size 0 so that it does not do any
* boundary check itself. Instead, we use max_size to check
* against op_size.
*/
rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
&linear);
if (unlikely(rc != X86EMUL_CONTINUE))
return rc;
	/* 15UL ^ cur_size equals 15 - cur_size here, since cur_size <= 15 */
	size = min_t(unsigned, 15UL ^ cur_size, max_size);
size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
/*
* One instruction can only straddle two pages,
* and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if there are still not enough
	 * bytes, we must have hit the 15-byte boundary.
*/
if (unlikely(size < op_size))
return emulate_gp(ctxt, 0);
rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
size, &ctxt->exception);
if (unlikely(rc != X86EMUL_CONTINUE))
return rc;
ctxt->fetch.end += size;
return X86EMUL_CONTINUE;
}
static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
unsigned size)
{
unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
if (unlikely(done_size < size))
return __do_insn_fetch_bytes(ctxt, size - done_size);
else
return X86EMUL_CONTINUE;
}
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
({ _type _x; \
\
rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
if (rc != X86EMUL_CONTINUE) \
goto done; \
ctxt->_eip += sizeof(_type); \
_x = *(_type __aligned(1) *) ctxt->fetch.ptr; \
ctxt->fetch.ptr += sizeof(_type); \
_x; \
})
#define insn_fetch_arr(_arr, _size, _ctxt) \
({ \
rc = do_insn_fetch_bytes(_ctxt, _size); \
if (rc != X86EMUL_CONTINUE) \
goto done; \
ctxt->_eip += (_size); \
memcpy(_arr, ctxt->fetch.ptr, _size); \
ctxt->fetch.ptr += (_size); \
})
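/*
 * Typical use of insn_fetch() (a condensed sketch of the pattern visible
 * in decode_modrm() below): the enclosing function must provide an
 * "int rc" and a "done:" label, since the macro jumps there on a fetch
 * fault.
 */
#if 0
	u8 sib = insn_fetch(u8, ctxt);		/* advances ctxt->_eip by 1 */
	s32 disp = insn_fetch(s32, ctxt);	/* and by 4 here */
#endif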
/*
* Given the 'reg' portion of a ModRM byte, and a register block, return a
* pointer into the block that addresses the relevant register.
 * @byteop indicates a byte operation; without a REX prefix, byte
 * registers 4..7 decode to the high-byte registers AH,CH,DH,BH.
*/
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
int byteop)
{
void *p;
int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
else
p = reg_rmw(ctxt, modrm_reg);
return p;
}
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
u16 *size, unsigned long *address, int op_bytes)
{
int rc;
	if (op_bytes == 2)
		op_bytes = 3; /* lgdt/lidt: a 16-bit operand still loads a 24-bit base */
*address = 0;
rc = segmented_read_std(ctxt, addr, size, 2);
if (rc != X86EMUL_CONTINUE)
return rc;
addr.ea += 2;
rc = segmented_read_std(ctxt, addr, address, op_bytes);
return rc;
}
FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);
FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);
FASTOP3WCL(shld);
FASTOP3WCL(shrd);
FASTOP2W(imul);
FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);
FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);
FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);
FASTOP2(xadd);
FASTOP2R(cmp, cmp_r);
static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
/* If src is zero, do not writeback, but update flags */
if (ctxt->src.val == 0)
ctxt->dst.type = OP_NONE;
return fastop(ctxt, em_bsf);
}
static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
/* If src is zero, do not writeback, but update flags */
if (ctxt->src.val == 0)
ctxt->dst.type = OP_NONE;
return fastop(ctxt, em_bsr);
}
static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
u8 rc;
void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
asm("push %[flags]; popf; call *%[fastop]"
: "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
return rc;
}
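/*
 * A hypothetical wrapper (not used by the emulator) showing how
 * test_cc() indexes the setcc table above: each FOP_SETCC() stub is
 * 4 bytes, so condition code 0x4 ("e"/"z", ZF set) lands on setz.
 */
#if 0
static u8 zero_flag_set(unsigned long flags)
{
	return test_cc(0x4, flags);
}
#endif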
static void fetch_register_operand(struct operand *op)
{
switch (op->bytes) {
case 1:
op->val = *(u8 *)op->addr.reg;
break;
case 2:
op->val = *(u16 *)op->addr.reg;
break;
case 4:
op->val = *(u32 *)op->addr.reg;
break;
case 8:
op->val = *(u64 *)op->addr.reg;
break;
}
}
static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
ctxt->ops->get_fpu(ctxt);
switch (reg) {
case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
default: BUG();
}
ctxt->ops->put_fpu(ctxt);
}
static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
int reg)
{
ctxt->ops->get_fpu(ctxt);
switch (reg) {
case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
default: BUG();
}
ctxt->ops->put_fpu(ctxt);
}
static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
ctxt->ops->get_fpu(ctxt);
switch (reg) {
case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
default: BUG();
}
ctxt->ops->put_fpu(ctxt);
}
static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
ctxt->ops->get_fpu(ctxt);
switch (reg) {
case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
default: BUG();
}
ctxt->ops->put_fpu(ctxt);
}
static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);
ctxt->ops->get_fpu(ctxt);
asm volatile("fninit");
ctxt->ops->put_fpu(ctxt);
return X86EMUL_CONTINUE;
}
static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
u16 fcw;
if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);
ctxt->ops->get_fpu(ctxt);
asm volatile("fnstcw %0": "+m"(fcw));
ctxt->ops->put_fpu(ctxt);
ctxt->dst.val = fcw;
return X86EMUL_CONTINUE;
}
static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
u16 fsw;
if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);
ctxt->ops->get_fpu(ctxt);
asm volatile("fnstsw %0": "+m"(fsw));
ctxt->ops->put_fpu(ctxt);
ctxt->dst.val = fsw;
return X86EMUL_CONTINUE;
}
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
struct operand *op)
{
unsigned reg = ctxt->modrm_reg;
if (!(ctxt->d & ModRM))
reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
if (ctxt->d & Sse) {
op->type = OP_XMM;
op->bytes = 16;
op->addr.xmm = reg;
read_sse_reg(ctxt, &op->vec_val, reg);
return;
}
if (ctxt->d & Mmx) {
reg &= 7;
op->type = OP_MM;
op->bytes = 8;
op->addr.mm = reg;
return;
}
op->type = OP_REG;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
fetch_register_operand(op);
op->orig_val = op->val;
}
static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
ctxt->modrm_seg = VCPU_SREG_SS;
}
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
struct operand *op)
{
u8 sib;
int index_reg, base_reg, scale;
int rc = X86EMUL_CONTINUE;
ulong modrm_ea = 0;
ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
ctxt->modrm_seg = VCPU_SREG_DS;
if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
op->type = OP_REG;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
ctxt->d & ByteOp);
if (ctxt->d & Sse) {
op->type = OP_XMM;
op->bytes = 16;
op->addr.xmm = ctxt->modrm_rm;
read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
return rc;
}
if (ctxt->d & Mmx) {
op->type = OP_MM;
op->bytes = 8;
op->addr.mm = ctxt->modrm_rm & 7;
return rc;
}
fetch_register_operand(op);
return rc;
}
op->type = OP_MEM;
if (ctxt->ad_bytes == 2) {
unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
/* 16-bit ModR/M decode. */
switch (ctxt->modrm_mod) {
case 0:
if (ctxt->modrm_rm == 6)
modrm_ea += insn_fetch(u16, ctxt);
break;
case 1:
modrm_ea += insn_fetch(s8, ctxt);
break;
case 2:
modrm_ea += insn_fetch(u16, ctxt);
break;
}
switch (ctxt->modrm_rm) {
case 0:
modrm_ea += bx + si;
break;
case 1:
modrm_ea += bx + di;
break;
case 2:
modrm_ea += bp + si;
break;
case 3:
modrm_ea += bp + di;
break;
case 4:
modrm_ea += si;
break;
case 5:
modrm_ea += di;
break;
case 6:
if (ctxt->modrm_mod != 0)
modrm_ea += bp;
break;
case 7:
modrm_ea += bx;
break;
}
if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
(ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
ctxt->modrm_seg = VCPU_SREG_SS;
modrm_ea = (u16)modrm_ea;
} else {
/* 32/64-bit ModR/M decode. */
if ((ctxt->modrm_rm & 7) == 4) {
sib = insn_fetch(u8, ctxt);
index_reg |= (sib >> 3) & 7;
base_reg |= sib & 7;
scale = sib >> 6;
if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
modrm_ea += insn_fetch(s32, ctxt);
else {
modrm_ea += reg_read(ctxt, base_reg);
adjust_modrm_seg(ctxt, base_reg);
/* Increment ESP on POP [ESP] */
if ((ctxt->d & IncSP) &&
base_reg == VCPU_REGS_RSP)
modrm_ea += ctxt->op_bytes;
}
if (index_reg != 4)
modrm_ea += reg_read(ctxt, index_reg) << scale;
} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
modrm_ea += insn_fetch(s32, ctxt);
if (ctxt->mode == X86EMUL_MODE_PROT64)
ctxt->rip_relative = 1;
} else {
base_reg = ctxt->modrm_rm;
modrm_ea += reg_read(ctxt, base_reg);
adjust_modrm_seg(ctxt, base_reg);
}
switch (ctxt->modrm_mod) {
case 1:
modrm_ea += insn_fetch(s8, ctxt);
break;
case 2:
modrm_ea += insn_fetch(s32, ctxt);
break;
}
}
op->addr.mem.ea = modrm_ea;
if (ctxt->ad_bytes != 8)
ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
done:
return rc;
}
static int decode_abs(struct x86_emulate_ctxt *ctxt,
struct operand *op)
{
int rc = X86EMUL_CONTINUE;
op->type = OP_MEM;
switch (ctxt->ad_bytes) {
case 2:
op->addr.mem.ea = insn_fetch(u16, ctxt);
break;
case 4:
op->addr.mem.ea = insn_fetch(u32, ctxt);
break;
case 8:
op->addr.mem.ea = insn_fetch(u64, ctxt);
break;
}
done:
return rc;
}
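/*
 * For bit instructions (BT/BTS/BTR/BTC) with a register bit offset and
 * a memory destination, fold the whole-word part of the offset into
 * the effective address and keep only the sub-word bit index in
 * src.val.
 */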
static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
long sv = 0, mask;
if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
mask = ~((long)ctxt->dst.bytes * 8 - 1);
if (ctxt->src.bytes == 2)
sv = (s16)ctxt->src.val & (s16)mask;
else if (ctxt->src.bytes == 4)
sv = (s32)ctxt->src.val & (s32)mask;
else
sv = (s64)ctxt->src.val & (s64)mask;
ctxt->dst.addr.mem.ea = address_mask(ctxt,
ctxt->dst.addr.mem.ea + (sv >> 3));
}
/* keep only the sub-word bit offset */
ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
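/*
 * Read guest memory through a small per-instruction cache so that a
 * restarted emulation of the same instruction replays identical data
 * instead of re-issuing the access.
 */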
static int read_emulated(struct x86_emulate_ctxt *ctxt,
unsigned long addr, void *dest, unsigned size)
{
int rc;
struct read_cache *mc = &ctxt->mem_read;
if (mc->pos < mc->end)
goto read_cached;
WARN_ON((mc->end + size) >= sizeof(mc->data));
rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
&ctxt->exception);
if (rc != X86EMUL_CONTINUE)
return rc;
mc->end += size;
read_cached:
memcpy(dest, mc->data + mc->pos, size);
mc->pos += size;
return X86EMUL_CONTINUE;
}
static int segmented_read(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
void *data,
unsigned size)
{
int rc;
ulong linear;
rc = linearize(ctxt, addr, size, false, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
return read_emulated(ctxt, linear, data, size);
}
static int segmented_write(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
const void *data,
unsigned size)
{
int rc;
ulong linear;
rc = linearize(ctxt, addr, size, true, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
return ctxt->ops->write_emulated(ctxt, linear, data, size,
&ctxt->exception);
}
static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
const void *orig_data, const void *data,
unsigned size)
{
int rc;
ulong linear;
rc = linearize(ctxt, addr, size, true, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
size, &ctxt->exception);
}
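/*
 * Emulate IN from a port. For REP string input, data is read ahead in
 * batches (bounded by the page boundary, the cache size and the
 * iteration count) into io_read and then drained from that cache, so
 * one exit can satisfy many iterations.
 */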
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
unsigned int size, unsigned short port,
void *dest)
{
struct read_cache *rc = &ctxt->io_read;
if (rc->pos == rc->end) { /* refill pio read ahead */
unsigned int in_page, n;
unsigned int count = ctxt->rep_prefix ?
address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
if (n == 0)
n = 1;
rc->pos = rc->end = 0;
if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
return 0;
rc->end = n * size;
}
if (ctxt->rep_prefix && (ctxt->d & String) &&
!(ctxt->eflags & X86_EFLAGS_DF)) {
ctxt->dst.data = rc->data + rc->pos;
ctxt->dst.type = OP_MEM_STR;
ctxt->dst.count = (rc->end - rc->pos) / size;
rc->pos = rc->end;
} else {
memcpy(dest, rc->data + rc->pos, size);
rc->pos += size;
}
return 1;
}
static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
u16 index, struct desc_struct *desc)
{
struct desc_ptr dt;
ulong addr;
ctxt->ops->get_idt(ctxt, &dt);
if (dt.size < index * 8 + 7)
return emulate_gp(ctxt, index << 3 | 0x2);
addr = dt.address + index * 8;
return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
&ctxt->exception);
}
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
u16 selector, struct desc_ptr *dt)
{
const struct x86_emulate_ops *ops = ctxt->ops;
u32 base3 = 0;
if (selector & 1 << 2) {
struct desc_struct desc;
u16 sel;
memset (dt, 0, sizeof *dt);
if (!ops->get_segment(ctxt, &sel, &desc, &base3,
VCPU_SREG_LDTR))
return;
dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
} else
ops->get_gdt(ctxt, dt);
}
static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
u16 selector, ulong *desc_addr_p)
{
struct desc_ptr dt;
u16 index = selector >> 3;
ulong addr;
get_descriptor_table_ptr(ctxt, selector, &dt);
if (dt.size < index * 8 + 7)
return emulate_gp(ctxt, selector & 0xfffc);
addr = dt.address + index * 8;
#ifdef CONFIG_X86_64
if (addr >> 32 != 0) {
u64 efer = 0;
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (!(efer & EFER_LMA))
addr &= (u32)-1;
}
#endif
*desc_addr_p = addr;
return X86EMUL_CONTINUE;
}
/* Allowed only for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, struct desc_struct *desc,
ulong *desc_addr_p)
{
int rc;
rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
if (rc != X86EMUL_CONTINUE)
return rc;
return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
&ctxt->exception);
}
/* Allowed only for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, struct desc_struct *desc)
{
int rc;
ulong addr;
rc = get_descriptor_ptr(ctxt, selector, &addr);
if (rc != X86EMUL_CONTINUE)
return rc;
return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
&ctxt->exception);
}
/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg, u8 cpl,
enum x86_transfer_type transfer,
struct desc_struct *desc)
{
struct desc_struct seg_desc, old_desc;
u8 dpl, rpl;
unsigned err_vec = GP_VECTOR;
u32 err_code = 0;
bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
ulong desc_addr;
int ret;
u16 dummy;
u32 base3 = 0;
memset(&seg_desc, 0, sizeof seg_desc);
if (ctxt->mode == X86EMUL_MODE_REAL) {
/* set real mode segment descriptor (keep limit etc. for
* unreal mode) */
ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
set_desc_base(&seg_desc, selector << 4);
goto load;
} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
/* VM86 needs a clean new segment descriptor */
set_desc_base(&seg_desc, selector << 4);
set_desc_limit(&seg_desc, 0xffff);
seg_desc.type = 3;
seg_desc.p = 1;
seg_desc.s = 1;
seg_desc.dpl = 3;
goto load;
}
rpl = selector & 3;
/* NULL selector is not valid for TR, CS and SS (except for long mode) */
if ((seg == VCPU_SREG_CS
|| (seg == VCPU_SREG_SS
&& (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
|| seg == VCPU_SREG_TR)
&& null_selector)
goto exception;
/* TR should be in GDT only */
if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
goto exception;
if (null_selector) /* for NULL selector skip all following checks */
goto load;
ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
if (ret != X86EMUL_CONTINUE)
return ret;
err_code = selector & 0xfffc;
err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
GP_VECTOR;
/* can't load system descriptor into segment selector */
if (seg <= VCPU_SREG_GS && !seg_desc.s) {
if (transfer == X86_TRANSFER_CALL_JMP)
return X86EMUL_UNHANDLEABLE;
goto exception;
}
if (!seg_desc.p) {
err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
goto exception;
}
dpl = seg_desc.dpl;
switch (seg) {
case VCPU_SREG_SS:
/*
 * segment is not a writable data segment, or segment selector's
 * RPL != CPL, or descriptor's DPL != CPL
 */
if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
goto exception;
break;
case VCPU_SREG_CS:
if (!(seg_desc.type & 8))
goto exception;
if (seg_desc.type & 4) {
/* conforming */
if (dpl > cpl)
goto exception;
} else {
/* nonconforming */
if (rpl > cpl || dpl != cpl)
goto exception;
}
/* In long mode, D/B must be clear if L is set */
if (seg_desc.d && seg_desc.l) {
u64 efer = 0;
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (efer & EFER_LMA)
goto exception;
}
/* CS(RPL) <- CPL */
selector = (selector & 0xfffc) | cpl;
break;
case VCPU_SREG_TR:
if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
goto exception;
old_desc = seg_desc;
seg_desc.type |= 2; /* busy */
ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
sizeof(seg_desc), &ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
break;
case VCPU_SREG_LDTR:
if (seg_desc.s || seg_desc.type != 2)
goto exception;
break;
default: /* DS, ES, FS, or GS */
/*
* segment is not a data or readable code segment or
* ((segment is a data or nonconforming code segment)
* and (both RPL and CPL > DPL))
*/
if ((seg_desc.type & 0xa) == 0x8 ||
(((seg_desc.type & 0xc) != 0xc) &&
(rpl > dpl && cpl > dpl)))
goto exception;
break;
}
if (seg_desc.s) {
/* mark segment as accessed */
if (!(seg_desc.type & 1)) {
seg_desc.type |= 1;
ret = write_segment_descriptor(ctxt, selector,
&seg_desc);
if (ret != X86EMUL_CONTINUE)
return ret;
}
} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
sizeof(base3), &ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
if (is_noncanonical_address(get_desc_base(&seg_desc) |
((u64)base3 << 32)))
return emulate_gp(ctxt, 0);
}
load:
ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
if (desc)
*desc = seg_desc;
return X86EMUL_CONTINUE;
exception:
return emulate_exception(ctxt, err_vec, err_code, true);
}
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg)
{
u8 cpl = ctxt->ops->cpl(ctxt);
return __load_segment_descriptor(ctxt, selector, seg, cpl,
X86_TRANSFER_NONE, NULL);
}
static void write_register_operand(struct operand *op)
{
return assign_register(op->addr.reg, op->val, op->bytes);
}
static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
switch (op->type) {
case OP_REG:
write_register_operand(op);
break;
case OP_MEM:
if (ctxt->lock_prefix)
return segmented_cmpxchg(ctxt,
op->addr.mem,
&op->orig_val,
&op->val,
op->bytes);
else
return segmented_write(ctxt,
op->addr.mem,
&op->val,
op->bytes);
break;
case OP_MEM_STR:
return segmented_write(ctxt,
op->addr.mem,
op->data,
op->bytes * op->count);
break;
case OP_XMM:
write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
break;
case OP_MM:
write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
break;
case OP_NONE:
/* no writeback */
break;
default:
break;
}
return X86EMUL_CONTINUE;
}
static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
struct segmented_address addr;
rsp_increment(ctxt, -bytes);
addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
addr.seg = VCPU_SREG_SS;
return segmented_write(ctxt, addr, data, bytes);
}
static int em_push(struct x86_emulate_ctxt *ctxt)
{
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}
static int emulate_pop(struct x86_emulate_ctxt *ctxt,
void *dest, int len)
{
int rc;
struct segmented_address addr;
addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
addr.seg = VCPU_SREG_SS;
rc = segmented_read(ctxt, addr, dest, len);
if (rc != X86EMUL_CONTINUE)
return rc;
rsp_increment(ctxt, len);
return rc;
}
static int em_pop(struct x86_emulate_ctxt *ctxt)
{
return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
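/*
 * POPF may only modify a mode- and privilege-dependent subset of
 * EFLAGS: changing IOPL requires CPL 0, changing IF requires
 * CPL <= IOPL, and in VM86 mode with IOPL < 3 the instruction raises
 * #GP.
 */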
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
void *dest, int len)
{
int rc;
unsigned long val, change_mask;
int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
int cpl = ctxt->ops->cpl(ctxt);
rc = emulate_pop(ctxt, &val, len);
if (rc != X86EMUL_CONTINUE)
return rc;
change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
X86_EFLAGS_AC | X86_EFLAGS_ID;
switch(ctxt->mode) {
case X86EMUL_MODE_PROT64:
case X86EMUL_MODE_PROT32:
case X86EMUL_MODE_PROT16:
if (cpl == 0)
change_mask |= X86_EFLAGS_IOPL;
if (cpl <= iopl)
change_mask |= X86_EFLAGS_IF;
break;
case X86EMUL_MODE_VM86:
if (iopl < 3)
return emulate_gp(ctxt, 0);
change_mask |= X86_EFLAGS_IF;
break;
default: /* real mode */
change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
break;
}
*(unsigned long *)dest =
(ctxt->eflags & ~change_mask) | (val & change_mask);
return rc;
}
static int em_popf(struct x86_emulate_ctxt *ctxt)
{
ctxt->dst.type = OP_REG;
ctxt->dst.addr.reg = &ctxt->eflags;
ctxt->dst.bytes = ctxt->op_bytes;
return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
static int em_enter(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned frame_size = ctxt->src.val;
unsigned nesting_level = ctxt->src2.val & 31;
ulong rbp;
if (nesting_level)
return X86EMUL_UNHANDLEABLE;
rbp = reg_read(ctxt, VCPU_REGS_RBP);
rc = push(ctxt, &rbp, stack_size(ctxt));
if (rc != X86EMUL_CONTINUE)
return rc;
assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
stack_mask(ctxt));
assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
stack_mask(ctxt));
return X86EMUL_CONTINUE;
}
static int em_leave(struct x86_emulate_ctxt *ctxt)
{
assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
stack_mask(ctxt));
return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}
static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
int seg = ctxt->src2.val;
ctxt->src.val = get_segment_selector(ctxt, seg);
if (ctxt->op_bytes == 4) {
rsp_increment(ctxt, -2);
ctxt->op_bytes = 2;
}
return em_push(ctxt);
}
static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
int seg = ctxt->src2.val;
unsigned long selector;
int rc;
rc = emulate_pop(ctxt, &selector, 2);
if (rc != X86EMUL_CONTINUE)
return rc;
if (ctxt->modrm_reg == VCPU_SREG_SS)
ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
if (ctxt->op_bytes > 2)
rsp_increment(ctxt, ctxt->op_bytes - 2);
rc = load_segment_descriptor(ctxt, (u16)selector, seg);
return rc;
}
static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
int rc = X86EMUL_CONTINUE;
int reg = VCPU_REGS_RAX;
while (reg <= VCPU_REGS_RDI) {
(reg == VCPU_REGS_RSP) ?
(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
++reg;
}
return rc;
}
static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
return em_push(ctxt);
}
static int em_popa(struct x86_emulate_ctxt *ctxt)
{
int rc = X86EMUL_CONTINUE;
int reg = VCPU_REGS_RDI;
u32 val;
while (reg >= VCPU_REGS_RAX) {
if (reg == VCPU_REGS_RSP) {
rsp_increment(ctxt, ctxt->op_bytes);
--reg;
}
rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
break;
assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
--reg;
}
return rc;
}
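/*
 * Real-mode interrupt delivery: push FLAGS, CS and IP, clear
 * IF/TF/AC, then fetch the new CS:IP from the IVT entry at
 * IDT base + irq * 4.
 */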
static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
const struct x86_emulate_ops *ops = ctxt->ops;
int rc;
struct desc_ptr dt;
gva_t cs_addr;
gva_t eip_addr;
u16 cs, eip;
/* TODO: Add limit checks */
ctxt->src.val = ctxt->eflags;
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->src.val = ctxt->_eip;
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
ops->get_idt(ctxt, &dt);
eip_addr = dt.address + (irq << 2);
cs_addr = dt.address + (irq << 2) + 2;
rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->_eip = eip;
return rc;
}
int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
int rc;
invalidate_registers(ctxt);
rc = __emulate_int_real(ctxt, irq);
if (rc == X86EMUL_CONTINUE)
writeback_registers(ctxt);
return rc;
}
static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
switch(ctxt->mode) {
case X86EMUL_MODE_REAL:
return __emulate_int_real(ctxt, irq);
case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
case X86EMUL_MODE_PROT32:
case X86EMUL_MODE_PROT64:
default:
/* Protected-mode interrupt delivery is not implemented yet */
return X86EMUL_UNHANDLEABLE;
}
}
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
int rc = X86EMUL_CONTINUE;
unsigned long temp_eip = 0;
unsigned long temp_eflags = 0;
unsigned long cs = 0;
unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
X86_EFLAGS_AC | X86_EFLAGS_ID |
X86_EFLAGS_FIXED;
unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
X86_EFLAGS_VIP;
/* TODO: Add stack limit check */
rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
if (temp_eip & ~0xffff)
return emulate_gp(ctxt, 0);
rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->_eip = temp_eip;
if (ctxt->op_bytes == 4)
ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
else if (ctxt->op_bytes == 2) {
ctxt->eflags &= ~0xffff;
ctxt->eflags |= temp_eflags;
}
ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
ctxt->eflags |= X86_EFLAGS_FIXED;
ctxt->ops->set_nmi_mask(ctxt, false);
return rc;
}
static int em_iret(struct x86_emulate_ctxt *ctxt)
{
switch(ctxt->mode) {
case X86EMUL_MODE_REAL:
return emulate_iret_real(ctxt);
case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
case X86EMUL_MODE_PROT32:
case X86EMUL_MODE_PROT64:
default:
/* IRET from protected mode is not implemented yet */
return X86EMUL_UNHANDLEABLE;
}
}
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned short sel;
struct desc_struct new_desc;
u8 cpl = ctxt->ops->cpl(ctxt);
memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
X86_TRANSFER_CALL_JMP,
&new_desc);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
/* Error handling is not implemented. */
if (rc != X86EMUL_CONTINUE)
return X86EMUL_UNHANDLEABLE;
return rc;
}
static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
return assign_eip_near(ctxt, ctxt->src.val);
}
static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{
int rc;
long int old_eip;
old_eip = ctxt->_eip;
rc = assign_eip_near(ctxt, ctxt->src.val);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->src.val = old_eip;
rc = em_push(ctxt);
return rc;
}
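/*
 * CMPXCHG8B: compare EDX:EAX with the 64-bit destination; on a match
 * store ECX:EBX there and set ZF, otherwise load the destination into
 * EDX:EAX and clear ZF.
 */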
static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
u64 old = ctxt->dst.orig_val64;
if (ctxt->dst.bytes == 16)
return X86EMUL_UNHANDLEABLE;
if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
ctxt->eflags &= ~X86_EFLAGS_ZF;
} else {
ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
(u32) reg_read(ctxt, VCPU_REGS_RBX);
ctxt->eflags |= X86_EFLAGS_ZF;
}
return X86EMUL_CONTINUE;
}
static int em_ret(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned long eip;
rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
return assign_eip_near(ctxt, eip);
}
static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned long eip, cs;
int cpl = ctxt->ops->cpl(ctxt);
struct desc_struct new_desc;
rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
/* Outer-privilege level return is not implemented */
if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
return X86EMUL_UNHANDLEABLE;
rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
X86_TRANSFER_RET,
&new_desc);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, eip, &new_desc);
/* Error handling is not implemented. */
if (rc != X86EMUL_CONTINUE)
return X86EMUL_UNHANDLEABLE;
return rc;
}
static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
int rc;
rc = em_ret_far(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
rsp_increment(ctxt, ctxt->src.val);
return X86EMUL_CONTINUE;
}
static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
/* Save real source value, then compare EAX against destination. */
ctxt->dst.orig_val = ctxt->dst.val;
ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
ctxt->src.orig_val = ctxt->src.val;
ctxt->src.val = ctxt->dst.orig_val;
fastop(ctxt, em_cmp);
if (ctxt->eflags & X86_EFLAGS_ZF) {
/* Success: write back to memory; no update of EAX */
ctxt->src.type = OP_NONE;
ctxt->dst.val = ctxt->src.orig_val;
} else {
/* Failure: write the value we saw to EAX. */
ctxt->src.type = OP_REG;
ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
ctxt->src.val = ctxt->dst.orig_val;
/* Create write-cycle to dest by writing the same value */
ctxt->dst.val = ctxt->dst.orig_val;
}
return X86EMUL_CONTINUE;
}
static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
int seg = ctxt->src2.val;
unsigned short sel;
int rc;
memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
rc = load_segment_descriptor(ctxt, sel, seg);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->dst.val = ctxt->src.val;
return rc;
}
static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
{
u32 eax, ebx, ecx, edx;
eax = 0x80000001;
ecx = 0;
ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
return edx & bit(X86_FEATURE_LM);
}
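/*
 * Fetch a value from the SMRAM state-save area. Note the hidden
 * control flow: a failed read makes the *calling* function return
 * X86EMUL_UNHANDLEABLE.
 */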
#define GET_SMSTATE(type, smbase, offset) \
({ \
type __val; \
int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val, \
sizeof(__val)); \
if (r != X86EMUL_CONTINUE) \
return X86EMUL_UNHANDLEABLE; \
__val; \
})
static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
{
desc->g = (flags >> 23) & 1;
desc->d = (flags >> 22) & 1;
desc->l = (flags >> 21) & 1;
desc->avl = (flags >> 20) & 1;
desc->p = (flags >> 15) & 1;
desc->dpl = (flags >> 13) & 3;
desc->s = (flags >> 12) & 1;
desc->type = (flags >> 8) & 15;
}
static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
{
struct desc_struct desc;
int offset;
u16 selector;
selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);
if (n < 3)
offset = 0x7f84 + n * 12;
else
offset = 0x7f2c + (n - 3) * 12;
set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8));
set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4));
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
return X86EMUL_CONTINUE;
}
static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
{
struct desc_struct desc;
int offset;
u16 selector;
u32 base3;
offset = 0x7e00 + n * 16;
selector = GET_SMSTATE(u16, smbase, offset);
rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4));
set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8));
base3 = GET_SMSTATE(u32, smbase, offset + 12);
ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
return X86EMUL_CONTINUE;
}
static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
u64 cr0, u64 cr4)
{
int bad;
/*
* First enable PAE, long mode needs it before CR0.PG = 1 is set.
* Then enable protected mode. However, PCID cannot be enabled
* if EFER.LMA=0, so set it separately.
*/
bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
if (bad)
return X86EMUL_UNHANDLEABLE;
bad = ctxt->ops->set_cr(ctxt, 0, cr0);
if (bad)
return X86EMUL_UNHANDLEABLE;
if (cr4 & X86_CR4_PCIDE) {
bad = ctxt->ops->set_cr(ctxt, 4, cr4);
if (bad)
return X86EMUL_UNHANDLEABLE;
}
return X86EMUL_CONTINUE;
}
static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
{
struct desc_struct desc;
struct desc_ptr dt;
u16 selector;
u32 val, cr0, cr4;
int i;
cr0 = GET_SMSTATE(u32, smbase, 0x7ffc);
ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8));
ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
ctxt->_eip = GET_SMSTATE(u32, smbase, 0x7ff0);
for (i = 0; i < 8; i++)
*reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);
val = GET_SMSTATE(u32, smbase, 0x7fcc);
ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
val = GET_SMSTATE(u32, smbase, 0x7fc8);
ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
selector = GET_SMSTATE(u32, smbase, 0x7fc4);
set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f64));
set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f60));
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f5c));
ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
selector = GET_SMSTATE(u32, smbase, 0x7fc0);
set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f80));
set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f7c));
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f78));
ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
dt.address = GET_SMSTATE(u32, smbase, 0x7f74);
dt.size = GET_SMSTATE(u32, smbase, 0x7f70);
ctxt->ops->set_gdt(ctxt, &dt);
dt.address = GET_SMSTATE(u32, smbase, 0x7f58);
dt.size = GET_SMSTATE(u32, smbase, 0x7f54);
ctxt->ops->set_idt(ctxt, &dt);
for (i = 0; i < 6; i++) {
int r = rsm_load_seg_32(ctxt, smbase, i);
if (r != X86EMUL_CONTINUE)
return r;
}
cr4 = GET_SMSTATE(u32, smbase, 0x7f14);
ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
return rsm_enter_protected_mode(ctxt, cr0, cr4);
}
static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
{
struct desc_struct desc;
struct desc_ptr dt;
u64 val, cr0, cr4;
u32 base3;
u16 selector;
int i, r;
for (i = 0; i < 16; i++)
*reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
ctxt->_eip = GET_SMSTATE(u64, smbase, 0x7f78);
ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;
val = GET_SMSTATE(u32, smbase, 0x7f68);
ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
val = GET_SMSTATE(u32, smbase, 0x7f60);
ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
cr0 = GET_SMSTATE(u64, smbase, 0x7f58);
ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u64, smbase, 0x7f50));
cr4 = GET_SMSTATE(u64, smbase, 0x7f48);
ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
val = GET_SMSTATE(u64, smbase, 0x7ed0);
ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
selector = GET_SMSTATE(u32, smbase, 0x7e90);
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e92) << 8);
set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e94));
set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e98));
base3 = GET_SMSTATE(u32, smbase, 0x7e9c);
ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
dt.size = GET_SMSTATE(u32, smbase, 0x7e84);
dt.address = GET_SMSTATE(u64, smbase, 0x7e88);
ctxt->ops->set_idt(ctxt, &dt);
selector = GET_SMSTATE(u32, smbase, 0x7e70);
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e72) << 8);
set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e74));
set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e78));
base3 = GET_SMSTATE(u32, smbase, 0x7e7c);
ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
dt.size = GET_SMSTATE(u32, smbase, 0x7e64);
dt.address = GET_SMSTATE(u64, smbase, 0x7e68);
ctxt->ops->set_gdt(ctxt, &dt);
r = rsm_enter_protected_mode(ctxt, cr0, cr4);
if (r != X86EMUL_CONTINUE)
return r;
for (i = 0; i < 6; i++) {
r = rsm_load_seg_64(ctxt, smbase, i);
if (r != X86EMUL_CONTINUE)
return r;
}
return X86EMUL_CONTINUE;
}
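/*
 * RSM: leave System Management Mode. The CPU is first forced back to
 * a paging-off 32-bit state so that CR0/CR3/CR4/EFER can be restored
 * in a legal order from the SMRAM image at SMBASE + 0x8000.
 */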
static int em_rsm(struct x86_emulate_ctxt *ctxt)
{
unsigned long cr0, cr4, efer;
u64 smbase;
int ret;
if ((ctxt->emul_flags & X86EMUL_SMM_MASK) == 0)
return emulate_ud(ctxt);
/*
* Get back to real mode, to prepare a safe state in which to load
* CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
* supports long mode.
*/
cr4 = ctxt->ops->get_cr(ctxt, 4);
if (emulator_has_longmode(ctxt)) {
struct desc_struct cs_desc;
/* Zero CR4.PCIDE before CR0.PG. */
if (cr4 & X86_CR4_PCIDE) {
ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
cr4 &= ~X86_CR4_PCIDE;
}
/* A 32-bit code segment is required to clear EFER.LMA. */
memset(&cs_desc, 0, sizeof(cs_desc));
cs_desc.type = 0xb;
cs_desc.s = cs_desc.g = cs_desc.p = 1;
ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
}
/* For the 64-bit case, this will clear EFER.LMA. */
cr0 = ctxt->ops->get_cr(ctxt, 0);
if (cr0 & X86_CR0_PE)
ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
/* Now clear CR4.PAE (which must be done before clearing EFER.LME). */
if (cr4 & X86_CR4_PAE)
ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
/* And finally go back to 32-bit mode. */
efer = 0;
ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
smbase = ctxt->ops->get_smbase(ctxt);
if (emulator_has_longmode(ctxt))
ret = rsm_load_state_64(ctxt, smbase + 0x8000);
else
ret = rsm_load_state_32(ctxt, smbase + 0x8000);
if (ret != X86EMUL_CONTINUE) {
/* FIXME: should triple fault */
return X86EMUL_UNHANDLEABLE;
}
if ((ctxt->emul_flags & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
ctxt->ops->set_nmi_mask(ctxt, false);
ctxt->emul_flags &= ~X86EMUL_SMM_INSIDE_NMI_MASK;
ctxt->emul_flags &= ~X86EMUL_SMM_MASK;
return X86EMUL_CONTINUE;
}
static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
struct desc_struct *cs, struct desc_struct *ss)
{
cs->l = 0; /* will be adjusted later */
set_desc_base(cs, 0); /* flat segment */
cs->g = 1; /* 4kb granularity */
set_desc_limit(cs, 0xfffff); /* 4GB limit */
cs->type = 0x0b; /* Read, Execute, Accessed */
cs->s = 1;
cs->dpl = 0; /* will be adjusted later */
cs->p = 1;
cs->d = 1;
cs->avl = 0;
set_desc_base(ss, 0); /* flat segment */
set_desc_limit(ss, 0xfffff); /* 4GB limit */
ss->g = 1; /* 4kb granularity */
ss->s = 1;
ss->type = 0x03; /* Read/Write, Accessed */
ss->d = 1; /* 32bit stack segment */
ss->dpl = 0;
ss->p = 1;
ss->l = 0;
ss->avl = 0;
}
static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
u32 eax, ebx, ecx, edx;
eax = ecx = 0;
ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}
static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
const struct x86_emulate_ops *ops = ctxt->ops;
u32 eax, ebx, ecx, edx;
/*
 * SYSCALL is always enabled in long mode, so the vendor-specific
 * CPUID check is only needed when other modes are active.
 */
if (ctxt->mode == X86EMUL_MODE_PROT64)
return true;
eax = 0x00000000;
ecx = 0x00000000;
ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
/*
 * Intel ("GenuineIntel")
 * Remark: Intel CPUs only support "syscall" in 64-bit long mode,
 * so a 32-bit compat application running under a 64-bit guest
 * will #UD. While this behaviour could be fixed by emulating the
 * AMD response, AMD CPUs cannot be made to behave like Intel
 * ones, so we stay faithful to Intel here.
 */
if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
return false;
/* AMD ("AuthenticAMD") */
if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
return true;
/* AMD ("AMDisbetter!") */
if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
return true;
/* default: (not Intel, not AMD), apply Intel's stricter rules... */
return false;
}
static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct cs, ss;
u64 msr_data;
u16 cs_sel, ss_sel;
u64 efer = 0;
/* syscall is not available in real mode */
if (ctxt->mode == X86EMUL_MODE_REAL ||
ctxt->mode == X86EMUL_MODE_VM86)
return emulate_ud(ctxt);
if (!(em_syscall_is_enabled(ctxt)))
return emulate_ud(ctxt);
ops->get_msr(ctxt, MSR_EFER, &efer);
setup_syscalls_segments(ctxt, &cs, &ss);
if (!(efer & EFER_SCE))
return emulate_ud(ctxt);
ops->get_msr(ctxt, MSR_STAR, &msr_data);
msr_data >>= 32;
cs_sel = (u16)(msr_data & 0xfffc);
ss_sel = (u16)(msr_data + 8);
if (efer & EFER_LMA) {
cs.d = 0;
cs.l = 1;
}
ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
ops->get_msr(ctxt,
ctxt->mode == X86EMUL_MODE_PROT64 ?
MSR_LSTAR : MSR_CSTAR, &msr_data);
ctxt->_eip = msr_data;
ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
ctxt->eflags &= ~msr_data;
ctxt->eflags |= X86_EFLAGS_FIXED;
#endif
} else {
/* legacy mode */
ops->get_msr(ctxt, MSR_STAR, &msr_data);
ctxt->_eip = (u32)msr_data;
ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
}
return X86EMUL_CONTINUE;
}
static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct cs, ss;
u64 msr_data;
u16 cs_sel, ss_sel;
u64 efer = 0;
ops->get_msr(ctxt, MSR_EFER, &efer);
/* inject #GP if in real mode */
if (ctxt->mode == X86EMUL_MODE_REAL)
return emulate_gp(ctxt, 0);
/*
* Not recognized on AMD in compat mode (but is recognized in legacy
* mode).
*/
if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
&& !vendor_intel(ctxt))
return emulate_ud(ctxt);
/* sysenter/sysexit have not been tested in 64bit mode. */
if (ctxt->mode == X86EMUL_MODE_PROT64)
return X86EMUL_UNHANDLEABLE;
setup_syscalls_segments(ctxt, &cs, &ss);
ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
if ((msr_data & 0xfffc) == 0x0)
return emulate_gp(ctxt, 0);
ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
ss_sel = cs_sel + 8;
if (efer & EFER_LMA) {
cs.d = 0;
cs.l = 1;
}
ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
(u32)msr_data;
return X86EMUL_CONTINUE;
}
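/*
 * SYSEXIT: return to user mode with CS/SS derived from
 * MSR_IA32_SYSENTER_CS, RIP taken from RDX and RSP from RCX; a REX.W
 * prefix selects a 64-bit return.
 */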
static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct cs, ss;
u64 msr_data, rcx, rdx;
int usermode;
u16 cs_sel = 0, ss_sel = 0;
/* inject #GP if in real mode or Virtual 8086 mode */
if (ctxt->mode == X86EMUL_MODE_REAL ||
ctxt->mode == X86EMUL_MODE_VM86)
return emulate_gp(ctxt, 0);
setup_syscalls_segments(ctxt, &cs, &ss);
if ((ctxt->rex_prefix & 0x8) != 0x0)
usermode = X86EMUL_MODE_PROT64;
else
usermode = X86EMUL_MODE_PROT32;
rcx = reg_read(ctxt, VCPU_REGS_RCX);
rdx = reg_read(ctxt, VCPU_REGS_RDX);
cs.dpl = 3;
ss.dpl = 3;
ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
switch (usermode) {
case X86EMUL_MODE_PROT32:
cs_sel = (u16)(msr_data + 16);
if ((msr_data & 0xfffc) == 0x0)
return emulate_gp(ctxt, 0);
ss_sel = (u16)(msr_data + 24);
rcx = (u32)rcx;
rdx = (u32)rdx;
break;
case X86EMUL_MODE_PROT64:
cs_sel = (u16)(msr_data + 32);
if (msr_data == 0x0)
return emulate_gp(ctxt, 0);
ss_sel = cs_sel + 8;
cs.d = 0;
cs.l = 1;
if (is_noncanonical_address(rcx) ||
is_noncanonical_address(rdx))
return emulate_gp(ctxt, 0);
break;
}
cs_sel |= SEGMENT_RPL_MASK;
ss_sel |= SEGMENT_RPL_MASK;
ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
ctxt->_eip = rdx;
*reg_write(ctxt, VCPU_REGS_RSP) = rcx;
return X86EMUL_CONTINUE;
}
static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{
int iopl;
if (ctxt->mode == X86EMUL_MODE_REAL)
return false;
if (ctxt->mode == X86EMUL_MODE_VM86)
return true;
iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
return ctxt->ops->cpl(ctxt) > iopl;
}
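/*
 * Consult the I/O permission bitmap in the TSS: the 16-bit bitmap
 * offset lives at byte 102 of the TSS, and access is allowed only if
 * every bit covering the port range is clear.
 */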
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
u16 port, u16 len)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct tr_seg;
u32 base3;
int r;
u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
unsigned mask = (1 << len) - 1;
unsigned long base;
ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
if (!tr_seg.p)
return false;
if (desc_limit_scaled(&tr_seg) < 103)
return false;
base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
base |= ((u64)base3) << 32;
#endif
r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
if (r != X86EMUL_CONTINUE)
return false;
if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
return false;
r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
if (r != X86EMUL_CONTINUE)
return false;
if ((perm >> bit_idx) & mask)
return false;
return true;
}
static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
u16 port, u16 len)
{
if (ctxt->perm_ok)
return true;
if (emulator_bad_iopl(ctxt))
if (!emulator_io_port_access_allowed(ctxt, port, len))
return false;
ctxt->perm_ok = true;
return true;
}
static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
{
/*
 * Intel CPUs mask the counter and pointers in a rather strange
 * manner when ECX is zero due to REP-string optimizations.
 */
#ifdef CONFIG_X86_64
if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
return;
*reg_write(ctxt, VCPU_REGS_RCX) = 0;
switch (ctxt->b) {
case 0xa4: /* movsb */
case 0xa5: /* movsd/w */
*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
/* fall through */
case 0xaa: /* stosb */
case 0xab: /* stosd/w */
*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
}
#endif
}
static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
struct tss_segment_16 *tss)
{
tss->ip = ctxt->_eip;
tss->flag = ctxt->eflags;
tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
tss->si = reg_read(ctxt, VCPU_REGS_RSI);
tss->di = reg_read(ctxt, VCPU_REGS_RDI);
tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}
static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
struct tss_segment_16 *tss)
{
int ret;
u8 cpl;
ctxt->_eip = tss->ip;
ctxt->eflags = tss->flag | 2;
*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
/*
* SDM says that segment selectors are loaded before segment
* descriptors
*/
set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
cpl = tss->cs & 3;
/*
 * Now load the segment descriptors. If a fault happens at this
 * stage, it is handled in the context of the new task.
 */
ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
return X86EMUL_CONTINUE;
}
static int task_switch_16(struct x86_emulate_ctxt *ctxt,
u16 tss_selector, u16 old_tss_sel,
ulong old_tss_base, struct desc_struct *new_desc)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct tss_segment_16 tss_seg;
int ret;
u32 new_tss_base = get_desc_base(new_desc);
ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
save_state_to_tss16(ctxt, &tss_seg);
ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
if (old_tss_sel != 0xffff) {
tss_seg.prev_task_link = old_tss_sel;
ret = ops->write_std(ctxt, new_tss_base,
&tss_seg.prev_task_link,
sizeof tss_seg.prev_task_link,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
}
return load_state_from_tss16(ctxt, &tss_seg);
}
static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
struct tss_segment_32 *tss)
{
/* CR3 and the LDT selector are intentionally not saved */
tss->eip = ctxt->_eip;
tss->eflags = ctxt->eflags;
tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
}
static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
struct tss_segment_32 *tss)
{
int ret;
u8 cpl;
if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
return emulate_gp(ctxt, 0);
ctxt->_eip = tss->eip;
ctxt->eflags = tss->eflags | 2;
/* General purpose registers */
*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
/*
* SDM says that segment selectors are loaded before segment
* descriptors. This is important because CPL checks will
* use CS.RPL.
*/
set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
/*
* If we're switching between Protected Mode and VM86, we need to make
* sure to update the mode before loading the segment descriptors so
* that the selectors are interpreted correctly.
*/
if (ctxt->eflags & X86_EFLAGS_VM) {
ctxt->mode = X86EMUL_MODE_VM86;
cpl = 3;
} else {
ctxt->mode = X86EMUL_MODE_PROT32;
cpl = tss->cs & 3;
}
/*
 * Now load the segment descriptors. If a fault happens at this
 * stage, it is handled in the context of the new task.
 */
ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
cpl, X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
return ret;
}
static int task_switch_32(struct x86_emulate_ctxt *ctxt,
u16 tss_selector, u16 old_tss_sel,
ulong old_tss_base, struct desc_struct *new_desc)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct tss_segment_32 tss_seg;
int ret;
u32 new_tss_base = get_desc_base(new_desc);
u32 eip_offset = offsetof(struct tss_segment_32, eip);
u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
save_state_to_tss32(ctxt, &tss_seg);
/* Only GP registers and segment selectors are saved */
ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
ldt_sel_offset - eip_offset, &ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
if (old_tss_sel != 0xffff) {
tss_seg.prev_task_link = old_tss_sel;
ret = ops->write_std(ctxt, new_tss_base,
&tss_seg.prev_task_link,
sizeof tss_seg.prev_task_link,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
}
return load_state_from_tss32(ctxt, &tss_seg);
}
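/*
 * Common task-switch worker: validate the new TSS descriptor, save
 * the outgoing state into the old TSS, load the incoming state from
 * the new one (16- or 32-bit format), and perform the busy-bit,
 * NT-flag and back-link bookkeeping that hardware would do.
 */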
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
u16 tss_selector, int idt_index, int reason,
bool has_error_code, u32 error_code)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct curr_tss_desc, next_tss_desc;
int ret;
u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
ulong old_tss_base =
ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
u32 desc_limit;
ulong desc_addr, dr7;
/* FIXME: old_tss_base == ~0 ? */
ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
if (ret != X86EMUL_CONTINUE)
return ret;
/* FIXME: check that next_tss_desc is tss */
/*
* Check privileges. The three cases are task switch caused by...
*
* 1. jmp/call/int to task gate: Check against DPL of the task gate
* 2. Exception/IRQ/iret: No check is performed
* 3. jmp/call to TSS/task-gate: No check is performed since the
* hardware checks it before exiting.
*/
if (reason == TASK_SWITCH_GATE) {
if (idt_index != -1) {
/* Software interrupts */
struct desc_struct task_gate_desc;
int dpl;
ret = read_interrupt_descriptor(ctxt, idt_index,
&task_gate_desc);
if (ret != X86EMUL_CONTINUE)
return ret;
dpl = task_gate_desc.dpl;
if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
return emulate_gp(ctxt, (idt_index << 3) | 0x2);
}
}
desc_limit = desc_limit_scaled(&next_tss_desc);
if (!next_tss_desc.p ||
((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
desc_limit < 0x2b)) {
return emulate_ts(ctxt, tss_selector & 0xfffc);
}
if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
}
if (reason == TASK_SWITCH_IRET)
ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
/*
 * Set the back link to the previous task only if the NT bit is
 * set in EFLAGS; note that old_tss_sel is not used after this
 * point.
 */
if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
old_tss_sel = 0xffff;
if (next_tss_desc.type & 8)
ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
old_tss_base, &next_tss_desc);
else
ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
old_tss_base, &next_tss_desc);
if (ret != X86EMUL_CONTINUE)
return ret;
if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
if (reason != TASK_SWITCH_IRET) {
next_tss_desc.type |= (1 << 1); /* set busy flag */
write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
}
ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
if (has_error_code) {
ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
ctxt->lock_prefix = 0;
ctxt->src.val = (unsigned long) error_code;
ret = em_push(ctxt);
}
ops->get_dr(ctxt, 7, &dr7);
ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
return ret;
}
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
u16 tss_selector, int idt_index, int reason,
bool has_error_code, u32 error_code)
{
int rc;
invalidate_registers(ctxt);
ctxt->_eip = ctxt->eip;
ctxt->dst.type = OP_NONE;
rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
has_error_code, error_code);
if (rc == X86EMUL_CONTINUE) {
ctxt->eip = ctxt->_eip;
writeback_registers(ctxt);
}
return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
struct operand *op)
{
int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
register_address_increment(ctxt, reg, df * op->bytes);
op->addr.mem.ea = register_address(ctxt, reg);
}
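/*
 * DAS: decimal adjust AL after subtraction, fixing up each BCD nibble
 * and recomputing CF/AF; PF/ZF/SF are derived via a dummy OR with 0.
 */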
static int em_das(struct x86_emulate_ctxt *ctxt)
{
u8 al, old_al;
bool af, cf, old_cf;
cf = ctxt->eflags & X86_EFLAGS_CF;
al = ctxt->dst.val;
old_al = al;
old_cf = cf;
cf = false;
af = ctxt->eflags & X86_EFLAGS_AF;
if ((al & 0x0f) > 9 || af) {
al -= 6;
cf = old_cf | (al >= 250);
af = true;
} else {
af = false;
}
if (old_al > 0x99 || old_cf) {
al -= 0x60;
cf = true;
}
ctxt->dst.val = al;
/* Set PF, ZF, SF */
ctxt->src.type = OP_IMM;
ctxt->src.val = 0;
ctxt->src.bytes = 1;
fastop(ctxt, em_or);
ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
if (cf)
ctxt->eflags |= X86_EFLAGS_CF;
if (af)
ctxt->eflags |= X86_EFLAGS_AF;
return X86EMUL_CONTINUE;
}
static int em_aam(struct x86_emulate_ctxt *ctxt)
{
u8 al, ah;
if (ctxt->src.val == 0)
return emulate_de(ctxt);
al = ctxt->dst.val & 0xff;
ah = al / ctxt->src.val;
al %= ctxt->src.val;
ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
/* Set PF, ZF, SF */
ctxt->src.type = OP_IMM;
ctxt->src.val = 0;
ctxt->src.bytes = 1;
fastop(ctxt, em_or);
return X86EMUL_CONTINUE;
}
static int em_aad(struct x86_emulate_ctxt *ctxt)
{
u8 al = ctxt->dst.val & 0xff;
u8 ah = (ctxt->dst.val >> 8) & 0xff;
al = (al + (ah * ctxt->src.val)) & 0xff;
ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
/* Set PF, ZF, SF */
ctxt->src.type = OP_IMM;
ctxt->src.val = 0;
ctxt->src.bytes = 1;
fastop(ctxt, em_or);
return X86EMUL_CONTINUE;
}
static int em_call(struct x86_emulate_ctxt *ctxt)
{
int rc;
long rel = ctxt->src.val;
ctxt->src.val = (unsigned long)ctxt->_eip;
rc = jmp_rel(ctxt, rel);
if (rc != X86EMUL_CONTINUE)
return rc;
return em_push(ctxt);
}
static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
u16 sel, old_cs;
ulong old_eip;
int rc;
struct desc_struct old_desc, new_desc;
const struct x86_emulate_ops *ops = ctxt->ops;
int cpl = ctxt->ops->cpl(ctxt);
enum x86emul_mode prev_mode = ctxt->mode;
old_eip = ctxt->_eip;
ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
X86_TRANSFER_CALL_JMP, &new_desc);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
if (rc != X86EMUL_CONTINUE)
goto fail;
ctxt->src.val = old_cs;
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
goto fail;
ctxt->src.val = old_eip;
rc = em_push(ctxt);
/*
 * If we failed, we have tainted memory, but at the very least we
 * should restore CS.
 */
if (rc != X86EMUL_CONTINUE) {
pr_warn_once("faulting far call emulation tainted memory\n");
goto fail;
}
return rc;
fail:
ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
ctxt->mode = prev_mode;
return rc;
}
static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned long eip;
rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_near(ctxt, eip);
if (rc != X86EMUL_CONTINUE)
return rc;
rsp_increment(ctxt, ctxt->src.val);
return X86EMUL_CONTINUE;
}
static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
/* Write back the register source. */
ctxt->src.val = ctxt->dst.val;
write_register_operand(&ctxt->src);
/* Write back the memory destination with implicit LOCK prefix. */
ctxt->dst.val = ctxt->src.orig_val;
ctxt->lock_prefix = 1;
return X86EMUL_CONTINUE;
}
static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
ctxt->dst.val = ctxt->src2.val;
return fastop(ctxt, em_imul);
}
static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
ctxt->dst.type = OP_REG;
ctxt->dst.bytes = ctxt->src.bytes;
ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
return X86EMUL_CONTINUE;
}
static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
u64 tsc = 0;
ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
return X86EMUL_CONTINUE;
}
static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
{
u64 pmc;
if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
return emulate_gp(ctxt, 0);
*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
return X86EMUL_CONTINUE;
}
static int em_mov(struct x86_emulate_ctxt *ctxt)
{
memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
return X86EMUL_CONTINUE;
}
#define FFL(x) bit(X86_FEATURE_##x)
static int em_movbe(struct x86_emulate_ctxt *ctxt)
{
u32 ebx, ecx, edx, eax = 1;
u16 tmp;
/*
* Check MOVBE is set in the guest-visible CPUID leaf.
*/
ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
if (!(ecx & FFL(MOVBE)))
return emulate_ud(ctxt);
switch (ctxt->op_bytes) {
case 2:
/*
* From MOVBE definition: "...When the operand size is 16 bits,
* the upper word of the destination register remains unchanged
* ..."
*
* Casting either ->valptr or ->val to u16 would break strict
* aliasing rules, so we have to do the operation almost by hand.
*/
tmp = (u16)ctxt->src.val;
ctxt->dst.val &= ~0xffffUL;
ctxt->dst.val |= (unsigned long)swab16(tmp);
break;
case 4:
ctxt->dst.val = swab32((u32)ctxt->src.val);
break;
case 8:
ctxt->dst.val = swab64(ctxt->src.val);
break;
default:
BUG();
}
return X86EMUL_CONTINUE;
}
static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
return emulate_gp(ctxt, 0);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_dr_write(struct x86_emulate_ctxt *ctxt)
{
unsigned long val;
if (ctxt->mode == X86EMUL_MODE_PROT64)
val = ctxt->src.val & ~0ULL;
else
val = ctxt->src.val & ~0U;
/* #UD condition is already handled. */
if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
return emulate_gp(ctxt, 0);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
{
u64 msr_data;
msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
}
static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
{
u64 msr_data;
if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
return emulate_gp(ctxt, 0);
*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
return X86EMUL_CONTINUE;
}
static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
{
if (ctxt->modrm_reg > VCPU_SREG_GS)
return emulate_ud(ctxt);
ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
ctxt->dst.bytes = 2;
return X86EMUL_CONTINUE;
}
static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
{
u16 sel = ctxt->src.val;
if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
return emulate_ud(ctxt);
if (ctxt->modrm_reg == VCPU_SREG_SS)
ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}
static int em_lldt(struct x86_emulate_ctxt *ctxt)
{
u16 sel = ctxt->src.val;
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
}
static int em_ltr(struct x86_emulate_ctxt *ctxt)
{
u16 sel = ctxt->src.val;
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
}
static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
int rc;
ulong linear;
rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
if (rc == X86EMUL_CONTINUE)
ctxt->ops->invlpg(ctxt, linear);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_clts(struct x86_emulate_ctxt *ctxt)
{
ulong cr0;
cr0 = ctxt->ops->get_cr(ctxt, 0);
cr0 &= ~X86_CR0_TS;
ctxt->ops->set_cr(ctxt, 0, cr0);
return X86EMUL_CONTINUE;
}
static int em_hypercall(struct x86_emulate_ctxt *ctxt)
{
int rc = ctxt->ops->fix_hypercall(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
/* Let the processor re-execute the fixed hypercall */
ctxt->_eip = ctxt->eip;
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
void (*get)(struct x86_emulate_ctxt *ctxt,
struct desc_ptr *ptr))
{
struct desc_ptr desc_ptr;
if (ctxt->mode == X86EMUL_MODE_PROT64)
ctxt->op_bytes = 8;
get(ctxt, &desc_ptr);
if (ctxt->op_bytes == 2) {
ctxt->op_bytes = 4;
desc_ptr.address &= 0x00ffffff;
}
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return segmented_write(ctxt, ctxt->dst.addr.mem,
&desc_ptr, 2 + ctxt->op_bytes);
}
static int em_sgdt(struct x86_emulate_ctxt *ctxt)
{
return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
}
static int em_sidt(struct x86_emulate_ctxt *ctxt)
{
return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
}
static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
{
struct desc_ptr desc_ptr;
int rc;
if (ctxt->mode == X86EMUL_MODE_PROT64)
ctxt->op_bytes = 8;
rc = read_descriptor(ctxt, ctxt->src.addr.mem,
&desc_ptr.size, &desc_ptr.address,
ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
if (ctxt->mode == X86EMUL_MODE_PROT64 &&
is_noncanonical_address(desc_ptr.address))
return emulate_gp(ctxt, 0);
if (lgdt)
ctxt->ops->set_gdt(ctxt, &desc_ptr);
else
ctxt->ops->set_idt(ctxt, &desc_ptr);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{
return em_lgdt_lidt(ctxt, true);
}
static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
return em_lgdt_lidt(ctxt, false);
}
static int em_smsw(struct x86_emulate_ctxt *ctxt)
{
if (ctxt->dst.type == OP_MEM)
ctxt->dst.bytes = 2;
ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
return X86EMUL_CONTINUE;
}
static int em_lmsw(struct x86_emulate_ctxt *ctxt)
{
ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
| (ctxt->src.val & 0x0f));
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
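/*
 * LOOP/LOOPE/LOOPNE (0xe0-0xe2): decrement rCX and branch while it is
 * non-zero; for 0xe0/0xe1 the ZF condition encoded in the low opcode
 * bits must also hold (hence the ctxt->b ^ 0x5 trick below).
 */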
static int em_loop(struct x86_emulate_ctxt *ctxt)
{
int rc = X86EMUL_CONTINUE;
register_address_increment(ctxt, VCPU_REGS_RCX, -1);
if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
(ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
rc = jmp_rel(ctxt, ctxt->src.val);
return rc;
}
static int em_jcxz(struct x86_emulate_ctxt *ctxt)
{
int rc = X86EMUL_CONTINUE;
if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
rc = jmp_rel(ctxt, ctxt->src.val);
return rc;
}
static int em_in(struct x86_emulate_ctxt *ctxt)
{
if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
&ctxt->dst.val))
return X86EMUL_IO_NEEDED;
return X86EMUL_CONTINUE;
}
static int em_out(struct x86_emulate_ctxt *ctxt)
{
ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
&ctxt->src.val, 1);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_cli(struct x86_emulate_ctxt *ctxt)
{
if (emulator_bad_iopl(ctxt))
return emulate_gp(ctxt, 0);
ctxt->eflags &= ~X86_EFLAGS_IF;
return X86EMUL_CONTINUE;
}
static int em_sti(struct x86_emulate_ctxt *ctxt)
{
if (emulator_bad_iopl(ctxt))
return emulate_gp(ctxt, 0);
ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
ctxt->eflags |= X86_EFLAGS_IF;
return X86EMUL_CONTINUE;
}
static int em_cpuid(struct x86_emulate_ctxt *ctxt)
{
u32 eax, ebx, ecx, edx;
eax = reg_read(ctxt, VCPU_REGS_RAX);
ecx = reg_read(ctxt, VCPU_REGS_RCX);
ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
*reg_write(ctxt, VCPU_REGS_RAX) = eax;
*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
*reg_write(ctxt, VCPU_REGS_RDX) = edx;
return X86EMUL_CONTINUE;
}
static int em_sahf(struct x86_emulate_ctxt *ctxt)
{
u32 flags;
flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
X86_EFLAGS_SF;
flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
ctxt->eflags &= ~0xffUL;
ctxt->eflags |= flags | X86_EFLAGS_FIXED;
return X86EMUL_CONTINUE;
}
static int em_lahf(struct x86_emulate_ctxt *ctxt)
{
*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
return X86EMUL_CONTINUE;
}
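/*
 * BSWAP: byte-swap the destination register. With a 16-bit operand the
 * architectural result is undefined; here it falls through to the
 * 32-bit swap.
 */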
static int em_bswap(struct x86_emulate_ctxt *ctxt)
{
switch (ctxt->op_bytes) {
#ifdef CONFIG_X86_64
case 8:
asm("bswap %0" : "+r"(ctxt->dst.val));
break;
#endif
default:
asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
break;
}
return X86EMUL_CONTINUE;
}
static int em_clflush(struct x86_emulate_ctxt *ctxt)
{
/* emulating clflush regardless of cpuid */
return X86EMUL_CONTINUE;
}
static int em_movsxd(struct x86_emulate_ctxt *ctxt)
{
ctxt->dst.val = (s32) ctxt->src.val;
return X86EMUL_CONTINUE;
}
static bool valid_cr(int nr)
{
switch (nr) {
case 0:
case 2 ... 4:
case 8:
return true;
default:
return false;
}
}
static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
if (!valid_cr(ctxt->modrm_reg))
return emulate_ud(ctxt);
return X86EMUL_CONTINUE;
}
static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
u64 new_val = ctxt->src.val64;
int cr = ctxt->modrm_reg;
u64 efer = 0;
static u64 cr_reserved_bits[] = {
0xffffffff00000000ULL,
0, 0, 0, /* CR3 checked later */
CR4_RESERVED_BITS,
0, 0, 0,
CR8_RESERVED_BITS,
};
if (!valid_cr(cr))
return emulate_ud(ctxt);
if (new_val & cr_reserved_bits[cr])
return emulate_gp(ctxt, 0);
switch (cr) {
case 0: {
u64 cr4;
if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
return emulate_gp(ctxt, 0);
cr4 = ctxt->ops->get_cr(ctxt, 4);
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
!(cr4 & X86_CR4_PAE))
return emulate_gp(ctxt, 0);
break;
}
case 3: {
u64 rsvd = 0;
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (efer & EFER_LMA)
rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
if (new_val & rsvd)
return emulate_gp(ctxt, 0);
break;
}
case 4: {
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
return emulate_gp(ctxt, 0);
break;
}
}
return X86EMUL_CONTINUE;
}
static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
unsigned long dr7;
ctxt->ops->get_dr(ctxt, 7, &dr7);
/* Check if DR7.Global_Enable is set */
return dr7 & (1 << 13);
}
static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
int dr = ctxt->modrm_reg;
u64 cr4;
if (dr > 7)
return emulate_ud(ctxt);
cr4 = ctxt->ops->get_cr(ctxt, 4);
if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
return emulate_ud(ctxt);
if (check_dr7_gd(ctxt)) {
ulong dr6;
ctxt->ops->get_dr(ctxt, 6, &dr6);
dr6 &= ~15;
dr6 |= DR6_BD | DR6_RTM;
ctxt->ops->set_dr(ctxt, 6, dr6);
return emulate_db(ctxt);
}
return X86EMUL_CONTINUE;
}
static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
u64 new_val = ctxt->src.val64;
int dr = ctxt->modrm_reg;
if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
return emulate_gp(ctxt, 0);
return check_dr_read(ctxt);
}
static int check_svme(struct x86_emulate_ctxt *ctxt)
{
u64 efer;
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (!(efer & EFER_SVME))
return emulate_ud(ctxt);
return X86EMUL_CONTINUE;
}
static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
/* Valid physical address? */
if (rax & 0xffff000000000000ULL)
return emulate_gp(ctxt, 0);
return check_svme(ctxt);
}
static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
return emulate_ud(ctxt);
return X86EMUL_CONTINUE;
}
static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
ctxt->ops->check_pmc(ctxt, rcx))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
}
static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
}
static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
ctxt->src.bytes = min(ctxt->src.bytes, 4u);
if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
}
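/*
 * Opcode table building blocks: D() is a decode-only entry, I()/F()
 * attach an execute or fastop handler, G/GD/EXT/E/GP/ID/MD dispatch
 * through a ModRM- or prefix-indexed sub-table, and the 2bv/6ALU
 * variants expand into byte + word/dword entry pairs.
 */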
#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
.intercept = x86_intercept_##_i, .check_perm = (_p) }
#define N D(NotImpl)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
#define II(_f, _e, _i) \
{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
.intercept = x86_intercept_##_i, .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
#define D2bv(_f) D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
#define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
static const struct opcode group7_rm0[] = {
N,
I(SrcNone | Priv | EmulateOnUD, em_hypercall),
N, N, N, N, N, N,
};
static const struct opcode group7_rm1[] = {
DI(SrcNone | Priv, monitor),
DI(SrcNone | Priv, mwait),
N, N, N, N, N, N,
};
static const struct opcode group7_rm3[] = {
DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
DIP(SrcNone | Prot | Priv, stgi, check_svme),
DIP(SrcNone | Prot | Priv, clgi, check_svme),
DIP(SrcNone | Prot | Priv, skinit, check_svme),
DIP(SrcNone | Prot | Priv, invlpga, check_svme),
};
static const struct opcode group7_rm7[] = {
N,
DIP(SrcNone, rdtscp, check_rdtsc),
N, N, N, N, N, N,
};
static const struct opcode group1[] = {
F(Lock, em_add),
F(Lock | PageTable, em_or),
F(Lock, em_adc),
F(Lock, em_sbb),
F(Lock | PageTable, em_and),
F(Lock, em_sub),
F(Lock, em_xor),
F(NoWrite, em_cmp),
};
static const struct opcode group1A[] = {
I(DstMem | SrcNone | Mov | Stack | IncSP, em_pop), N, N, N, N, N, N, N,
};
static const struct opcode group2[] = {
F(DstMem | ModRM, em_rol),
F(DstMem | ModRM, em_ror),
F(DstMem | ModRM, em_rcl),
F(DstMem | ModRM, em_rcr),
F(DstMem | ModRM, em_shl),
F(DstMem | ModRM, em_shr),
F(DstMem | ModRM, em_shl),
F(DstMem | ModRM, em_sar),
};
static const struct opcode group3[] = {
F(DstMem | SrcImm | NoWrite, em_test),
F(DstMem | SrcImm | NoWrite, em_test),
F(DstMem | SrcNone | Lock, em_not),
F(DstMem | SrcNone | Lock, em_neg),
F(DstXacc | Src2Mem, em_mul_ex),
F(DstXacc | Src2Mem, em_imul_ex),
F(DstXacc | Src2Mem, em_div_ex),
F(DstXacc | Src2Mem, em_idiv_ex),
};
static const struct opcode group4[] = {
F(ByteOp | DstMem | SrcNone | Lock, em_inc),
F(ByteOp | DstMem | SrcNone | Lock, em_dec),
N, N, N, N, N, N,
};
static const struct opcode group5[] = {
F(DstMem | SrcNone | Lock, em_inc),
F(DstMem | SrcNone | Lock, em_dec),
I(SrcMem | NearBranch, em_call_near_abs),
I(SrcMemFAddr | ImplicitOps, em_call_far),
I(SrcMem | NearBranch, em_jmp_abs),
I(SrcMemFAddr | ImplicitOps, em_jmp_far),
I(SrcMem | Stack, em_push), D(Undefined),
};
static const struct opcode group6[] = {
DI(Prot | DstMem, sldt),
DI(Prot | DstMem, str),
II(Prot | Priv | SrcMem16, em_lldt, lldt),
II(Prot | Priv | SrcMem16, em_ltr, ltr),
N, N, N, N,
};
static const struct group_dual group7 = { {
II(Mov | DstMem, em_sgdt, sgdt),
II(Mov | DstMem, em_sidt, sidt),
II(SrcMem | Priv, em_lgdt, lgdt),
II(SrcMem | Priv, em_lidt, lidt),
II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
}, {
EXT(0, group7_rm0),
EXT(0, group7_rm1),
N, EXT(0, group7_rm3),
II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
EXT(0, group7_rm7),
} };
static const struct opcode group8[] = {
N, N, N, N,
F(DstMem | SrcImmByte | NoWrite, em_bt),
F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
F(DstMem | SrcImmByte | Lock, em_btr),
F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
};
static const struct group_dual group9 = { {
N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
N, N, N, N, N, N, N, N,
} };
static const struct opcode group11[] = {
I(DstMem | SrcImm | Mov | PageTable, em_mov),
X7(D(Undefined)),
};
static const struct gprefix pfx_0f_ae_7 = {
I(SrcMem | ByteOp, em_clflush), N, N, N,
};
static const struct group_dual group15 = { {
N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
}, {
N, N, N, N, N, N, N, N,
} };
static const struct gprefix pfx_0f_6f_0f_7f = {
I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
};
static const struct instr_dual instr_dual_0f_2b = {
I(0, em_mov), N
};
static const struct gprefix pfx_0f_2b = {
ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
};
static const struct gprefix pfx_0f_28_0f_29 = {
I(Aligned, em_mov), I(Aligned, em_mov), N, N,
};
static const struct gprefix pfx_0f_e7 = {
N, I(Sse, em_mov), N, N,
};
static const struct escape escape_d9 = { {
N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
}, {
/* 0xC0 - 0xC7 */
N, N, N, N, N, N, N, N,
/* 0xC8 - 0xCF */
N, N, N, N, N, N, N, N,
/* 0xD0 - 0xD7 */
N, N, N, N, N, N, N, N,
/* 0xD8 - 0xDF */
N, N, N, N, N, N, N, N,
/* 0xE0 - 0xE7 */
N, N, N, N, N, N, N, N,
/* 0xE8 - 0xEF */
N, N, N, N, N, N, N, N,
/* 0xF0 - 0xF7 */
N, N, N, N, N, N, N, N,
/* 0xF8 - 0xFF */
N, N, N, N, N, N, N, N,
} };
static const struct escape escape_db = { {
N, N, N, N, N, N, N, N,
}, {
/* 0xC0 - 0xC7 */
N, N, N, N, N, N, N, N,
/* 0xC8 - 0xCF */
N, N, N, N, N, N, N, N,
/* 0xD0 - 0xD7 */
N, N, N, N, N, N, N, N,
/* 0xD8 - 0xDF */
N, N, N, N, N, N, N, N,
/* 0xE0 - 0xE7 */
N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
/* 0xE8 - 0xEF */
N, N, N, N, N, N, N, N,
/* 0xF0 - 0xF7 */
N, N, N, N, N, N, N, N,
/* 0xF8 - 0xFF */
N, N, N, N, N, N, N, N,
} };
static const struct escape escape_dd = { {
N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
}, {
/* 0xC0 - 0xC7 */
N, N, N, N, N, N, N, N,
/* 0xC8 - 0xCF */
N, N, N, N, N, N, N, N,
/* 0xD0 - 0xD7 */
N, N, N, N, N, N, N, N,
/* 0xD8 - 0xDF */
N, N, N, N, N, N, N, N,
/* 0xE0 - 0xE7 */
N, N, N, N, N, N, N, N,
/* 0xE8 - 0xEF */
N, N, N, N, N, N, N, N,
/* 0xF0 - 0xF7 */
N, N, N, N, N, N, N, N,
/* 0xF8 - 0xFF */
N, N, N, N, N, N, N, N,
} };
static const struct instr_dual instr_dual_0f_c3 = {
I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
};
static const struct mode_dual mode_dual_63 = {
N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
};
static const struct opcode opcode_table[256] = {
/* 0x00 - 0x07 */
F6ALU(Lock, em_add),
I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
/* 0x08 - 0x0F */
F6ALU(Lock | PageTable, em_or),
I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
N,
/* 0x10 - 0x17 */
F6ALU(Lock, em_adc),
I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
/* 0x18 - 0x1F */
F6ALU(Lock, em_sbb),
I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
/* 0x20 - 0x27 */
F6ALU(Lock | PageTable, em_and), N, N,
/* 0x28 - 0x2F */
F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
/* 0x30 - 0x37 */
F6ALU(Lock, em_xor), N, N,
/* 0x38 - 0x3F */
F6ALU(NoWrite, em_cmp), N, N,
/* 0x40 - 0x4F */
X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
/* 0x50 - 0x57 */
X8(I(SrcReg | Stack, em_push)),
/* 0x58 - 0x5F */
X8(I(DstReg | Stack, em_pop)),
/* 0x60 - 0x67 */
I(ImplicitOps | Stack | No64, em_pusha),
I(ImplicitOps | Stack | No64, em_popa),
N, MD(ModRM, &mode_dual_63),
N, N, N, N,
/* 0x68 - 0x6F */
I(SrcImm | Mov | Stack, em_push),
I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
I(SrcImmByte | Mov | Stack, em_push),
I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
/* 0x70 - 0x7F */
X16(D(SrcImmByte | NearBranch)),
/* 0x80 - 0x87 */
G(ByteOp | DstMem | SrcImm, group1),
G(DstMem | SrcImm, group1),
G(ByteOp | DstMem | SrcImm | No64, group1),
G(DstMem | SrcImmByte, group1),
F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
/* 0x88 - 0x8F */
I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
D(ModRM | SrcMem | NoAccess | DstReg),
I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
G(0, group1A),
/* 0x90 - 0x97 */
DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
/* 0x98 - 0x9F */
D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
I(SrcImmFAddr | No64, em_call_far), N,
II(ImplicitOps | Stack, em_pushf, pushf),
II(ImplicitOps | Stack, em_popf, popf),
I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
/* 0xA0 - 0xA7 */
I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
I2bv(SrcSI | DstDI | Mov | String, em_mov),
F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
/* 0xA8 - 0xAF */
F2bv(DstAcc | SrcImm | NoWrite, em_test),
I2bv(SrcAcc | DstDI | Mov | String, em_mov),
I2bv(SrcSI | DstAcc | Mov | String, em_mov),
F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
/* 0xB0 - 0xB7 */
X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
/* 0xB8 - 0xBF */
X8(I(DstReg | SrcImm64 | Mov, em_mov)),
/* 0xC0 - 0xC7 */
G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
I(ImplicitOps | NearBranch, em_ret),
I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
G(ByteOp, group11), G(0, group11),
/* 0xC8 - 0xCF */
I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
I(ImplicitOps | SrcImmU16, em_ret_far_imm),
I(ImplicitOps, em_ret_far),
D(ImplicitOps), DI(SrcImmByte, intn),
D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
/* 0xD0 - 0xD7 */
G(Src2One | ByteOp, group2), G(Src2One, group2),
G(Src2CL | ByteOp, group2), G(Src2CL, group2),
I(DstAcc | SrcImmUByte | No64, em_aam),
I(DstAcc | SrcImmUByte | No64, em_aad),
F(DstAcc | ByteOp | No64, em_salc),
I(DstAcc | SrcXLat | ByteOp, em_mov),
/* 0xD8 - 0xDF */
N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
/* 0xE0 - 0xE7 */
X3(I(SrcImmByte | NearBranch, em_loop)),
I(SrcImmByte | NearBranch, em_jcxz),
I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
/* 0xE8 - 0xEF */
I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
I(SrcImmFAddr | No64, em_jmp_far),
D(SrcImmByte | ImplicitOps | NearBranch),
I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
/* 0xF0 - 0xF7 */
N, DI(ImplicitOps, icebp), N, N,
DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
G(ByteOp, group3), G(0, group3),
/* 0xF8 - 0xFF */
D(ImplicitOps), D(ImplicitOps),
I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};
static const struct opcode twobyte_table[256] = {
/* 0x00 - 0x0F */
G(0, group6), GD(0, &group7), N, N,
N, I(ImplicitOps | EmulateOnUD, em_syscall),
II(ImplicitOps | Priv, em_clts, clts), N,
DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
/* 0x10 - 0x1F */
N, N, N, N, N, N, N, N,
D(ImplicitOps | ModRM | SrcMem | NoAccess),
N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
/* 0x20 - 0x2F */
DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
check_cr_write),
IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
check_dr_write),
N, N, N, N,
GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
N, N, N, N,
/* 0x30 - 0x3F */
II(ImplicitOps | Priv, em_wrmsr, wrmsr),
IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
II(ImplicitOps | Priv, em_rdmsr, rdmsr),
IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
I(ImplicitOps | EmulateOnUD, em_sysenter),
I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
N, N,
N, N, N, N, N, N, N, N,
/* 0x40 - 0x4F */
X16(D(DstReg | SrcMem | ModRM)),
/* 0x50 - 0x5F */
N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
/* 0x60 - 0x6F */
N, N, N, N,
N, N, N, N,
N, N, N, N,
N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
/* 0x70 - 0x7F */
N, N, N, N,
N, N, N, N,
N, N, N, N,
N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
/* 0x80 - 0x8F */
X16(D(SrcImm | NearBranch)),
/* 0x90 - 0x9F */
X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
/* 0xA0 - 0xA7 */
I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
II(ImplicitOps, em_cpuid, cpuid),
F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
/* 0xA8 - 0xAF */
I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
/* 0xB0 - 0xB7 */
I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
/* 0xB8 - 0xBF */
N, N,
G(BitOp, group8),
F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
I(DstReg | SrcMem | ModRM, em_bsf_c),
I(DstReg | SrcMem | ModRM, em_bsr_c),
D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
/* 0xC0 - 0xC7 */
F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
N, ID(0, &instr_dual_0f_c3),
N, N, N, GD(0, &group9),
/* 0xC8 - 0xCF */
X8(I(DstReg, em_bswap)),
/* 0xD0 - 0xDF */
N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
/* 0xE0 - 0xEF */
N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
N, N, N, N, N, N, N, N,
/* 0xF0 - 0xFF */
N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};
static const struct instr_dual instr_dual_0f_38_f0 = {
I(DstReg | SrcMem | Mov, em_movbe), N
};
static const struct instr_dual instr_dual_0f_38_f1 = {
I(DstMem | SrcReg | Mov, em_movbe), N
};
static const struct gprefix three_byte_0f_38_f0 = {
ID(0, &instr_dual_0f_38_f0), N, N, N
};
static const struct gprefix three_byte_0f_38_f1 = {
ID(0, &instr_dual_0f_38_f1), N, N, N
};
/*
* The instructions below are selected by the mandatory prefix and
* indexed by the third opcode byte.
*/
static const struct opcode opcode_map_0f_38[256] = {
/* 0x00 - 0x7f */
X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
/* 0x80 - 0xef */
X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
/* 0xf0 - 0xf1 */
GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
/* 0xf2 - 0xff */
N, N, X4(N), X8(N)
};
#undef D
#undef N
#undef G
#undef GD
#undef I
#undef GP
#undef EXT
#undef MD
#undef ID
#undef D2bv
#undef D2bvIP
#undef I2bv
#undef I2bvIP
#undef F6ALU
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
unsigned size;
size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
if (size == 8)
size = 4;
return size;
}
static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
unsigned size, bool sign_extension)
{
int rc = X86EMUL_CONTINUE;
op->type = OP_IMM;
op->bytes = size;
op->addr.mem.ea = ctxt->_eip;
/* NB. Immediates are sign-extended as necessary. */
switch (op->bytes) {
case 1:
op->val = insn_fetch(s8, ctxt);
break;
case 2:
op->val = insn_fetch(s16, ctxt);
break;
case 4:
op->val = insn_fetch(s32, ctxt);
break;
case 8:
op->val = insn_fetch(s64, ctxt);
break;
}
if (!sign_extension) {
switch (op->bytes) {
case 1:
op->val &= 0xff;
break;
case 2:
op->val &= 0xffff;
break;
case 4:
op->val &= 0xffffffff;
break;
}
}
done:
return rc;
}
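/* Decode a single operand described by one Op* selector taken from ctxt->d. */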
static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
unsigned d)
{
int rc = X86EMUL_CONTINUE;
switch (d) {
case OpReg:
decode_register_operand(ctxt, op);
break;
case OpImmUByte:
rc = decode_imm(ctxt, op, 1, false);
break;
case OpMem:
ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
mem_common:
*op = ctxt->memop;
ctxt->memopp = op;
if (ctxt->d & BitOp)
fetch_bit_operand(ctxt);
op->orig_val = op->val;
break;
case OpMem64:
ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
goto mem_common;
case OpAcc:
op->type = OP_REG;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
fetch_register_operand(op);
op->orig_val = op->val;
break;
case OpAccLo:
op->type = OP_REG;
op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
fetch_register_operand(op);
op->orig_val = op->val;
break;
case OpAccHi:
if (ctxt->d & ByteOp) {
op->type = OP_NONE;
break;
}
op->type = OP_REG;
op->bytes = ctxt->op_bytes;
op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
fetch_register_operand(op);
op->orig_val = op->val;
break;
case OpDI:
op->type = OP_MEM;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.mem.ea =
register_address(ctxt, VCPU_REGS_RDI);
op->addr.mem.seg = VCPU_SREG_ES;
op->val = 0;
op->count = 1;
break;
case OpDX:
op->type = OP_REG;
op->bytes = 2;
op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
fetch_register_operand(op);
break;
case OpCL:
op->type = OP_IMM;
op->bytes = 1;
op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
break;
case OpImmByte:
rc = decode_imm(ctxt, op, 1, true);
break;
case OpOne:
op->type = OP_IMM;
op->bytes = 1;
op->val = 1;
break;
case OpImm:
rc = decode_imm(ctxt, op, imm_size(ctxt), true);
break;
case OpImm64:
rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
break;
case OpMem8:
ctxt->memop.bytes = 1;
if (ctxt->memop.type == OP_REG) {
ctxt->memop.addr.reg = decode_register(ctxt,
ctxt->modrm_rm, true);
fetch_register_operand(&ctxt->memop);
}
goto mem_common;
case OpMem16:
ctxt->memop.bytes = 2;
goto mem_common;
case OpMem32:
ctxt->memop.bytes = 4;
goto mem_common;
case OpImmU16:
rc = decode_imm(ctxt, op, 2, false);
break;
case OpImmU:
rc = decode_imm(ctxt, op, imm_size(ctxt), false);
break;
case OpSI:
op->type = OP_MEM;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.mem.ea =
register_address(ctxt, VCPU_REGS_RSI);
op->addr.mem.seg = ctxt->seg_override;
op->val = 0;
op->count = 1;
break;
case OpXLat:
op->type = OP_MEM;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.mem.ea =
address_mask(ctxt,
reg_read(ctxt, VCPU_REGS_RBX) +
(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
op->addr.mem.seg = ctxt->seg_override;
op->val = 0;
break;
case OpImmFAddr:
op->type = OP_IMM;
op->addr.mem.ea = ctxt->_eip;
op->bytes = ctxt->op_bytes + 2;
insn_fetch_arr(op->valptr, op->bytes, ctxt);
break;
case OpMemFAddr:
ctxt->memop.bytes = ctxt->op_bytes + 2;
goto mem_common;
case OpES:
op->type = OP_IMM;
op->val = VCPU_SREG_ES;
break;
case OpCS:
op->type = OP_IMM;
op->val = VCPU_SREG_CS;
break;
case OpSS:
op->type = OP_IMM;
op->val = VCPU_SREG_SS;
break;
case OpDS:
op->type = OP_IMM;
op->val = VCPU_SREG_DS;
break;
case OpFS:
op->type = OP_IMM;
op->val = VCPU_SREG_FS;
break;
case OpGS:
op->type = OP_IMM;
op->val = VCPU_SREG_GS;
break;
case OpImplicit:
/* Special instructions do their own operand decoding. */
default:
op->type = OP_NONE; /* Disable writeback. */
break;
}
done:
return rc;
}
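/*
 * Top-level decoder: copy or fetch the instruction bytes, parse legacy
 * and REX prefixes, look up the opcode (including the 0x0f and
 * 0x0f 0x38 maps), then decode ModRM/SIB and the src/src2/dst operands.
 */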
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
int rc = X86EMUL_CONTINUE;
int mode = ctxt->mode;
int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
bool op_prefix = false;
bool has_seg_override = false;
struct opcode opcode;
ctxt->memop.type = OP_NONE;
ctxt->memopp = NULL;
ctxt->_eip = ctxt->eip;
ctxt->fetch.ptr = ctxt->fetch.data;
ctxt->fetch.end = ctxt->fetch.data + insn_len;
ctxt->opcode_len = 1;
if (insn_len > 0)
memcpy(ctxt->fetch.data, insn, insn_len);
else {
rc = __do_insn_fetch_bytes(ctxt, 1);
if (rc != X86EMUL_CONTINUE)
return rc;
}
switch (mode) {
case X86EMUL_MODE_REAL:
case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
def_op_bytes = def_ad_bytes = 2;
break;
case X86EMUL_MODE_PROT32:
def_op_bytes = def_ad_bytes = 4;
break;
#ifdef CONFIG_X86_64
case X86EMUL_MODE_PROT64:
def_op_bytes = 4;
def_ad_bytes = 8;
break;
#endif
default:
return EMULATION_FAILED;
}
ctxt->op_bytes = def_op_bytes;
ctxt->ad_bytes = def_ad_bytes;
/* Legacy prefixes. */
for (;;) {
switch (ctxt->b = insn_fetch(u8, ctxt)) {
case 0x66: /* operand-size override */
op_prefix = true;
/* switch between 2/4 bytes */
ctxt->op_bytes = def_op_bytes ^ 6;
break;
case 0x67: /* address-size override */
if (mode == X86EMUL_MODE_PROT64)
/* switch between 4/8 bytes */
ctxt->ad_bytes = def_ad_bytes ^ 12;
else
/* switch between 2/4 bytes */
ctxt->ad_bytes = def_ad_bytes ^ 6;
break;
case 0x26: /* ES override */
case 0x2e: /* CS override */
case 0x36: /* SS override */
case 0x3e: /* DS override */
has_seg_override = true;
ctxt->seg_override = (ctxt->b >> 3) & 3;
break;
case 0x64: /* FS override */
case 0x65: /* GS override */
has_seg_override = true;
ctxt->seg_override = ctxt->b & 7;
break;
case 0x40 ... 0x4f: /* REX */
if (mode != X86EMUL_MODE_PROT64)
goto done_prefixes;
ctxt->rex_prefix = ctxt->b;
continue;
case 0xf0: /* LOCK */
ctxt->lock_prefix = 1;
break;
case 0xf2: /* REPNE/REPNZ */
case 0xf3: /* REP/REPE/REPZ */
ctxt->rep_prefix = ctxt->b;
break;
default:
goto done_prefixes;
}
/* Any legacy prefix after a REX prefix nullifies its effect. */
ctxt->rex_prefix = 0;
}
done_prefixes:
/* REX prefix. */
if (ctxt->rex_prefix & 8)
ctxt->op_bytes = 8; /* REX.W */
/* Opcode byte(s). */
opcode = opcode_table[ctxt->b];
/* Two-byte opcode? */
if (ctxt->b == 0x0f) {
ctxt->opcode_len = 2;
ctxt->b = insn_fetch(u8, ctxt);
opcode = twobyte_table[ctxt->b];
/* 0F_38 opcode map */
if (ctxt->b == 0x38) {
ctxt->opcode_len = 3;
ctxt->b = insn_fetch(u8, ctxt);
opcode = opcode_map_0f_38[ctxt->b];
}
}
ctxt->d = opcode.flags;
if (ctxt->d & ModRM)
ctxt->modrm = insn_fetch(u8, ctxt);
/* vex-prefix instructions are not implemented */
if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
(mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
ctxt->d = NotImpl;
}
while (ctxt->d & GroupMask) {
switch (ctxt->d & GroupMask) {
case Group:
goffset = (ctxt->modrm >> 3) & 7;
opcode = opcode.u.group[goffset];
break;
case GroupDual:
goffset = (ctxt->modrm >> 3) & 7;
if ((ctxt->modrm >> 6) == 3)
opcode = opcode.u.gdual->mod3[goffset];
else
opcode = opcode.u.gdual->mod012[goffset];
break;
case RMExt:
goffset = ctxt->modrm & 7;
opcode = opcode.u.group[goffset];
break;
case Prefix:
if (ctxt->rep_prefix && op_prefix)
return EMULATION_FAILED;
simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
switch (simd_prefix) {
case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
}
break;
case Escape:
if (ctxt->modrm > 0xbf)
opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
else
opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
break;
case InstrDual:
if ((ctxt->modrm >> 6) == 3)
opcode = opcode.u.idual->mod3;
else
opcode = opcode.u.idual->mod012;
break;
case ModeDual:
if (ctxt->mode == X86EMUL_MODE_PROT64)
opcode = opcode.u.mdual->mode64;
else
opcode = opcode.u.mdual->mode32;
break;
default:
return EMULATION_FAILED;
}
ctxt->d &= ~(u64)GroupMask;
ctxt->d |= opcode.flags;
}
/* Unrecognised? */
if (ctxt->d == 0)
return EMULATION_FAILED;
ctxt->execute = opcode.u.execute;
if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
return EMULATION_FAILED;
if (unlikely(ctxt->d &
(NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
No16))) {
/*
* These are copied unconditionally here, and checked unconditionally
* in x86_emulate_insn.
*/
ctxt->check_perm = opcode.check_perm;
ctxt->intercept = opcode.intercept;
if (ctxt->d & NotImpl)
return EMULATION_FAILED;
if (mode == X86EMUL_MODE_PROT64) {
if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
ctxt->op_bytes = 8;
else if (ctxt->d & NearBranch)
ctxt->op_bytes = 8;
}
if (ctxt->d & Op3264) {
if (mode == X86EMUL_MODE_PROT64)
ctxt->op_bytes = 8;
else
ctxt->op_bytes = 4;
}
if ((ctxt->d & No16) && ctxt->op_bytes == 2)
ctxt->op_bytes = 4;
if (ctxt->d & Sse)
ctxt->op_bytes = 16;
else if (ctxt->d & Mmx)
ctxt->op_bytes = 8;
}
/* ModRM and SIB bytes. */
if (ctxt->d & ModRM) {
rc = decode_modrm(ctxt, &ctxt->memop);
if (!has_seg_override) {
has_seg_override = true;
ctxt->seg_override = ctxt->modrm_seg;
}
} else if (ctxt->d & MemAbs)
rc = decode_abs(ctxt, &ctxt->memop);
if (rc != X86EMUL_CONTINUE)
goto done;
if (!has_seg_override)
ctxt->seg_override = VCPU_SREG_DS;
ctxt->memop.addr.mem.seg = ctxt->seg_override;
/*
* Decode and fetch the source operand: register, memory
* or immediate.
*/
rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
if (rc != X86EMUL_CONTINUE)
goto done;
/*
* Decode and fetch the second source operand: register, memory
* or immediate.
*/
rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
if (rc != X86EMUL_CONTINUE)
goto done;
/* Decode and fetch the destination operand: register or memory. */
rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
if (ctxt->rip_relative && likely(ctxt->memopp))
ctxt->memopp->addr.mem.ea = address_mask(ctxt,
ctxt->memopp->addr.mem.ea + ctxt->_eip);
done:
return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}
bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
return ctxt->d & PageTable;
}
static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
/* The second termination condition only applies for REPE
* and REPNE. Test whether the repeat string operation prefix is
* REPE/REPZ or REPNE/REPNZ and, if so, check the corresponding
* termination condition:
* - if REPE/REPZ and ZF = 0 then done
* - if REPNE/REPNZ and ZF = 1 then done
*/
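/* E.g. REPE CMPSB stops at the first mismatching byte pair, which clears ZF. */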
if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
(ctxt->b == 0xae) || (ctxt->b == 0xaf))
&& (((ctxt->rep_prefix == REPE_PREFIX) &&
((ctxt->eflags & X86_EFLAGS_ZF) == 0))
|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
return true;
return false;
}
static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
bool fault = false;
ctxt->ops->get_fpu(ctxt);
asm volatile("1: fwait \n\t"
"2: \n\t"
".pushsection .fixup,\"ax\" \n\t"
"3: \n\t"
"movb $1, %[fault] \n\t"
"jmp 2b \n\t"
".popsection \n\t"
_ASM_EXTABLE(1b, 3b)
: [fault]"+qm"(fault));
ctxt->ops->put_fpu(ctxt);
if (unlikely(fault))
return emulate_exception(ctxt, MF_VECTOR, 0, false);
return X86EMUL_CONTINUE;
}
static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
struct operand *op)
{
if (op->type == OP_MM)
read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}
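/*
 * Invoke a fastop stub: the stubs for the different operand sizes are
 * laid out FASTOP_SIZE bytes apart, so the right one is selected by
 * offsetting fop. Guest flags are installed around the call and read
 * back afterwards.
 */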
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
register void *__sp asm(_ASM_SP);
ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
if (!(ctxt->d & ByteOp))
fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
: "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
[fastop]"+S"(fop), "+r"(__sp)
: "c"(ctxt->src2.val));
ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
if (!fop) /* exception is returned in fop variable */
return emulate_de(ctxt);
return X86EMUL_CONTINUE;
}
void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
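/*
 * Wipe the per-insn decode state: every field from ->rip_relative up
 * to (but not including) ->modrm, relying on the struct field layout.
 */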
memset(&ctxt->rip_relative, 0,
(void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
ctxt->io_read.pos = 0;
ctxt->io_read.end = 0;
ctxt->mem_read.end = 0;
}
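/*
 * Execute one previously decoded instruction. Returns EMULATION_OK,
 * EMULATION_FAILED, EMULATION_RESTART (string op not yet finished) or
 * EMULATION_INTERCEPTED.
 */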
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
const struct x86_emulate_ops *ops = ctxt->ops;
int rc = X86EMUL_CONTINUE;
int saved_dst_type = ctxt->dst.type;
ctxt->mem_read.pos = 0;
/* LOCK prefix is allowed only with some instructions */
if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
rc = emulate_ud(ctxt);
goto done;
}
if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
rc = emulate_ud(ctxt);
goto done;
}
if (unlikely(ctxt->d &
(No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
(ctxt->d & Undefined)) {
rc = emulate_ud(ctxt);
goto done;
}
if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
|| ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
rc = emulate_ud(ctxt);
goto done;
}
if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
rc = emulate_nm(ctxt);
goto done;
}
if (ctxt->d & Mmx) {
rc = flush_pending_x87_faults(ctxt);
if (rc != X86EMUL_CONTINUE)
goto done;
/*
* Now that we know the fpu is exception safe, we can fetch
* operands from it.
*/
fetch_possible_mmx_operand(ctxt, &ctxt->src);
fetch_possible_mmx_operand(ctxt, &ctxt->src2);
if (!(ctxt->d & Mov))
fetch_possible_mmx_operand(ctxt, &ctxt->dst);
}
if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_PRE_EXCEPT);
if (rc != X86EMUL_CONTINUE)
goto done;
}
/* Instruction can only be executed in protected mode */
if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
rc = emulate_ud(ctxt);
goto done;
}
/* Privileged instruction can be executed only in CPL=0 */
if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
if (ctxt->d & PrivUD)
rc = emulate_ud(ctxt);
else
rc = emulate_gp(ctxt, 0);
goto done;
}
/* Do instruction specific permission checks */
if (ctxt->d & CheckPerm) {
rc = ctxt->check_perm(ctxt);
if (rc != X86EMUL_CONTINUE)
goto done;
}
if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_POST_EXCEPT);
if (rc != X86EMUL_CONTINUE)
goto done;
}
if (ctxt->rep_prefix && (ctxt->d & String)) {
/* All REP prefixes have the same first termination condition */
if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
string_registers_quirk(ctxt);
ctxt->eip = ctxt->_eip;
ctxt->eflags &= ~X86_EFLAGS_RF;
goto done;
}
}
}
if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
rc = segmented_read(ctxt, ctxt->src.addr.mem,
ctxt->src.valptr, ctxt->src.bytes);
if (rc != X86EMUL_CONTINUE)
goto done;
ctxt->src.orig_val64 = ctxt->src.val64;
}
if (ctxt->src2.type == OP_MEM) {
rc = segmented_read(ctxt, ctxt->src2.addr.mem,
&ctxt->src2.val, ctxt->src2.bytes);
if (rc != X86EMUL_CONTINUE)
goto done;
}
if ((ctxt->d & DstMask) == ImplicitOps)
goto special_insn;
if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
/* optimisation - avoid slow emulated read if Mov */
rc = segmented_read(ctxt, ctxt->dst.addr.mem,
&ctxt->dst.val, ctxt->dst.bytes);
if (rc != X86EMUL_CONTINUE) {
if (!(ctxt->d & NoWrite) &&
rc == X86EMUL_PROPAGATE_FAULT &&
ctxt->exception.vector == PF_VECTOR)
ctxt->exception.error_code |= PFERR_WRITE_MASK;
goto done;
}
}
/* Copy full 64-bit value for CMPXCHG8B. */
ctxt->dst.orig_val64 = ctxt->dst.val64;
special_insn:
if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_POST_MEMACCESS);
if (rc != X86EMUL_CONTINUE)
goto done;
}
if (ctxt->rep_prefix && (ctxt->d & String))
ctxt->eflags |= X86_EFLAGS_RF;
else
ctxt->eflags &= ~X86_EFLAGS_RF;
if (ctxt->execute) {
if (ctxt->d & Fastop) {
void (*fop)(struct fastop *) = (void *)ctxt->execute;
rc = fastop(ctxt, fop);
if (rc != X86EMUL_CONTINUE)
goto done;
goto writeback;
}
rc = ctxt->execute(ctxt);
if (rc != X86EMUL_CONTINUE)
goto done;
goto writeback;
}
if (ctxt->opcode_len == 2)
goto twobyte_insn;
else if (ctxt->opcode_len == 3)
goto threebyte_insn;
switch (ctxt->b) {
case 0x70 ... 0x7f: /* jcc (short) */
if (test_cc(ctxt->b, ctxt->eflags))
rc = jmp_rel(ctxt, ctxt->src.val);
break;
case 0x8d: /* lea r16/r32, m */
ctxt->dst.val = ctxt->src.addr.mem.ea;
break;
case 0x90 ... 0x97: /* nop / xchg reg, rax */
if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
ctxt->dst.type = OP_NONE;
else
rc = em_xchg(ctxt);
break;
case 0x98: /* cbw/cwde/cdqe */
switch (ctxt->op_bytes) {
case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
}
break;
case 0xcc: /* int3 */
rc = emulate_int(ctxt, 3);
break;
case 0xcd: /* int n */
rc = emulate_int(ctxt, ctxt->src.val);
break;
case 0xce: /* into */
if (ctxt->eflags & X86_EFLAGS_OF)
rc = emulate_int(ctxt, 4);
break;
case 0xe9: /* jmp rel */
case 0xeb: /* jmp rel short */
rc = jmp_rel(ctxt, ctxt->src.val);
ctxt->dst.type = OP_NONE; /* Disable writeback. */
break;
case 0xf4: /* hlt */
ctxt->ops->halt(ctxt);
break;
case 0xf5: /* cmc */
/* complement carry flag from eflags reg */
ctxt->eflags ^= X86_EFLAGS_CF;
break;
case 0xf8: /* clc */
ctxt->eflags &= ~X86_EFLAGS_CF;
break;
case 0xf9: /* stc */
ctxt->eflags |= X86_EFLAGS_CF;
break;
case 0xfc: /* cld */
ctxt->eflags &= ~X86_EFLAGS_DF;
break;
case 0xfd: /* std */
ctxt->eflags |= X86_EFLAGS_DF;
break;
default:
goto cannot_emulate;
}
if (rc != X86EMUL_CONTINUE)
goto done;
writeback:
if (ctxt->d & SrcWrite) {
BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
rc = writeback(ctxt, &ctxt->src);
if (rc != X86EMUL_CONTINUE)
goto done;
}
if (!(ctxt->d & NoWrite)) {
rc = writeback(ctxt, &ctxt->dst);
if (rc != X86EMUL_CONTINUE)
goto done;
}
/*
* restore dst type in case the decoding will be reused
* (happens for string instructions)
*/
ctxt->dst.type = saved_dst_type;
if ((ctxt->d & SrcMask) == SrcSI)
string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
if ((ctxt->d & DstMask) == DstDI)
string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
if (ctxt->rep_prefix && (ctxt->d & String)) {
unsigned int count;
struct read_cache *r = &ctxt->io_read;
if ((ctxt->d & SrcMask) == SrcSI)
count = ctxt->src.count;
else
count = ctxt->dst.count;
register_address_increment(ctxt, VCPU_REGS_RCX, -count);
if (!string_insn_completed(ctxt)) {
/*
* Re-enter the guest when the pio read-ahead buffer is empty
* or, if it is not used, after every 1024 iterations.
*/
if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
(r->end == 0 || r->end != r->pos)) {
/*
* Reset read cache. Usually happens before
* decode, but since instruction is restarted
* we have to do it here.
*/
ctxt->mem_read.end = 0;
writeback_registers(ctxt);
return EMULATION_RESTART;
}
goto done; /* skip rip writeback */
}
ctxt->eflags &= ~X86_EFLAGS_RF;
}
ctxt->eip = ctxt->_eip;
done:
if (rc == X86EMUL_PROPAGATE_FAULT) {
WARN_ON(ctxt->exception.vector > 0x1f);
ctxt->have_exception = true;
}
if (rc == X86EMUL_INTERCEPTED)
return EMULATION_INTERCEPTED;
if (rc == X86EMUL_CONTINUE)
writeback_registers(ctxt);
return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
twobyte_insn:
switch (ctxt->b) {
case 0x09: /* wbinvd */
(ctxt->ops->wbinvd)(ctxt);
break;
case 0x08: /* invd */
case 0x0d: /* GrpP (prefetch) */
case 0x18: /* Grp16 (prefetch/nop) */
case 0x1f: /* nop */
break;
case 0x20: /* mov cr, reg */
ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
break;
case 0x21: /* mov from dr to reg */
ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
break;
case 0x40 ... 0x4f: /* cmov */
if (test_cc(ctxt->b, ctxt->eflags))
ctxt->dst.val = ctxt->src.val;
else if (ctxt->op_bytes != 4)
ctxt->dst.type = OP_NONE; /* no writeback */
break;
case 0x80 ... 0x8f: /* jnz rel, etc*/
if (test_cc(ctxt->b, ctxt->eflags))
rc = jmp_rel(ctxt, ctxt->src.val);
break;
case 0x90 ... 0x9f: /* setcc r/m8 */
ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
break;
case 0xb6 ... 0xb7: /* movzx */
ctxt->dst.bytes = ctxt->op_bytes;
ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
: (u16) ctxt->src.val;
break;
case 0xbe ... 0xbf: /* movsx */
ctxt->dst.bytes = ctxt->op_bytes;
ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
(s16) ctxt->src.val;
break;
default:
goto cannot_emulate;
}
threebyte_insn:
if (rc != X86EMUL_CONTINUE)
goto done;
goto writeback;
cannot_emulate:
return EMULATION_FAILED;
}
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
invalidate_registers(ctxt);
}
void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
writeback_registers(ctxt);
}
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_5500_0 |
crossvul-cpp_data_bad_308_0 | /* Paravirtualization interfaces
Copyright (C) 2006 Rusty Russell IBM Corporation
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
2007 - x86_64 support added by Glauber de Oliveira Costa, Red Hat Inc
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/efi.h>
#include <linux/bcd.h>
#include <linux/highmem.h>
#include <linux/kprobes.h>
#include <asm/bug.h>
#include <asm/paravirt.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
#include <asm/time.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/delay.h>
#include <asm/fixmap.h>
#include <asm/apic.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/special_insns.h>
/*
* nop stub, which must not clobber anything *including the stack* to
* avoid confusing the entry prologues.
*/
extern void _paravirt_nop(void);
asm (".pushsection .entry.text, \"ax\"\n"
".global _paravirt_nop\n"
"_paravirt_nop:\n\t"
"ret\n\t"
".size _paravirt_nop, . - _paravirt_nop\n\t"
".type _paravirt_nop, @function\n\t"
".popsection");
/* identity function, which can be inlined */
u32 notrace _paravirt_ident_32(u32 x)
{
return x;
}
u64 notrace _paravirt_ident_64(u64 x)
{
return x;
}
void __init default_banner(void)
{
printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
pv_info.name);
}
/* Undefined instruction for dealing with missing ops pointers. */
static const unsigned char ud2a[] = { 0x0f, 0x0b };
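/* An x86 call/jmp rel32: one opcode byte followed by a 32-bit displacement. */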
struct branch {
unsigned char opcode;
u32 delta;
} __attribute__((packed));
unsigned paravirt_patch_call(void *insnbuf,
const void *target, u16 tgt_clobbers,
unsigned long addr, u16 site_clobbers,
unsigned len)
{
struct branch *b = insnbuf;
unsigned long delta = (unsigned long)target - (addr+5);
if (tgt_clobbers & ~site_clobbers)
return len; /* target would clobber too much for this site */
if (len < 5)
return len; /* call too long for patch site */
b->opcode = 0xe8; /* call */
b->delta = delta;
BUILD_BUG_ON(sizeof(*b) != 5);
return 5;
}
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
unsigned long addr, unsigned len)
{
struct branch *b = insnbuf;
unsigned long delta = (unsigned long)target - (addr+5);
if (len < 5)
return len; /* call too long for patch site */
b->opcode = 0xe9; /* jmp */
b->delta = delta;
return 5;
}
DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
void __init native_pv_lock_init(void)
{
if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
static_branch_disable(&virt_spin_lock_key);
}
/*
* Neat trick to map patch type back to the call within the
* corresponding structure.
*/
static void *get_call_destination(u8 type)
{
struct paravirt_patch_template tmpl = {
.pv_init_ops = pv_init_ops,
.pv_time_ops = pv_time_ops,
.pv_cpu_ops = pv_cpu_ops,
.pv_irq_ops = pv_irq_ops,
.pv_mmu_ops = pv_mmu_ops,
#ifdef CONFIG_PARAVIRT_SPINLOCKS
.pv_lock_ops = pv_lock_ops,
#endif
};
return *((void **)&tmpl + type);
}
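/*
 * Patch one pv_ops call site in place: nops are dropped entirely,
 * identity functions are inlined, iret/usergs_sysret64 become direct
 * jumps, and everything else becomes a direct call when the length and
 * clobber constraints allow.
 */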
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
unsigned long addr, unsigned len)
{
void *opfunc = get_call_destination(type);
unsigned ret;
if (opfunc == NULL)
/* If there's no function, patch it with a ud2a (BUG) */
ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
else if (opfunc == _paravirt_nop)
ret = 0;
/* identity functions just return their single argument */
else if (opfunc == _paravirt_ident_32)
ret = paravirt_patch_ident_32(insnbuf, len);
else if (opfunc == _paravirt_ident_64)
ret = paravirt_patch_ident_64(insnbuf, len);
else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret64))
/* If operation requires a jmp, then jmp */
ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
else
/* Otherwise call the function; assume target could
clobber any caller-save reg */
ret = paravirt_patch_call(insnbuf, opfunc, CLBR_ANY,
addr, clobbers, len);
return ret;
}
unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
const char *start, const char *end)
{
unsigned insn_len = end - start;
if (insn_len > len || start == NULL)
insn_len = len;
else
memcpy(insnbuf, start, insn_len);
return insn_len;
}
static void native_flush_tlb(void)
{
__native_flush_tlb();
}
/*
* Global pages have to be flushed a bit differently. Not a real
* performance problem because this does not happen often.
*/
static void native_flush_tlb_global(void)
{
__native_flush_tlb_global();
}
static void native_flush_tlb_one_user(unsigned long addr)
{
__native_flush_tlb_one_user(addr);
}
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
static u64 native_steal_clock(int cpu)
{
return 0;
}
/* These are in entry.S */
extern void native_iret(void);
extern void native_usergs_sysret64(void);
static struct resource reserve_ioports = {
.start = 0,
.end = IO_SPACE_LIMIT,
.name = "paravirt-ioport",
.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};
/*
* Reserve the whole legacy IO space to prevent any legacy drivers
* from wasting time probing for their hardware. This is a fairly
* brute-force approach to disabling all non-virtual drivers.
*
* Note that this must be called very early to have any effect.
*/
int paravirt_disable_iospace(void)
{
return request_resource(&ioport_resource, &reserve_ioports);
}
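/*
 * Lazy-mode tracking: a hypervisor may batch MMU or context-switch
 * updates; these helpers record, per CPU, which batching mode (if any)
 * is currently active.
 */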
static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;
static inline void enter_lazy(enum paravirt_lazy_mode mode)
{
BUG_ON(this_cpu_read(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
this_cpu_write(paravirt_lazy_mode, mode);
}
static void leave_lazy(enum paravirt_lazy_mode mode)
{
BUG_ON(this_cpu_read(paravirt_lazy_mode) != mode);
this_cpu_write(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
}
void paravirt_enter_lazy_mmu(void)
{
enter_lazy(PARAVIRT_LAZY_MMU);
}
void paravirt_leave_lazy_mmu(void)
{
leave_lazy(PARAVIRT_LAZY_MMU);
}
void paravirt_flush_lazy_mmu(void)
{
preempt_disable();
if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
arch_leave_lazy_mmu_mode();
arch_enter_lazy_mmu_mode();
}
preempt_enable();
}
void paravirt_start_context_switch(struct task_struct *prev)
{
BUG_ON(preemptible());
if (this_cpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
arch_leave_lazy_mmu_mode();
set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
}
enter_lazy(PARAVIRT_LAZY_CPU);
}
void paravirt_end_context_switch(struct task_struct *next)
{
BUG_ON(preemptible());
leave_lazy(PARAVIRT_LAZY_CPU);
if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
arch_enter_lazy_mmu_mode();
}
enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
{
if (in_interrupt())
return PARAVIRT_LAZY_NONE;
return this_cpu_read(paravirt_lazy_mode);
}
struct pv_info pv_info = {
.name = "bare hardware",
.kernel_rpl = 0,
.shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
#ifdef CONFIG_X86_64
.extra_user_64bit_cs = __USER_CS,
#endif
};
struct pv_init_ops pv_init_ops = {
.patch = native_patch,
};
struct pv_time_ops pv_time_ops = {
.sched_clock = native_sched_clock,
.steal_clock = native_steal_clock,
};
__visible struct pv_irq_ops pv_irq_ops = {
.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
.restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
.irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
.irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
.safe_halt = native_safe_halt,
.halt = native_halt,
};
__visible struct pv_cpu_ops pv_cpu_ops = {
.cpuid = native_cpuid,
.get_debugreg = native_get_debugreg,
.set_debugreg = native_set_debugreg,
.read_cr0 = native_read_cr0,
.write_cr0 = native_write_cr0,
.write_cr4 = native_write_cr4,
#ifdef CONFIG_X86_64
.read_cr8 = native_read_cr8,
.write_cr8 = native_write_cr8,
#endif
.wbinvd = native_wbinvd,
.read_msr = native_read_msr,
.write_msr = native_write_msr,
.read_msr_safe = native_read_msr_safe,
.write_msr_safe = native_write_msr_safe,
.read_pmc = native_read_pmc,
.load_tr_desc = native_load_tr_desc,
.set_ldt = native_set_ldt,
.load_gdt = native_load_gdt,
.load_idt = native_load_idt,
.store_tr = native_store_tr,
.load_tls = native_load_tls,
#ifdef CONFIG_X86_64
.load_gs_index = native_load_gs_index,
#endif
.write_ldt_entry = native_write_ldt_entry,
.write_gdt_entry = native_write_gdt_entry,
.write_idt_entry = native_write_idt_entry,
.alloc_ldt = paravirt_nop,
.free_ldt = paravirt_nop,
.load_sp0 = native_load_sp0,
#ifdef CONFIG_X86_64
.usergs_sysret64 = native_usergs_sysret64,
#endif
.iret = native_iret,
.swapgs = native_swapgs,
.set_iopl_mask = native_set_iopl_mask,
.io_delay = native_io_delay,
.start_context_switch = paravirt_nop,
.end_context_switch = paravirt_nop,
};
/* At this point, native_get/set_debugreg has real function entries */
NOKPROBE_SYMBOL(native_get_debugreg);
NOKPROBE_SYMBOL(native_set_debugreg);
NOKPROBE_SYMBOL(native_load_idt);
#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
/* 32-bit pagetable entries */
#define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
#else
/* 64-bit pagetable entries */
#define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
#endif
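/*
 * Sketch (assumption, the helpers live earlier in this file): on bare
 * hardware pte_val/make_pte and friends need no translation, so PTE_IDENT
 * wraps plain identity functions along the lines of:
 *
 *	u64 _paravirt_ident_64(u64 x) { return x; }
 */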
struct pv_mmu_ops pv_mmu_ops __ro_after_init = {
.read_cr2 = native_read_cr2,
.write_cr2 = native_write_cr2,
.read_cr3 = __native_read_cr3,
.write_cr3 = native_write_cr3,
.flush_tlb_user = native_flush_tlb,
.flush_tlb_kernel = native_flush_tlb_global,
.flush_tlb_one_user = native_flush_tlb_one_user,
.flush_tlb_others = native_flush_tlb_others,
.pgd_alloc = __paravirt_pgd_alloc,
.pgd_free = paravirt_nop,
.alloc_pte = paravirt_nop,
.alloc_pmd = paravirt_nop,
.alloc_pud = paravirt_nop,
.alloc_p4d = paravirt_nop,
.release_pte = paravirt_nop,
.release_pmd = paravirt_nop,
.release_pud = paravirt_nop,
.release_p4d = paravirt_nop,
.set_pte = native_set_pte,
.set_pte_at = native_set_pte_at,
.set_pmd = native_set_pmd,
.ptep_modify_prot_start = __ptep_modify_prot_start,
.ptep_modify_prot_commit = __ptep_modify_prot_commit,
#if CONFIG_PGTABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
.set_pte_atomic = native_set_pte_atomic,
.pte_clear = native_pte_clear,
.pmd_clear = native_pmd_clear,
#endif
.set_pud = native_set_pud,
.pmd_val = PTE_IDENT,
.make_pmd = PTE_IDENT,
#if CONFIG_PGTABLE_LEVELS >= 4
.pud_val = PTE_IDENT,
.make_pud = PTE_IDENT,
.set_p4d = native_set_p4d,
#if CONFIG_PGTABLE_LEVELS >= 5
.p4d_val = PTE_IDENT,
.make_p4d = PTE_IDENT,
.set_pgd = native_set_pgd,
#endif /* CONFIG_PGTABLE_LEVELS >= 5 */
#endif /* CONFIG_PGTABLE_LEVELS >= 4 */
#endif /* CONFIG_PGTABLE_LEVELS >= 3 */
.pte_val = PTE_IDENT,
.pgd_val = PTE_IDENT,
.make_pte = PTE_IDENT,
.make_pgd = PTE_IDENT,
.dup_mmap = paravirt_nop,
.exit_mmap = paravirt_nop,
.activate_mm = paravirt_nop,
.lazy_mode = {
.enter = paravirt_nop,
.leave = paravirt_nop,
.flush = paravirt_nop,
},
.set_fixmap = native_set_fixmap,
};
EXPORT_SYMBOL_GPL(pv_time_ops);
EXPORT_SYMBOL(pv_cpu_ops);
EXPORT_SYMBOL(pv_mmu_ops);
EXPORT_SYMBOL_GPL(pv_info);
EXPORT_SYMBOL(pv_irq_ops);
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_308_0 |
crossvul-cpp_data_good_2958_1 | /* vi:set ts=8 sts=4 sw=4 noet:
*
* VIM - Vi IMproved by Bram Moolenaar
*
* Do ":help uganda" in Vim to read copying and usage conditions.
* Do ":help credits" in Vim to see a list of people who contributed.
* See README.txt for an overview of the Vim source code.
*/
/*
* fileio.c: read from and write to a file
*/
#include "vim.h"
#if defined(__TANDEM) || defined(__MINT__)
# include <limits.h> /* for SSIZE_MAX */
#endif
#if defined(HAVE_UTIME) && defined(HAVE_UTIME_H)
# include <utime.h> /* for struct utimbuf */
#endif
#define BUFSIZE 8192 /* size of normal write buffer */
#define SMBUFSIZE 256 /* size of emergency write buffer */
/* Is there any system that doesn't have access()? */
#define USE_MCH_ACCESS
#ifdef FEAT_MBYTE
static char_u *next_fenc(char_u **pp);
# ifdef FEAT_EVAL
static char_u *readfile_charconvert(char_u *fname, char_u *fenc, int *fdp);
# endif
#endif
#ifdef FEAT_VIMINFO
static void check_marks_read(void);
#endif
#ifdef FEAT_CRYPT
static char_u *check_for_cryptkey(char_u *cryptkey, char_u *ptr, long *sizep, off_T *filesizep, int newfile, char_u *fname, int *did_ask);
#endif
#ifdef UNIX
static void set_file_time(char_u *fname, time_t atime, time_t mtime);
#endif
static int set_rw_fname(char_u *fname, char_u *sfname);
static int msg_add_fileformat(int eol_type);
static void msg_add_eol(void);
static int check_mtime(buf_T *buf, stat_T *s);
static int time_differs(long t1, long t2);
#ifdef FEAT_AUTOCMD
static int apply_autocmds_exarg(event_T event, char_u *fname, char_u *fname_io, int force, buf_T *buf, exarg_T *eap);
static int au_find_group(char_u *name);
# define AUGROUP_DEFAULT -1 /* default autocmd group */
# define AUGROUP_ERROR -2 /* erroneous autocmd group */
# define AUGROUP_ALL -3 /* all autocmd groups */
#endif
#if defined(FEAT_CRYPT) || defined(FEAT_MBYTE)
# define HAS_BW_FLAGS
# define FIO_LATIN1 0x01 /* convert Latin1 */
# define FIO_UTF8 0x02 /* convert UTF-8 */
# define FIO_UCS2 0x04 /* convert UCS-2 */
# define FIO_UCS4 0x08 /* convert UCS-4 */
# define FIO_UTF16 0x10 /* convert UTF-16 */
# ifdef WIN3264
# define FIO_CODEPAGE 0x20 /* convert MS-Windows codepage */
# define FIO_PUT_CP(x) (((x) & 0xffff) << 16) /* put codepage in top word */
# define FIO_GET_CP(x) (((x)>>16) & 0xffff) /* get codepage from top word */
# endif
# ifdef MACOS_CONVERT
# define FIO_MACROMAN 0x20 /* convert MacRoman */
# endif
# define FIO_ENDIAN_L 0x80 /* little endian */
# define FIO_ENCRYPTED 0x1000 /* encrypt written bytes */
# define FIO_NOCONVERT 0x2000 /* skip encoding conversion */
# define FIO_UCSBOM 0x4000 /* check for BOM at start of file */
# define FIO_ALL -1 /* allow all formats */
#endif
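/* Illustrative note (not in the original): get_fio_flags() combines these
 * bits, e.g. a plausible result for "utf-16le" is (FIO_UTF16 | FIO_ENDIAN_L)
 * and for "latin1" just FIO_LATIN1. */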
/* When converting, a read() or write() may leave some bytes to be converted
* for the next call. The value is guessed... */
#define CONV_RESTLEN 30
/* We have to guess how much a sequence of bytes may expand when converting
* with iconv() to be able to allocate a buffer. */
#define ICONV_MULT 8
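/* E.g. (illustrative) the single latin1 byte 0xA9 becomes the two UTF-8
 * bytes C2 A9; the factor 8 leaves headroom for far more expansive
 * conversions. */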
/*
* Structure to pass arguments from buf_write() to buf_write_bytes().
*/
struct bw_info
{
int bw_fd; /* file descriptor */
char_u *bw_buf; /* buffer with data to be written */
int bw_len; /* length of data */
#ifdef HAS_BW_FLAGS
int bw_flags; /* FIO_ flags */
#endif
#ifdef FEAT_CRYPT
buf_T *bw_buffer; /* buffer being written */
#endif
#ifdef FEAT_MBYTE
char_u bw_rest[CONV_RESTLEN]; /* not converted bytes */
int bw_restlen; /* nr of bytes in bw_rest[] */
int bw_first; /* first write call */
char_u *bw_conv_buf; /* buffer for writing converted chars */
int bw_conv_buflen; /* size of bw_conv_buf */
int bw_conv_error; /* set for conversion error */
linenr_T bw_conv_error_lnum; /* first line with error or zero */
linenr_T bw_start_lnum; /* line number at start of buffer */
# ifdef USE_ICONV
iconv_t bw_iconv_fd; /* descriptor for iconv() or -1 */
# endif
#endif
};
static int buf_write_bytes(struct bw_info *ip);
#ifdef FEAT_MBYTE
static linenr_T readfile_linenr(linenr_T linecnt, char_u *p, char_u *endp);
static int ucs2bytes(unsigned c, char_u **pp, int flags);
static int need_conversion(char_u *fenc);
static int get_fio_flags(char_u *ptr);
static char_u *check_for_bom(char_u *p, long size, int *lenp, int flags);
static int make_bom(char_u *buf, char_u *name);
# ifdef WIN3264
static int get_win_fio_flags(char_u *ptr);
# endif
# ifdef MACOS_CONVERT
static int get_mac_fio_flags(char_u *ptr);
# endif
#endif
static int move_lines(buf_T *frombuf, buf_T *tobuf);
#ifdef TEMPDIRNAMES
static void vim_settempdir(char_u *tempdir);
#endif
#ifdef FEAT_AUTOCMD
static char *e_auchangedbuf = N_("E812: Autocommands changed buffer or buffer name");
#endif
#ifdef FEAT_AUTOCMD
/*
* Set by the apply_autocmds_group function if the given event is equal to
* EVENT_FILETYPE. Used by the readfile function in order to determine if
* EVENT_BUFREADPOST triggered the EVENT_FILETYPE.
*
* Relying on this value requires one to reset it prior calling
* apply_autocmds_group.
*/
static int au_did_filetype INIT(= FALSE);
#endif
void
filemess(
buf_T *buf,
char_u *name,
char_u *s,
int attr)
{
int msg_scroll_save;
if (msg_silent != 0)
return;
msg_add_fname(buf, name); /* put file name in IObuff with quotes */
/* If it's extremely long, truncate it. */
if (STRLEN(IObuff) > IOSIZE - 80)
IObuff[IOSIZE - 80] = NUL;
STRCAT(IObuff, s);
/*
* The first message may have to start a new line.
* Further ones overwrite the previous one; reset msg_scroll before
* calling filemess().
*/
msg_scroll_save = msg_scroll;
if (shortmess(SHM_OVERALL) && !exiting && p_verbose == 0)
msg_scroll = FALSE;
if (!msg_scroll) /* wait a bit when overwriting an error msg */
check_for_delay(FALSE);
msg_start();
msg_scroll = msg_scroll_save;
msg_scrolled_ign = TRUE;
/* may truncate the message to avoid a hit-return prompt */
msg_outtrans_attr(msg_may_trunc(FALSE, IObuff), attr);
msg_clr_eos();
out_flush();
msg_scrolled_ign = FALSE;
}
/*
* Read lines from file "fname" into the buffer after line "from".
*
* 1. We allocate blocks with lalloc, as big as possible.
* 2. Each block is filled with characters from the file with a single read().
* 3. The lines are inserted in the buffer with ml_append().
*
* (caller must check that fname != NULL, unless READ_STDIN is used)
*
* "lines_to_skip" is the number of lines that must be skipped
* "lines_to_read" is the number of lines that are appended
* When not recovering lines_to_skip is 0 and lines_to_read MAXLNUM.
*
* flags:
* READ_NEW starting to edit a new buffer
* READ_FILTER reading filter output
* READ_STDIN read from stdin instead of a file
* READ_BUFFER read from curbuf instead of a file (converting after reading
* stdin)
* READ_DUMMY read into a dummy buffer (to check if file contents changed)
* READ_KEEP_UNDO don't clear undo info or read it from a file
* READ_FIFO read from fifo/socket instead of a file
*
* return FAIL for failure, NOTDONE for directory (failure), or OK
*/
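/*
 * Illustrative call (not from the original source): loading a file into a
 * freshly created buffer would look roughly like
 *	readfile(fname, NULL, (linenr_T)0, (linenr_T)0, (linenr_T)MAXLNUM,
 *							eap, READ_NEW);
 */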
int
readfile(
char_u *fname,
char_u *sfname,
linenr_T from,
linenr_T lines_to_skip,
linenr_T lines_to_read,
exarg_T *eap, /* can be NULL! */
int flags)
{
int fd = 0;
int newfile = (flags & READ_NEW);
int check_readonly;
int filtering = (flags & READ_FILTER);
int read_stdin = (flags & READ_STDIN);
int read_buffer = (flags & READ_BUFFER);
int read_fifo = (flags & READ_FIFO);
int set_options = newfile || read_buffer
|| (eap != NULL && eap->read_edit);
linenr_T read_buf_lnum = 1; /* next line to read from curbuf */
colnr_T read_buf_col = 0; /* next char to read from this line */
char_u c;
linenr_T lnum = from;
char_u *ptr = NULL; /* pointer into read buffer */
char_u *buffer = NULL; /* read buffer */
char_u *new_buffer = NULL; /* init to shut up gcc */
char_u *line_start = NULL; /* init to shut up gcc */
int wasempty; /* buffer was empty before reading */
colnr_T len;
long size = 0;
char_u *p;
off_T filesize = 0;
int skip_read = FALSE;
#ifdef FEAT_CRYPT
char_u *cryptkey = NULL;
int did_ask_for_key = FALSE;
#endif
#ifdef FEAT_PERSISTENT_UNDO
context_sha256_T sha_ctx;
int read_undo_file = FALSE;
#endif
int split = 0; /* number of split lines */
#define UNKNOWN 0x0fffffff /* file size is unknown */
linenr_T linecnt;
int error = FALSE; /* errors encountered */
int ff_error = EOL_UNKNOWN; /* file format with errors */
long linerest = 0; /* remaining chars in line */
#ifdef UNIX
int perm = 0;
int swap_mode = -1; /* protection bits for swap file */
#else
int perm;
#endif
int fileformat = 0; /* end-of-line format */
int keep_fileformat = FALSE;
stat_T st;
int file_readonly;
linenr_T skip_count = 0;
linenr_T read_count = 0;
int msg_save = msg_scroll;
linenr_T read_no_eol_lnum = 0; /* non-zero lnum when last line of
* last read was missing the eol */
int try_mac;
int try_dos;
int try_unix;
int file_rewind = FALSE;
#ifdef FEAT_MBYTE
int can_retry;
linenr_T conv_error = 0; /* line nr with conversion error */
linenr_T illegal_byte = 0; /* line nr with illegal byte */
int keep_dest_enc = FALSE; /* don't retry when char doesn't fit
in destination encoding */
int bad_char_behavior = BAD_REPLACE;
/* BAD_KEEP, BAD_DROP or character to
* replace with */
char_u *tmpname = NULL; /* name of 'charconvert' output file */
int fio_flags = 0;
char_u *fenc; /* fileencoding to use */
int fenc_alloced; /* fenc_next is in allocated memory */
char_u *fenc_next = NULL; /* next item in 'fencs' or NULL */
int advance_fenc = FALSE;
long real_size = 0;
# ifdef USE_ICONV
iconv_t iconv_fd = (iconv_t)-1; /* descriptor for iconv() or -1 */
# ifdef FEAT_EVAL
int did_iconv = FALSE; /* TRUE when iconv() failed and trying
'charconvert' next */
# endif
# endif
int converted = FALSE; /* TRUE if conversion done */
int notconverted = FALSE; /* TRUE if conversion wanted but it
wasn't possible */
char_u conv_rest[CONV_RESTLEN];
int conv_restlen = 0; /* nr of bytes in conv_rest[] */
#endif
#ifdef FEAT_AUTOCMD
buf_T *old_curbuf;
char_u *old_b_ffname;
char_u *old_b_fname;
int using_b_ffname;
int using_b_fname;
#endif
#ifdef FEAT_AUTOCMD
au_did_filetype = FALSE; /* reset before triggering any autocommands */
#endif
curbuf->b_no_eol_lnum = 0; /* in case it was set by the previous read */
/*
* If there is no file name yet, use the one for the read file.
* BF_NOTEDITED is set to reflect this.
* Don't do this for a read from a filter.
* Only do this when 'cpoptions' contains the 'f' flag.
*/
if (curbuf->b_ffname == NULL
&& !filtering
&& fname != NULL
&& vim_strchr(p_cpo, CPO_FNAMER) != NULL
&& !(flags & READ_DUMMY))
{
if (set_rw_fname(fname, sfname) == FAIL)
return FAIL;
}
#ifdef FEAT_AUTOCMD
/* Remember the initial values of curbuf, curbuf->b_ffname and
* curbuf->b_fname to detect whether they are altered as a result of
* executing nasty autocommands. Also check if "fname" and "sfname"
* point to one of these values. */
old_curbuf = curbuf;
old_b_ffname = curbuf->b_ffname;
old_b_fname = curbuf->b_fname;
using_b_ffname = (fname == curbuf->b_ffname)
|| (sfname == curbuf->b_ffname);
using_b_fname = (fname == curbuf->b_fname) || (sfname == curbuf->b_fname);
#endif
/* After reading a file the cursor line changes but we don't want to
* display the line. */
ex_no_reprint = TRUE;
/* don't display the file info for another buffer now */
need_fileinfo = FALSE;
/*
* For Unix: Use the short file name whenever possible.
* Avoids problems with networks and when directory names are changed.
* Don't do this for MS-DOS, a "cd" in a sub-shell may have moved us to
* another directory, which we don't detect.
*/
if (sfname == NULL)
sfname = fname;
#if defined(UNIX)
fname = sfname;
#endif
#ifdef FEAT_AUTOCMD
/*
* The BufReadCmd and FileReadCmd events intercept the reading process by
* executing the associated commands instead.
*/
if (!filtering && !read_stdin && !read_buffer)
{
pos_T pos;
pos = curbuf->b_op_start;
/* Set '[ mark to the line above where the lines go (line 1 if zero). */
curbuf->b_op_start.lnum = ((from == 0) ? 1 : from);
curbuf->b_op_start.col = 0;
if (newfile)
{
if (apply_autocmds_exarg(EVENT_BUFREADCMD, NULL, sfname,
FALSE, curbuf, eap))
#ifdef FEAT_EVAL
return aborting() ? FAIL : OK;
#else
return OK;
#endif
}
else if (apply_autocmds_exarg(EVENT_FILEREADCMD, sfname, sfname,
FALSE, NULL, eap))
#ifdef FEAT_EVAL
return aborting() ? FAIL : OK;
#else
return OK;
#endif
curbuf->b_op_start = pos;
}
#endif
if ((shortmess(SHM_OVER) || curbuf->b_help) && p_verbose == 0)
msg_scroll = FALSE; /* overwrite previous file message */
else
msg_scroll = TRUE; /* don't overwrite previous file message */
/*
* If the name ends in a path separator, we can't open it. Check here,
* because reading the file may actually work, but then creating the swap
* file may destroy it! Reported on MS-DOS and Win 95.
* If the name is too long we might crash further on, quit here.
*/
if (fname != NULL && *fname != NUL)
{
p = fname + STRLEN(fname);
if (after_pathsep(fname, p) || STRLEN(fname) >= MAXPATHL)
{
filemess(curbuf, fname, (char_u *)_("Illegal file name"), 0);
msg_end();
msg_scroll = msg_save;
return FAIL;
}
}
if (!read_stdin && !read_buffer && !read_fifo)
{
#ifdef UNIX
/*
* On Unix it is possible to read a directory, so we have to
* check for it before the mch_open().
*/
perm = mch_getperm(fname);
if (perm >= 0 && !S_ISREG(perm) /* not a regular file ... */
# ifdef S_ISFIFO
&& !S_ISFIFO(perm) /* ... or fifo */
# endif
# ifdef S_ISSOCK
&& !S_ISSOCK(perm) /* ... or socket */
# endif
# ifdef OPEN_CHR_FILES
&& !(S_ISCHR(perm) && is_dev_fd_file(fname))
/* ... or a character special file named /dev/fd/<n> */
# endif
)
{
int retval = FAIL;
if (S_ISDIR(perm))
{
filemess(curbuf, fname, (char_u *)_("is a directory"), 0);
retval = NOTDONE;
}
else
filemess(curbuf, fname, (char_u *)_("is not a file"), 0);
msg_end();
msg_scroll = msg_save;
return retval;
}
#endif
#if defined(MSWIN)
/*
* MS-Windows allows opening a device, but we will probably get stuck
* trying to read it.
*/
if (!p_odev && mch_nodetype(fname) == NODE_WRITABLE)
{
filemess(curbuf, fname, (char_u *)_("is a device (disabled with 'opendevice' option)"), 0);
msg_end();
msg_scroll = msg_save;
return FAIL;
}
#endif
}
/* Set default or forced 'fileformat' and 'binary'. */
set_file_options(set_options, eap);
/*
* When opening a new file we take the readonly flag from the file.
* Default is r/w, can be set to r/o below.
* Don't reset it when in readonly mode
* Only set/reset b_p_ro when BF_CHECK_RO is set.
*/
check_readonly = (newfile && (curbuf->b_flags & BF_CHECK_RO));
if (check_readonly && !readonlymode)
curbuf->b_p_ro = FALSE;
if (newfile && !read_stdin && !read_buffer && !read_fifo)
{
/* Remember time of file. */
if (mch_stat((char *)fname, &st) >= 0)
{
buf_store_time(curbuf, &st, fname);
curbuf->b_mtime_read = curbuf->b_mtime;
#ifdef UNIX
/*
* Use the protection bits of the original file for the swap file.
* This makes it possible for others to read the name of the
* edited file from the swapfile, but only if they can read the
* edited file.
* Remove the "write" and "execute" bits for group and others
* (they must not write the swapfile).
* Add the "read" and "write" bits for the user, otherwise we may
* not be able to write to the file ourselves.
* Setting the bits is done below, after creating the swap file.
*/
swap_mode = (st.st_mode & 0644) | 0600;
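/* Worked example (illustrative): a file with mode 0660 gives
 * swap_mode = (0660 & 0644) | 0600 = 0640, i.e. group members who could
 * read the file may also read the swap file, but nobody else. */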
#endif
#ifdef FEAT_CW_EDITOR
/* Get the FSSpec on MacOS
* TODO: Update it properly when the buffer name changes
*/
(void)GetFSSpecFromPath(curbuf->b_ffname, &curbuf->b_FSSpec);
#endif
#ifdef VMS
curbuf->b_fab_rfm = st.st_fab_rfm;
curbuf->b_fab_rat = st.st_fab_rat;
curbuf->b_fab_mrs = st.st_fab_mrs;
#endif
}
else
{
curbuf->b_mtime = 0;
curbuf->b_mtime_read = 0;
curbuf->b_orig_size = 0;
curbuf->b_orig_mode = 0;
}
/* Reset the "new file" flag. It will be set again below when the
* file doesn't exist. */
curbuf->b_flags &= ~(BF_NEW | BF_NEW_W);
}
/*
* for UNIX: check readonly with perm and mch_access()
* for Amiga: check readonly by trying to open the file for writing
*/
file_readonly = FALSE;
if (read_stdin)
{
#if defined(MSWIN)
/* Force binary I/O on stdin to avoid CR-LF -> LF conversion. */
setmode(0, O_BINARY);
#endif
}
else if (!read_buffer)
{
#ifdef USE_MCH_ACCESS
if (
# ifdef UNIX
!(perm & 0222) ||
# endif
mch_access((char *)fname, W_OK))
file_readonly = TRUE;
fd = mch_open((char *)fname, O_RDONLY | O_EXTRA, 0);
#else
if (!newfile
|| readonlymode
|| (fd = mch_open((char *)fname, O_RDWR | O_EXTRA, 0)) < 0)
{
file_readonly = TRUE;
/* try to open ro */
fd = mch_open((char *)fname, O_RDONLY | O_EXTRA, 0);
}
#endif
}
if (fd < 0) /* cannot open at all */
{
#ifndef UNIX
int isdir_f;
#endif
msg_scroll = msg_save;
#ifndef UNIX
/*
* On Amiga we can't open a directory, check here.
*/
isdir_f = (mch_isdir(fname));
perm = mch_getperm(fname); /* check if the file exists */
if (isdir_f)
{
filemess(curbuf, sfname, (char_u *)_("is a directory"), 0);
curbuf->b_p_ro = TRUE; /* must use "w!" now */
}
else
#endif
if (newfile)
{
if (perm < 0
#ifdef ENOENT
&& errno == ENOENT
#endif
)
{
/*
* Set the 'new-file' flag, so that when the file has
* been created by someone else, a ":w" will complain.
*/
curbuf->b_flags |= BF_NEW;
/* Create a swap file now, so that other Vims are warned
* that we are editing this file. Don't do this for a
* "nofile" or "nowrite" buffer type. */
#ifdef FEAT_QUICKFIX
if (!bt_dontwrite(curbuf))
#endif
{
check_need_swap(newfile);
#ifdef FEAT_AUTOCMD
/* SwapExists autocommand may mess things up */
if (curbuf != old_curbuf
|| (using_b_ffname
&& (old_b_ffname != curbuf->b_ffname))
|| (using_b_fname
&& (old_b_fname != curbuf->b_fname)))
{
EMSG(_(e_auchangedbuf));
return FAIL;
}
#endif
}
if (dir_of_file_exists(fname))
filemess(curbuf, sfname, (char_u *)_("[New File]"), 0);
else
filemess(curbuf, sfname,
(char_u *)_("[New DIRECTORY]"), 0);
#ifdef FEAT_VIMINFO
/* Even though this is a new file, it might have been
* edited before and deleted. Get the old marks. */
check_marks_read();
#endif
#ifdef FEAT_MBYTE
/* Set forced 'fileencoding'. */
if (eap != NULL)
set_forced_fenc(eap);
#endif
#ifdef FEAT_AUTOCMD
apply_autocmds_exarg(EVENT_BUFNEWFILE, sfname, sfname,
FALSE, curbuf, eap);
#endif
/* remember the current fileformat */
save_file_ff(curbuf);
#if defined(FEAT_AUTOCMD) && defined(FEAT_EVAL)
if (aborting()) /* autocmds may abort script processing */
return FAIL;
#endif
return OK; /* a new file is not an error */
}
else
{
filemess(curbuf, sfname, (char_u *)(
# ifdef EFBIG
(errno == EFBIG) ? _("[File too big]") :
# endif
# ifdef EOVERFLOW
(errno == EOVERFLOW) ? _("[File too big]") :
# endif
_("[Permission Denied]")), 0);
curbuf->b_p_ro = TRUE; /* must use "w!" now */
}
}
return FAIL;
}
/*
* Only set the 'ro' flag for readonly files the first time they are
* loaded. Help files always get readonly mode
*/
if ((check_readonly && file_readonly) || curbuf->b_help)
curbuf->b_p_ro = TRUE;
if (set_options)
{
/* Don't change 'eol' if reading from buffer as it will already be
* correctly set when reading stdin. */
if (!read_buffer)
{
curbuf->b_p_eol = TRUE;
curbuf->b_start_eol = TRUE;
}
#ifdef FEAT_MBYTE
curbuf->b_p_bomb = FALSE;
curbuf->b_start_bomb = FALSE;
#endif
}
/* Create a swap file now, so that other Vims are warned that we are
* editing this file.
* Don't do this for a "nofile" or "nowrite" buffer type. */
#ifdef FEAT_QUICKFIX
if (!bt_dontwrite(curbuf))
#endif
{
check_need_swap(newfile);
#ifdef FEAT_AUTOCMD
if (!read_stdin && (curbuf != old_curbuf
|| (using_b_ffname && (old_b_ffname != curbuf->b_ffname))
|| (using_b_fname && (old_b_fname != curbuf->b_fname))))
{
EMSG(_(e_auchangedbuf));
if (!read_buffer)
close(fd);
return FAIL;
}
#endif
#ifdef UNIX
/* Set swap file protection bits after creating it. */
if (swap_mode > 0 && curbuf->b_ml.ml_mfp != NULL
&& curbuf->b_ml.ml_mfp->mf_fname != NULL)
{
char_u *swap_fname = curbuf->b_ml.ml_mfp->mf_fname;
/*
* If the group-read bit is set but not the world-read bit, then
* the group must be equal to the group of the original file. If
* we can't make that happen then reset the group-read bit. This
* avoids making the swap file readable to more users when the
* primary group of the user is too permissive.
*/
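/* Continuing the 0660 example (illustrative): swap_mode is 0640, so
 * (0640 & 044) == 040 holds; if the fchown() below cannot give the swap
 * file the original file's group, swap_mode &= 0600 drops group read. */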
if ((swap_mode & 044) == 040)
{
stat_T swap_st;
if (mch_stat((char *)swap_fname, &swap_st) >= 0
&& st.st_gid != swap_st.st_gid
&& fchown(curbuf->b_ml.ml_mfp->mf_fd, -1, st.st_gid)
== -1)
swap_mode &= 0600;
}
(void)mch_setperm(swap_fname, (long)swap_mode);
}
#endif
}
#if defined(HAS_SWAP_EXISTS_ACTION)
/* If "Quit" selected at ATTENTION dialog, don't load the file */
if (swap_exists_action == SEA_QUIT)
{
if (!read_buffer && !read_stdin)
close(fd);
return FAIL;
}
#endif
++no_wait_return; /* don't wait for return yet */
/*
* Set '[ mark to the line above where the lines go (line 1 if zero).
*/
curbuf->b_op_start.lnum = ((from == 0) ? 1 : from);
curbuf->b_op_start.col = 0;
try_mac = (vim_strchr(p_ffs, 'm') != NULL);
try_dos = (vim_strchr(p_ffs, 'd') != NULL);
try_unix = (vim_strchr(p_ffs, 'x') != NULL);
#ifdef FEAT_AUTOCMD
if (!read_buffer)
{
int m = msg_scroll;
int n = msg_scrolled;
/*
* The file must be closed again, the autocommands may want to change
* the file before reading it.
*/
if (!read_stdin)
close(fd); /* ignore errors */
/*
* The output from the autocommands should not overwrite anything and
* should not be overwritten: Set msg_scroll, restore its value if no
* output was done.
*/
msg_scroll = TRUE;
if (filtering)
apply_autocmds_exarg(EVENT_FILTERREADPRE, NULL, sfname,
FALSE, curbuf, eap);
else if (read_stdin)
apply_autocmds_exarg(EVENT_STDINREADPRE, NULL, sfname,
FALSE, curbuf, eap);
else if (newfile)
apply_autocmds_exarg(EVENT_BUFREADPRE, NULL, sfname,
FALSE, curbuf, eap);
else
apply_autocmds_exarg(EVENT_FILEREADPRE, sfname, sfname,
FALSE, NULL, eap);
/* autocommands may have changed it */
try_mac = (vim_strchr(p_ffs, 'm') != NULL);
try_dos = (vim_strchr(p_ffs, 'd') != NULL);
try_unix = (vim_strchr(p_ffs, 'x') != NULL);
if (msg_scrolled == n)
msg_scroll = m;
#ifdef FEAT_EVAL
if (aborting()) /* autocmds may abort script processing */
{
--no_wait_return;
msg_scroll = msg_save;
curbuf->b_p_ro = TRUE; /* must use "w!" now */
return FAIL;
}
#endif
/*
* Don't allow the autocommands to change the current buffer.
* Try to re-open the file.
*
* Don't allow the autocommands to change the buffer name either
* (cd for example) if it invalidates fname or sfname.
*/
if (!read_stdin && (curbuf != old_curbuf
|| (using_b_ffname && (old_b_ffname != curbuf->b_ffname))
|| (using_b_fname && (old_b_fname != curbuf->b_fname))
|| (fd = mch_open((char *)fname, O_RDONLY | O_EXTRA, 0)) < 0))
{
--no_wait_return;
msg_scroll = msg_save;
if (fd < 0)
EMSG(_("E200: *ReadPre autocommands made the file unreadable"));
else
EMSG(_("E201: *ReadPre autocommands must not change current buffer"));
curbuf->b_p_ro = TRUE; /* must use "w!" now */
return FAIL;
}
}
#endif /* FEAT_AUTOCMD */
/* Autocommands may add lines to the file, need to check if it is empty */
wasempty = (curbuf->b_ml.ml_flags & ML_EMPTY);
if (!recoverymode && !filtering && !(flags & READ_DUMMY))
{
/*
* Show the user that we are busy reading the input. Sometimes this
* may take a while. When reading from stdin another program may
* still be running, don't move the cursor to the last line, unless
* always using the GUI.
*/
if (read_stdin)
{
#ifndef ALWAYS_USE_GUI
mch_msg(_("Vim: Reading from stdin...\n"));
#endif
#ifdef FEAT_GUI
/* Also write a message in the GUI window, if there is one. */
if (gui.in_use && !gui.dying && !gui.starting)
{
p = (char_u *)_("Reading from stdin...");
gui_write(p, (int)STRLEN(p));
}
#endif
}
else if (!read_buffer)
filemess(curbuf, sfname, (char_u *)"", 0);
}
msg_scroll = FALSE; /* overwrite the file message */
/*
* Set linecnt now, before the "retry" caused by a wrong guess for
* fileformat, and after the autocommands, which may change the line count.
*/
linecnt = curbuf->b_ml.ml_line_count;
#ifdef FEAT_MBYTE
/* "++bad=" argument. */
if (eap != NULL && eap->bad_char != 0)
{
bad_char_behavior = eap->bad_char;
if (set_options)
curbuf->b_bad_char = eap->bad_char;
}
else
curbuf->b_bad_char = 0;
/*
* Decide which 'encoding' to use or use first.
*/
if (eap != NULL && eap->force_enc != 0)
{
fenc = enc_canonize(eap->cmd + eap->force_enc);
fenc_alloced = TRUE;
keep_dest_enc = TRUE;
}
else if (curbuf->b_p_bin)
{
fenc = (char_u *)""; /* binary: don't convert */
fenc_alloced = FALSE;
}
else if (curbuf->b_help)
{
char_u firstline[80];
int fc;
/* Help files are either utf-8 or latin1. Try utf-8 first, if this
* fails it must be latin1.
* Always do this when 'encoding' is "utf-8". Otherwise only do
* this when needed to avoid [converted] remarks all the time.
* It is needed when the first line contains non-ASCII characters.
* That is only in *.??x files. */
fenc = (char_u *)"latin1";
c = enc_utf8;
if (!c && !read_stdin)
{
fc = fname[STRLEN(fname) - 1];
if (TOLOWER_ASC(fc) == 'x')
{
/* Read the first line (and a bit more). Immediately rewind to
* the start of the file. If the read() fails "len" is -1. */
len = read_eintr(fd, firstline, 80);
vim_lseek(fd, (off_T)0L, SEEK_SET);
for (p = firstline; p < firstline + len; ++p)
if (*p >= 0x80)
{
c = TRUE;
break;
}
}
}
if (c)
{
fenc_next = fenc;
fenc = (char_u *)"utf-8";
/* When the file is utf-8 but a character doesn't fit in
 * 'encoding', don't retry. In help text, editing raw utf-8
 * bytes makes no sense. */
if (!enc_utf8)
keep_dest_enc = TRUE;
}
fenc_alloced = FALSE;
}
else if (*p_fencs == NUL)
{
fenc = curbuf->b_p_fenc; /* use format from buffer */
fenc_alloced = FALSE;
}
else
{
fenc_next = p_fencs; /* try items in 'fileencodings' */
fenc = next_fenc(&fenc_next);
fenc_alloced = TRUE;
}
#endif
/*
* Jump back here to retry reading the file in different ways.
* Reasons to retry:
* - encoding conversion failed: try another one from "fenc_next"
* - BOM detected and fenc was set, need to setup conversion
* - "fileformat" check failed: try another
*
* Variables set for special retry actions:
* "file_rewind" Rewind the file to start reading it again.
* "advance_fenc" Advance "fenc" using "fenc_next".
* "skip_read" Re-use already read bytes (BOM detected).
* "did_iconv" iconv() conversion failed, try 'charconvert'.
* "keep_fileformat" Don't reset "fileformat".
*
* Other status indicators:
* "tmpname" When != NULL did conversion with 'charconvert'.
* Output file has to be deleted afterwards.
* "iconv_fd" When != -1 did conversion with iconv().
*/
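/*
 * Illustrative walk-through (not in the original): with 'fileencodings'
 * set to "ucs-bom,utf-8,latin1" and a plain latin1 file:
 * pass 1: fenc "ucs-bom", no BOM found -> advance_fenc + skip_read, retry
 * pass 2: fenc "utf-8", illegal byte seen -> file_rewind + advance_fenc
 * pass 3: fenc "latin1", a single-byte encoding, always succeeds
 */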
retry:
if (file_rewind)
{
if (read_buffer)
{
read_buf_lnum = 1;
read_buf_col = 0;
}
else if (read_stdin || vim_lseek(fd, (off_T)0L, SEEK_SET) != 0)
{
/* Can't rewind the file, give up. */
error = TRUE;
goto failed;
}
/* Delete the previously read lines. */
while (lnum > from)
ml_delete(lnum--, FALSE);
file_rewind = FALSE;
#ifdef FEAT_MBYTE
if (set_options)
{
curbuf->b_p_bomb = FALSE;
curbuf->b_start_bomb = FALSE;
}
conv_error = 0;
#endif
}
/*
* When retrying with another "fenc" and the first time "fileformat"
* will be reset.
*/
if (keep_fileformat)
keep_fileformat = FALSE;
else
{
if (eap != NULL && eap->force_ff != 0)
{
fileformat = get_fileformat_force(curbuf, eap);
try_unix = try_dos = try_mac = FALSE;
}
else if (curbuf->b_p_bin)
fileformat = EOL_UNIX; /* binary: use Unix format */
else if (*p_ffs == NUL)
fileformat = get_fileformat(curbuf);/* use format from buffer */
else
fileformat = EOL_UNKNOWN; /* detect from file */
}
#ifdef FEAT_MBYTE
# ifdef USE_ICONV
if (iconv_fd != (iconv_t)-1)
{
/* aborted conversion with iconv(), close the descriptor */
iconv_close(iconv_fd);
iconv_fd = (iconv_t)-1;
}
# endif
if (advance_fenc)
{
/*
* Try the next entry in 'fileencodings'.
*/
advance_fenc = FALSE;
if (eap != NULL && eap->force_enc != 0)
{
/* Conversion given with "++enc=" wasn't possible, read
 * without conversion. */
notconverted = TRUE;
conv_error = 0;
if (fenc_alloced)
vim_free(fenc);
fenc = (char_u *)"";
fenc_alloced = FALSE;
}
else
{
if (fenc_alloced)
vim_free(fenc);
if (fenc_next != NULL)
{
fenc = next_fenc(&fenc_next);
fenc_alloced = (fenc_next != NULL);
}
else
{
fenc = (char_u *)"";
fenc_alloced = FALSE;
}
}
if (tmpname != NULL)
{
mch_remove(tmpname); /* delete converted file */
vim_free(tmpname);
tmpname = NULL;
}
}
/*
* Conversion may be required when the encoding of the file is different
* from 'encoding' or 'encoding' is UTF-16, UCS-2 or UCS-4.
*/
fio_flags = 0;
converted = need_conversion(fenc);
if (converted)
{
/* "ucs-bom" means we need to check the first bytes of the file
* for a BOM. */
if (STRCMP(fenc, ENC_UCSBOM) == 0)
fio_flags = FIO_UCSBOM;
/*
* Check if UCS-2/4 or Latin1 to UTF-8 conversion needs to be
* done. This is handled below after read(). Prepare the
* fio_flags to avoid having to parse the string each time.
* Also check for Unicode to Latin1 conversion, because iconv()
* appears not to handle this correctly. This works just like
* conversion to UTF-8 except how the resulting character is put in
* the buffer.
*/
else if (enc_utf8 || STRCMP(p_enc, "latin1") == 0)
fio_flags = get_fio_flags(fenc);
# ifdef WIN3264
/*
* Conversion from an MS-Windows codepage to UTF-8 or another codepage
* is handled with MultiByteToWideChar().
*/
if (fio_flags == 0)
fio_flags = get_win_fio_flags(fenc);
# endif
# ifdef MACOS_CONVERT
/* Conversion from Apple MacRoman to latin1 or UTF-8 */
if (fio_flags == 0)
fio_flags = get_mac_fio_flags(fenc);
# endif
# ifdef USE_ICONV
/*
* Try using iconv() if we can't convert internally.
*/
if (fio_flags == 0
# ifdef FEAT_EVAL
&& !did_iconv
# endif
)
iconv_fd = (iconv_t)my_iconv_open(
enc_utf8 ? (char_u *)"utf-8" : p_enc, fenc);
# endif
# ifdef FEAT_EVAL
/*
* Use the 'charconvert' expression when conversion is required
* and we can't do it internally or with iconv().
*/
if (fio_flags == 0 && !read_stdin && !read_buffer && *p_ccv != NUL
&& !read_fifo
# ifdef USE_ICONV
&& iconv_fd == (iconv_t)-1
# endif
)
{
# ifdef USE_ICONV
did_iconv = FALSE;
# endif
/* Skip conversion when it's already done (retry for wrong
* "fileformat"). */
if (tmpname == NULL)
{
tmpname = readfile_charconvert(fname, fenc, &fd);
if (tmpname == NULL)
{
/* Conversion failed. Try another one. */
advance_fenc = TRUE;
if (fd < 0)
{
/* Re-opening the original file failed! */
EMSG(_("E202: Conversion made file unreadable!"));
error = TRUE;
goto failed;
}
goto retry;
}
}
}
else
# endif
{
if (fio_flags == 0
# ifdef USE_ICONV
&& iconv_fd == (iconv_t)-1
# endif
)
{
/* Conversion wanted but we can't.
* Try the next conversion in 'fileencodings' */
advance_fenc = TRUE;
goto retry;
}
}
}
/* Set "can_retry" when it's possible to rewind the file and try with
* another "fenc" value. It's FALSE when no other "fenc" to try, reading
* stdin or fixed at a specific encoding. */
can_retry = (*fenc != NUL && !read_stdin && !read_fifo && !keep_dest_enc);
#endif
if (!skip_read)
{
linerest = 0;
filesize = 0;
skip_count = lines_to_skip;
read_count = lines_to_read;
#ifdef FEAT_MBYTE
conv_restlen = 0;
#endif
#ifdef FEAT_PERSISTENT_UNDO
read_undo_file = (newfile && (flags & READ_KEEP_UNDO) == 0
&& curbuf->b_ffname != NULL
&& curbuf->b_p_udf
&& !filtering
&& !read_fifo
&& !read_stdin
&& !read_buffer);
if (read_undo_file)
sha256_start(&sha_ctx);
#endif
#ifdef FEAT_CRYPT
if (curbuf->b_cryptstate != NULL)
{
/* Need to free the state, but keep the key, don't want to ask for
* it again. */
crypt_free_state(curbuf->b_cryptstate);
curbuf->b_cryptstate = NULL;
}
#endif
}
while (!error && !got_int)
{
/*
* We allocate as much space for the file as we can get, plus
* space for the old line plus room for one terminating NUL.
* The amount is limited by the fact that read() can only read
* up to max_unsigned characters (and other things).
*/
#if VIM_SIZEOF_INT <= 2
if (linerest >= 0x7ff0)
{
++split;
*ptr = NL; /* split line by inserting a NL */
size = 1;
}
else
#endif
{
if (!skip_read)
{
#if VIM_SIZEOF_INT > 2
# if defined(SSIZE_MAX) && (SSIZE_MAX < 0x10000L)
size = SSIZE_MAX; /* use max I/O size, 52K */
# else
size = 0x10000L; /* use buffer >= 64K */
# endif
#else
size = 0x7ff0L - linerest; /* limit buffer to 32K */
#endif
for ( ; size >= 10; size = (long)((long_u)size >> 1))
{
if ((new_buffer = lalloc((long_u)(size + linerest + 1),
FALSE)) != NULL)
break;
}
if (new_buffer == NULL)
{
do_outofmem_msg((long_u)(size * 2 + linerest + 1));
error = TRUE;
break;
}
if (linerest) /* copy characters from the previous buffer */
mch_memmove(new_buffer, ptr - linerest, (size_t)linerest);
vim_free(buffer);
buffer = new_buffer;
ptr = buffer + linerest;
line_start = buffer;
#ifdef FEAT_MBYTE
/* May need room to translate into.
* For iconv() we don't really know the required space, use a
* factor ICONV_MULT.
* latin1 to utf-8: 1 byte becomes up to 2 bytes
* utf-16 to utf-8: 2 bytes become up to 3 bytes, 4 bytes
* become up to 4 bytes, size must be multiple of 2
* ucs-2 to utf-8: 2 bytes become up to 3 bytes, size must be
* multiple of 2
* ucs-4 to utf-8: 4 bytes become up to 6 bytes, size must be
* multiple of 4 */
real_size = (int)size;
# ifdef USE_ICONV
if (iconv_fd != (iconv_t)-1)
size = size / ICONV_MULT;
else
# endif
if (fio_flags & FIO_LATIN1)
size = size / 2;
else if (fio_flags & (FIO_UCS2 | FIO_UTF16))
size = (size * 2 / 3) & ~1;
else if (fio_flags & FIO_UCS4)
size = (size * 2 / 3) & ~3;
else if (fio_flags == FIO_UCSBOM)
size = size / ICONV_MULT; /* worst case */
# ifdef WIN3264
else if (fio_flags & FIO_CODEPAGE)
size = size / ICONV_MULT; /* also worst case */
# endif
# ifdef MACOS_CONVERT
else if (fio_flags & FIO_MACROMAN)
size = size / ICONV_MULT; /* also worst case */
# endif
#endif
#ifdef FEAT_MBYTE
if (conv_restlen > 0)
{
/* Insert unconverted bytes from previous line. */
mch_memmove(ptr, conv_rest, conv_restlen);
ptr += conv_restlen;
size -= conv_restlen;
}
#endif
if (read_buffer)
{
/*
* Read bytes from curbuf. Used for converting text read
* from stdin.
*/
if (read_buf_lnum > from)
size = 0;
else
{
int n, ni;
long tlen;
tlen = 0;
for (;;)
{
p = ml_get(read_buf_lnum) + read_buf_col;
n = (int)STRLEN(p);
if ((int)tlen + n + 1 > size)
{
/* Filled up to "size", append partial line.
* Change NL to NUL to reverse the effect done
* below. */
n = (int)(size - tlen);
for (ni = 0; ni < n; ++ni)
{
if (p[ni] == NL)
ptr[tlen++] = NUL;
else
ptr[tlen++] = p[ni];
}
read_buf_col += n;
break;
}
else
{
/* Append whole line and new-line. Change NL
* to NUL to reverse the effect done below. */
for (ni = 0; ni < n; ++ni)
{
if (p[ni] == NL)
ptr[tlen++] = NUL;
else
ptr[tlen++] = p[ni];
}
ptr[tlen++] = NL;
read_buf_col = 0;
if (++read_buf_lnum > from)
{
/* When the last line didn't have an
* end-of-line don't add it now either. */
if (!curbuf->b_p_eol)
--tlen;
size = tlen;
break;
}
}
}
}
}
else
{
/*
* Read bytes from the file.
*/
size = read_eintr(fd, ptr, size);
}
#ifdef FEAT_CRYPT
/*
* At start of file: Check for magic number of encryption.
*/
if (filesize == 0 && size > 0)
cryptkey = check_for_cryptkey(cryptkey, ptr, &size,
&filesize, newfile, sfname,
&did_ask_for_key);
/*
* Decrypt the read bytes. This is done before checking for
* EOF because the crypt layer may be buffering.
*/
if (cryptkey != NULL && curbuf->b_cryptstate != NULL
&& size > 0)
{
if (crypt_works_inplace(curbuf->b_cryptstate))
{
crypt_decode_inplace(curbuf->b_cryptstate, ptr, size);
}
else
{
char_u *newptr = NULL;
int decrypted_size;
decrypted_size = crypt_decode_alloc(
curbuf->b_cryptstate, ptr, size, &newptr);
/* If the crypt layer is buffering, not producing
* anything yet, need to read more. */
if (size > 0 && decrypted_size == 0)
continue;
if (linerest == 0)
{
/* Simple case: reuse returned buffer (may be
* NULL, checked later). */
new_buffer = newptr;
}
else
{
long_u new_size;
/* Need new buffer to add bytes carried over. */
new_size = (long_u)(decrypted_size + linerest + 1);
new_buffer = lalloc(new_size, FALSE);
if (new_buffer == NULL)
{
do_outofmem_msg(new_size);
error = TRUE;
break;
}
mch_memmove(new_buffer, buffer, linerest);
if (newptr != NULL)
mch_memmove(new_buffer + linerest, newptr,
decrypted_size);
}
if (new_buffer != NULL)
{
vim_free(buffer);
buffer = new_buffer;
new_buffer = NULL;
line_start = buffer;
ptr = buffer + linerest;
}
size = decrypted_size;
}
}
#endif
if (size <= 0)
{
if (size < 0) /* read error */
error = TRUE;
#ifdef FEAT_MBYTE
else if (conv_restlen > 0)
{
/*
* Reached end-of-file but some trailing bytes could
* not be converted. Truncated file?
*/
/* When we did a conversion report an error. */
if (fio_flags != 0
# ifdef USE_ICONV
|| iconv_fd != (iconv_t)-1
# endif
)
{
if (can_retry)
goto rewind_retry;
if (conv_error == 0)
conv_error = curbuf->b_ml.ml_line_count
- linecnt + 1;
}
/* Remember the first linenr with an illegal byte */
else if (illegal_byte == 0)
illegal_byte = curbuf->b_ml.ml_line_count
- linecnt + 1;
if (bad_char_behavior == BAD_DROP)
{
*(ptr - conv_restlen) = NUL;
conv_restlen = 0;
}
else
{
/* Replace the trailing bytes with the replacement
* character if we were converting; if we weren't,
* leave the UTF8 checking code to do it, as it
* works slightly differently. */
if (bad_char_behavior != BAD_KEEP && (fio_flags != 0
# ifdef USE_ICONV
|| iconv_fd != (iconv_t)-1
# endif
))
{
while (conv_restlen > 0)
{
*(--ptr) = bad_char_behavior;
--conv_restlen;
}
}
fio_flags = 0; /* don't convert this */
# ifdef USE_ICONV
if (iconv_fd != (iconv_t)-1)
{
iconv_close(iconv_fd);
iconv_fd = (iconv_t)-1;
}
# endif
}
}
#endif
}
}
skip_read = FALSE;
#ifdef FEAT_MBYTE
/*
* At start of file (or after crypt magic number): Check for BOM.
* Also check for a BOM for other Unicode encodings, but not after
* converting with 'charconvert' or when a BOM has already been
* found.
*/
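/* For reference (see check_for_bom()): the BOM signatures are EF BB BF
 * for utf-8, FF FE for utf-16le, FE FF for utf-16be, FF FE 00 00 for
 * ucs-4le and 00 00 FE FF for ucs-4be. */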
if ((filesize == 0
# ifdef FEAT_CRYPT
|| (cryptkey != NULL
&& filesize == crypt_get_header_len(
crypt_get_method_nr(curbuf)))
# endif
)
&& (fio_flags == FIO_UCSBOM
|| (!curbuf->b_p_bomb
&& tmpname == NULL
&& (*fenc == 'u' || (*fenc == NUL && enc_utf8)))))
{
char_u *ccname;
int blen;
/* no BOM detection in a short file or in binary mode */
if (size < 2 || curbuf->b_p_bin)
ccname = NULL;
else
ccname = check_for_bom(ptr, size, &blen,
fio_flags == FIO_UCSBOM ? FIO_ALL : get_fio_flags(fenc));
if (ccname != NULL)
{
/* Remove BOM from the text */
filesize += blen;
size -= blen;
mch_memmove(ptr, ptr + blen, (size_t)size);
if (set_options)
{
curbuf->b_p_bomb = TRUE;
curbuf->b_start_bomb = TRUE;
}
}
if (fio_flags == FIO_UCSBOM)
{
if (ccname == NULL)
{
/* No BOM detected: retry with next encoding. */
advance_fenc = TRUE;
}
else
{
/* BOM detected: set "fenc" and jump back */
if (fenc_alloced)
vim_free(fenc);
fenc = ccname;
fenc_alloced = FALSE;
}
/* retry reading without getting new bytes or rewinding */
skip_read = TRUE;
goto retry;
}
}
/* Include not converted bytes. */
ptr -= conv_restlen;
size += conv_restlen;
conv_restlen = 0;
#endif
/*
* Break here for a read error or end-of-file.
*/
if (size <= 0)
break;
#ifdef FEAT_MBYTE
# ifdef USE_ICONV
if (iconv_fd != (iconv_t)-1)
{
/*
* Attempt conversion of the read bytes to 'encoding' using
* iconv().
*/
const char *fromp;
char *top;
size_t from_size;
size_t to_size;
fromp = (char *)ptr;
from_size = size;
ptr += size;
top = (char *)ptr;
to_size = real_size - size;
/*
* If there is conversion error or not enough room try using
* another conversion. Except for when there is no
* alternative (help files).
*/
while ((iconv(iconv_fd, (void *)&fromp, &from_size,
&top, &to_size)
== (size_t)-1 && ICONV_ERRNO != ICONV_EINVAL)
|| from_size > CONV_RESTLEN)
{
if (can_retry)
goto rewind_retry;
if (conv_error == 0)
conv_error = readfile_linenr(linecnt,
ptr, (char_u *)top);
/* Deal with a bad byte and continue with the next. */
++fromp;
--from_size;
if (bad_char_behavior == BAD_KEEP)
{
*top++ = *(fromp - 1);
--to_size;
}
else if (bad_char_behavior != BAD_DROP)
{
*top++ = bad_char_behavior;
--to_size;
}
}
if (from_size > 0)
{
/* Some remaining characters, keep them for the next
* round. */
mch_memmove(conv_rest, (char_u *)fromp, from_size);
conv_restlen = (int)from_size;
}
/* move the linerest to before the converted characters */
line_start = ptr - linerest;
mch_memmove(line_start, buffer, (size_t)linerest);
size = (long)((char_u *)top - ptr);
}
# endif
# ifdef WIN3264
if (fio_flags & FIO_CODEPAGE)
{
char_u *src, *dst;
WCHAR ucs2buf[3];
int ucs2len;
int codepage = FIO_GET_CP(fio_flags);
int bytelen;
int found_bad;
char replstr[2];
/*
* Conversion from an MS-Windows codepage or UTF-8 to UTF-8 or
* a codepage, using standard MS-Windows functions. This
* requires two steps:
* 1. convert from 'fileencoding' to ucs-2
* 2. convert from ucs-2 to 'encoding'
*
* Because there may be illegal bytes AND an incomplete byte
* sequence at the end, we may have to do the conversion one
* character at a time to get it right.
*/
/* Replacement string for WideCharToMultiByte(). */
if (bad_char_behavior > 0)
replstr[0] = bad_char_behavior;
else
replstr[0] = '?';
replstr[1] = NUL;
/*
* Move the bytes to the end of the buffer, so that we have
* room to put the result at the start.
*/
src = ptr + real_size - size;
mch_memmove(src, ptr, size);
/*
* Do the conversion.
*/
dst = ptr;
while (size > 0)
{
found_bad = FALSE;
# ifdef CP_UTF8 /* VC 4.1 doesn't define CP_UTF8 */
if (codepage == CP_UTF8)
{
/* Handle CP_UTF8 input ourselves to be able to handle
* trailing bytes properly.
* Get one UTF-8 character from src. */
bytelen = (int)utf_ptr2len_len(src, size);
if (bytelen > size)
{
/* Only got some bytes of a character. Normally
 * they are put in "conv_rest", but if the sequence
 * is too long deal with the bytes as if they were
 * illegal. */
if (bytelen <= CONV_RESTLEN)
break;
/* weird overlong byte sequence */
bytelen = size;
found_bad = TRUE;
}
else
{
int u8c = utf_ptr2char(src);
if (u8c > 0xffff || (*src >= 0x80 && bytelen == 1))
found_bad = TRUE;
ucs2buf[0] = u8c;
ucs2len = 1;
}
}
else
# endif
{
/* We don't know how long the byte sequence is, try
* from one to three bytes. */
for (bytelen = 1; bytelen <= size && bytelen <= 3;
++bytelen)
{
ucs2len = MultiByteToWideChar(codepage,
MB_ERR_INVALID_CHARS,
(LPCSTR)src, bytelen,
ucs2buf, 3);
if (ucs2len > 0)
break;
}
if (ucs2len == 0)
{
/* If we have only one byte then it's probably an
* incomplete byte sequence. Otherwise discard
* one byte as a bad character. */
if (size == 1)
break;
found_bad = TRUE;
bytelen = 1;
}
}
if (!found_bad)
{
int i;
/* Convert "ucs2buf[ucs2len]" to 'enc' in "dst". */
if (enc_utf8)
{
/* From UCS-2 to UTF-8. Cannot fail. */
for (i = 0; i < ucs2len; ++i)
dst += utf_char2bytes(ucs2buf[i], dst);
}
else
{
BOOL bad = FALSE;
int dstlen;
/* From UCS-2 to "enc_codepage". If the
* conversion uses the default character "?",
* the data doesn't fit in this encoding. */
dstlen = WideCharToMultiByte(enc_codepage, 0,
(LPCWSTR)ucs2buf, ucs2len,
(LPSTR)dst, (int)(src - dst),
replstr, &bad);
if (bad)
found_bad = TRUE;
else
dst += dstlen;
}
}
if (found_bad)
{
/* Deal with bytes we can't convert. */
if (can_retry)
goto rewind_retry;
if (conv_error == 0)
conv_error = readfile_linenr(linecnt, ptr, dst);
if (bad_char_behavior != BAD_DROP)
{
if (bad_char_behavior == BAD_KEEP)
{
mch_memmove(dst, src, bytelen);
dst += bytelen;
}
else
*dst++ = bad_char_behavior;
}
}
src += bytelen;
size -= bytelen;
}
if (size > 0)
{
/* An incomplete byte sequence remaining. */
mch_memmove(conv_rest, src, size);
conv_restlen = size;
}
/* The new size is equal to how much "dst" was advanced. */
size = (long)(dst - ptr);
}
else
# endif
# ifdef MACOS_CONVERT
if (fio_flags & FIO_MACROMAN)
{
/*
* Conversion from Apple MacRoman char encoding to UTF-8 or
* latin1. This is in os_mac_conv.c.
*/
if (macroman2enc(ptr, &size, real_size) == FAIL)
goto rewind_retry;
}
else
# endif
if (fio_flags != 0)
{
int u8c;
char_u *dest;
char_u *tail = NULL;
/*
* "enc_utf8" set: Convert Unicode or Latin1 to UTF-8.
* "enc_utf8" not set: Convert Unicode to Latin1.
* Go from end to start through the buffer, because the number
* of bytes may increase.
* "dest" points to after where the UTF-8 bytes go, "p" points
* to after the next character to convert.
*/
dest = ptr + real_size;
if (fio_flags == FIO_LATIN1 || fio_flags == FIO_UTF8)
{
p = ptr + size;
if (fio_flags == FIO_UTF8)
{
/* Check for a trailing incomplete UTF-8 sequence */
tail = ptr + size - 1;
while (tail > ptr && (*tail & 0xc0) == 0x80)
--tail;
if (tail + utf_byte2len(*tail) <= ptr + size)
tail = NULL;
else
p = tail;
}
}
else if (fio_flags & (FIO_UCS2 | FIO_UTF16))
{
/* Check for a trailing byte */
p = ptr + (size & ~1);
if (size & 1)
tail = p;
if ((fio_flags & FIO_UTF16) && p > ptr)
{
/* Check for a trailing leading (high surrogate) word */
if (fio_flags & FIO_ENDIAN_L)
{
u8c = (*--p << 8);
u8c += *--p;
}
else
{
u8c = *--p;
u8c += (*--p << 8);
}
if (u8c >= 0xd800 && u8c <= 0xdbff)
tail = p;
else
p += 2;
}
}
else /* FIO_UCS4 */
{
/* Check for trailing 1, 2 or 3 bytes */
p = ptr + (size & ~3);
if (size & 3)
tail = p;
}
/* If there is a trailing incomplete sequence move it to
* conv_rest[]. */
if (tail != NULL)
{
conv_restlen = (int)((ptr + size) - tail);
mch_memmove(conv_rest, (char_u *)tail, conv_restlen);
size -= conv_restlen;
}
while (p > ptr)
{
if (fio_flags & FIO_LATIN1)
u8c = *--p;
else if (fio_flags & (FIO_UCS2 | FIO_UTF16))
{
if (fio_flags & FIO_ENDIAN_L)
{
u8c = (*--p << 8);
u8c += *--p;
}
else
{
u8c = *--p;
u8c += (*--p << 8);
}
if ((fio_flags & FIO_UTF16)
&& u8c >= 0xdc00 && u8c <= 0xdfff)
{
int u16c;
if (p == ptr)
{
/* Missing leading word. */
if (can_retry)
goto rewind_retry;
if (conv_error == 0)
conv_error = readfile_linenr(linecnt,
ptr, p);
if (bad_char_behavior == BAD_DROP)
continue;
if (bad_char_behavior != BAD_KEEP)
u8c = bad_char_behavior;
}
/* found second word of double-word, get the first
* word and compute the resulting character */
if (fio_flags & FIO_ENDIAN_L)
{
u16c = (*--p << 8);
u16c += *--p;
}
else
{
u16c = *--p;
u16c += (*--p << 8);
}
u8c = 0x10000 + ((u16c & 0x3ff) << 10)
+ (u8c & 0x3ff);
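/* Worked example (illustrative): U+1F600 arrives as the surrogate
 * pair D83D DE00; 0x10000 + ((0xD83D & 0x3ff) << 10) + (0xDE00 & 0x3ff)
 * = 0x10000 + 0xF400 + 0x200 = 0x1F600. */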
/* Check if the word is indeed a leading word. */
if (u16c < 0xd800 || u16c > 0xdbff)
{
if (can_retry)
goto rewind_retry;
if (conv_error == 0)
conv_error = readfile_linenr(linecnt,
ptr, p);
if (bad_char_behavior == BAD_DROP)
continue;
if (bad_char_behavior != BAD_KEEP)
u8c = bad_char_behavior;
}
}
}
else if (fio_flags & FIO_UCS4)
{
if (fio_flags & FIO_ENDIAN_L)
{
u8c = (unsigned)*--p << 24;
u8c += (unsigned)*--p << 16;
u8c += (unsigned)*--p << 8;
u8c += *--p;
}
else /* big endian */
{
u8c = *--p;
u8c += (unsigned)*--p << 8;
u8c += (unsigned)*--p << 16;
u8c += (unsigned)*--p << 24;
}
}
else /* UTF-8 */
{
if (*--p < 0x80)
u8c = *p;
else
{
len = utf_head_off(ptr, p);
p -= len;
u8c = utf_ptr2char(p);
if (len == 0)
{
/* Not a valid UTF-8 character, retry with
* another fenc when possible, otherwise just
* report the error. */
if (can_retry)
goto rewind_retry;
if (conv_error == 0)
conv_error = readfile_linenr(linecnt,
ptr, p);
if (bad_char_behavior == BAD_DROP)
continue;
if (bad_char_behavior != BAD_KEEP)
u8c = bad_char_behavior;
}
}
}
if (enc_utf8) /* produce UTF-8 */
{
dest -= utf_char2len(u8c);
(void)utf_char2bytes(u8c, dest);
}
else /* produce Latin1 */
{
--dest;
if (u8c >= 0x100)
{
/* character doesn't fit in latin1, retry with
* another fenc when possible, otherwise just
* report the error. */
if (can_retry)
goto rewind_retry;
if (conv_error == 0)
conv_error = readfile_linenr(linecnt, ptr, p);
if (bad_char_behavior == BAD_DROP)
++dest;
else if (bad_char_behavior == BAD_KEEP)
*dest = u8c;
else if (eap != NULL && eap->bad_char != 0)
*dest = bad_char_behavior;
else
*dest = 0xBF;
}
else
*dest = u8c;
}
}
/* move the linerest to before the converted characters */
line_start = dest - linerest;
mch_memmove(line_start, buffer, (size_t)linerest);
size = (long)((ptr + real_size) - dest);
ptr = dest;
}
else if (enc_utf8 && !curbuf->b_p_bin)
{
int incomplete_tail = FALSE;
/* Reading UTF-8: Check if the bytes are valid UTF-8. */
for (p = ptr; ; ++p)
{
int todo = (int)((ptr + size) - p);
int l;
if (todo <= 0)
break;
if (*p >= 0x80)
{
/* A length of 1 means it's an illegal byte. Accept
* an incomplete character at the end though, the next
* read() will get the next bytes, we'll check it
* then. */
l = utf_ptr2len_len(p, todo);
if (l > todo && !incomplete_tail)
{
/* Avoid retrying with a different encoding when
* a truncated file is more likely, or attempting
* to read the rest of an incomplete sequence when
* we have already done so. */
if (p > ptr || filesize > 0)
incomplete_tail = TRUE;
/* Incomplete byte sequence, move it to conv_rest[]
* and try to read the rest of it, unless we've
* already done so. */
if (p > ptr)
{
conv_restlen = todo;
mch_memmove(conv_rest, p, conv_restlen);
size -= conv_restlen;
break;
}
}
if (l == 1 || l > todo)
{
/* Illegal byte. If we can try another encoding
* do that, unless at EOF where a truncated
* file is more likely than a conversion error. */
if (can_retry && !incomplete_tail)
break;
# ifdef USE_ICONV
/* When we did a conversion report an error. */
if (iconv_fd != (iconv_t)-1 && conv_error == 0)
conv_error = readfile_linenr(linecnt, ptr, p);
# endif
/* Remember the first linenr with an illegal byte */
if (conv_error == 0 && illegal_byte == 0)
illegal_byte = readfile_linenr(linecnt, ptr, p);
/* Drop, keep or replace the bad byte. */
if (bad_char_behavior == BAD_DROP)
{
mch_memmove(p, p + 1, todo - 1);
--p;
--size;
}
else if (bad_char_behavior != BAD_KEEP)
*p = bad_char_behavior;
}
else
p += l - 1;
}
}
if (p < ptr + size && !incomplete_tail)
{
/* Detected a UTF-8 error. */
rewind_retry:
/* Retry reading with another conversion. */
# if defined(FEAT_EVAL) && defined(USE_ICONV)
if (*p_ccv != NUL && iconv_fd != (iconv_t)-1)
/* iconv() failed, try 'charconvert' */
did_iconv = TRUE;
else
# endif
/* use next item from 'fileencodings' */
advance_fenc = TRUE;
file_rewind = TRUE;
goto retry;
}
}
#endif
/* count the number of characters (after conversion!) */
filesize += size;
/*
* when reading the first part of a file: guess EOL type
*/
if (fileformat == EOL_UNKNOWN)
{
/* First try finding a NL, for Dos and Unix */
if (try_dos || try_unix)
{
/* Reset the carriage return counter. */
if (try_mac)
try_mac = 1;
for (p = ptr; p < ptr + size; ++p)
{
if (*p == NL)
{
if (!try_unix
|| (try_dos && p > ptr && p[-1] == CAR))
fileformat = EOL_DOS;
else
fileformat = EOL_UNIX;
break;
}
else if (*p == CAR && try_mac)
try_mac++;
}
/* Don't give in to EOL_UNIX if EOL_MAC is more likely */
if (fileformat == EOL_UNIX && try_mac)
{
/* Need to reset the counters when retrying fenc. */
try_mac = 1;
try_unix = 1;
for (; p >= ptr && *p != CAR; p--)
;
if (p >= ptr)
{
for (p = ptr; p < ptr + size; ++p)
{
if (*p == NL)
try_unix++;
else if (*p == CAR)
try_mac++;
}
if (try_mac > try_unix)
fileformat = EOL_MAC;
}
}
else if (fileformat == EOL_UNKNOWN && try_mac == 1)
/* Looking for CR but found no end-of-line markers at
* all: use the default format. */
fileformat = default_fileformat();
}
/* No NL found: may use Mac format */
if (fileformat == EOL_UNKNOWN && try_mac)
fileformat = EOL_MAC;
/* Still nothing found? Use first format in 'ffs' */
if (fileformat == EOL_UNKNOWN)
fileformat = default_fileformat();
/* if editing a new file: may set p_tx and p_ff */
if (set_options)
set_fileformat(fileformat, OPT_LOCAL);
}
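/* Illustrative summary of the guess above (assuming all three formats are
 * in 'fileformats'): "a\r\nb" yields EOL_DOS, "a\nb" EOL_UNIX, and
 * "a\rb" (no NL at all) EOL_MAC. */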
}
/*
* This loop is executed once for every character read.
* Keep it fast!
*/
if (fileformat == EOL_MAC)
{
--ptr;
while (++ptr, --size >= 0)
{
/* catch most common case first */
if ((c = *ptr) != NUL && c != CAR && c != NL)
continue;
if (c == NUL)
*ptr = NL; /* NULs are replaced by newlines! */
else if (c == NL)
*ptr = CAR; /* NLs are replaced by CRs! */
else
{
if (skip_count == 0)
{
*ptr = NUL; /* end of line */
len = (colnr_T) (ptr - line_start + 1);
if (ml_append(lnum, line_start, len, newfile) == FAIL)
{
error = TRUE;
break;
}
#ifdef FEAT_PERSISTENT_UNDO
if (read_undo_file)
sha256_update(&sha_ctx, line_start, len);
#endif
++lnum;
if (--read_count == 0)
{
error = TRUE; /* break loop */
line_start = ptr; /* nothing left to write */
break;
}
}
else
--skip_count;
line_start = ptr + 1;
}
}
}
else
{
--ptr;
while (++ptr, --size >= 0)
{
if ((c = *ptr) != NUL && c != NL) /* catch most common case */
continue;
if (c == NUL)
*ptr = NL; /* NULs are replaced by newlines! */
else
{
if (skip_count == 0)
{
*ptr = NUL; /* end of line */
len = (colnr_T)(ptr - line_start + 1);
if (fileformat == EOL_DOS)
{
if (ptr > line_start && ptr[-1] == CAR)
{
/* remove CR before NL */
ptr[-1] = NUL;
--len;
}
/*
* Reading in Dos format, but no CR-LF found!
* When 'fileformats' includes "unix", delete all
* the lines read so far and start all over again.
* Otherwise give an error message later.
*/
else if (ff_error != EOL_DOS)
{
if ( try_unix
&& !read_stdin
&& (read_buffer
|| vim_lseek(fd, (off_T)0L, SEEK_SET)
== 0))
{
fileformat = EOL_UNIX;
if (set_options)
set_fileformat(EOL_UNIX, OPT_LOCAL);
file_rewind = TRUE;
keep_fileformat = TRUE;
goto retry;
}
ff_error = EOL_DOS;
}
}
if (ml_append(lnum, line_start, len, newfile) == FAIL)
{
error = TRUE;
break;
}
#ifdef FEAT_PERSISTENT_UNDO
if (read_undo_file)
sha256_update(&sha_ctx, line_start, len);
#endif
++lnum;
if (--read_count == 0)
{
error = TRUE; /* break loop */
line_start = ptr; /* nothing left to write */
break;
}
}
else
--skip_count;
line_start = ptr + 1;
}
}
}
linerest = (long)(ptr - line_start);
ui_breakcheck();
}
failed:
/* not an error, max. number of lines reached */
if (error && read_count == 0)
error = FALSE;
/*
* If we get EOF in the middle of a line, note the fact and
* complete the line ourselves.
* In Dos format ignore a trailing CTRL-Z, unless 'binary' set.
*/
if (!error
&& !got_int
&& linerest != 0
&& !(!curbuf->b_p_bin
&& fileformat == EOL_DOS
&& *line_start == Ctrl_Z
&& ptr == line_start + 1))
{
/* remember for when writing */
if (set_options)
curbuf->b_p_eol = FALSE;
*ptr = NUL;
len = (colnr_T)(ptr - line_start + 1);
if (ml_append(lnum, line_start, len, newfile) == FAIL)
error = TRUE;
else
{
#ifdef FEAT_PERSISTENT_UNDO
if (read_undo_file)
sha256_update(&sha_ctx, line_start, len);
#endif
read_no_eol_lnum = ++lnum;
}
}
if (set_options)
save_file_ff(curbuf); /* remember the current file format */
#ifdef FEAT_CRYPT
if (curbuf->b_cryptstate != NULL)
{
crypt_free_state(curbuf->b_cryptstate);
curbuf->b_cryptstate = NULL;
}
if (cryptkey != NULL && cryptkey != curbuf->b_p_key)
crypt_free_key(cryptkey);
/* Don't set cryptkey to NULL, it's used below as a flag that
* encryption was used. */
#endif
#ifdef FEAT_MBYTE
/* If editing a new file: set 'fenc' for the current buffer.
* Also for ":read ++edit file". */
if (set_options)
set_string_option_direct((char_u *)"fenc", -1, fenc,
OPT_FREE|OPT_LOCAL, 0);
if (fenc_alloced)
vim_free(fenc);
# ifdef USE_ICONV
if (iconv_fd != (iconv_t)-1)
{
iconv_close(iconv_fd);
iconv_fd = (iconv_t)-1;
}
# endif
#endif
if (!read_buffer && !read_stdin)
close(fd); /* errors are ignored */
#ifdef HAVE_FD_CLOEXEC
else
{
int fdflags = fcntl(fd, F_GETFD);
if (fdflags >= 0 && (fdflags & FD_CLOEXEC) == 0)
(void)fcntl(fd, F_SETFD, fdflags | FD_CLOEXEC);
}
#endif
vim_free(buffer);
#ifdef HAVE_DUP
if (read_stdin)
{
/* Use stderr for stdin, makes shell commands work. */
close(0);
ignored = dup(2);
}
#endif
#ifdef FEAT_MBYTE
if (tmpname != NULL)
{
mch_remove(tmpname); /* delete converted file */
vim_free(tmpname);
}
#endif
--no_wait_return; /* may wait for return now */
/*
* In recovery mode everything but autocommands is skipped.
*/
if (!recoverymode)
{
/* need to delete the last line, which comes from the empty buffer */
if (newfile && wasempty && !(curbuf->b_ml.ml_flags & ML_EMPTY))
{
#ifdef FEAT_NETBEANS_INTG
netbeansFireChanges = 0;
#endif
ml_delete(curbuf->b_ml.ml_line_count, FALSE);
#ifdef FEAT_NETBEANS_INTG
netbeansFireChanges = 1;
#endif
--linecnt;
}
linecnt = curbuf->b_ml.ml_line_count - linecnt;
if (filesize == 0)
linecnt = 0;
if (newfile || read_buffer)
{
redraw_curbuf_later(NOT_VALID);
#ifdef FEAT_DIFF
/* After reading the text into the buffer the diff info needs to
* be updated. */
diff_invalidate(curbuf);
#endif
#ifdef FEAT_FOLDING
/* All folds in the window are invalid now. Mark them for update
* before triggering autocommands. */
foldUpdateAll(curwin);
#endif
}
else if (linecnt) /* appended at least one line */
appended_lines_mark(from, linecnt);
#ifndef ALWAYS_USE_GUI
/*
* If we were reading from the same terminal as where messages go,
* the screen will have been messed up.
* Switch on raw mode now and clear the screen.
*/
if (read_stdin)
{
settmode(TMODE_RAW); /* set to raw mode */
starttermcap();
screenclear();
}
#endif
if (got_int)
{
if (!(flags & READ_DUMMY))
{
filemess(curbuf, sfname, (char_u *)_(e_interr), 0);
if (newfile)
curbuf->b_p_ro = TRUE; /* must use "w!" now */
}
msg_scroll = msg_save;
#ifdef FEAT_VIMINFO
check_marks_read();
#endif
return OK; /* an interrupt isn't really an error */
}
if (!filtering && !(flags & READ_DUMMY))
{
msg_add_fname(curbuf, sfname); /* fname in IObuff with quotes */
c = FALSE;
#ifdef UNIX
# ifdef S_ISFIFO
if (S_ISFIFO(perm)) /* fifo or socket */
{
STRCAT(IObuff, _("[fifo/socket]"));
c = TRUE;
}
# else
# ifdef S_IFIFO
if ((perm & S_IFMT) == S_IFIFO) /* fifo */
{
STRCAT(IObuff, _("[fifo]"));
c = TRUE;
}
# endif
# ifdef S_IFSOCK
if ((perm & S_IFMT) == S_IFSOCK) /* or socket */
{
STRCAT(IObuff, _("[socket]"));
c = TRUE;
}
# endif
# endif
# ifdef OPEN_CHR_FILES
if (S_ISCHR(perm)) /* or character special */
{
STRCAT(IObuff, _("[character special]"));
c = TRUE;
}
# endif
#endif
if (curbuf->b_p_ro)
{
STRCAT(IObuff, shortmess(SHM_RO) ? _("[RO]") : _("[readonly]"));
c = TRUE;
}
if (read_no_eol_lnum)
{
msg_add_eol();
c = TRUE;
}
if (ff_error == EOL_DOS)
{
STRCAT(IObuff, _("[CR missing]"));
c = TRUE;
}
if (split)
{
STRCAT(IObuff, _("[long lines split]"));
c = TRUE;
}
#ifdef FEAT_MBYTE
if (notconverted)
{
STRCAT(IObuff, _("[NOT converted]"));
c = TRUE;
}
else if (converted)
{
STRCAT(IObuff, _("[converted]"));
c = TRUE;
}
#endif
#ifdef FEAT_CRYPT
if (cryptkey != NULL)
{
crypt_append_msg(curbuf);
c = TRUE;
}
#endif
#ifdef FEAT_MBYTE
if (conv_error != 0)
{
sprintf((char *)IObuff + STRLEN(IObuff),
_("[CONVERSION ERROR in line %ld]"), (long)conv_error);
c = TRUE;
}
else if (illegal_byte > 0)
{
sprintf((char *)IObuff + STRLEN(IObuff),
_("[ILLEGAL BYTE in line %ld]"), (long)illegal_byte);
c = TRUE;
}
else
#endif
if (error)
{
STRCAT(IObuff, _("[READ ERRORS]"));
c = TRUE;
}
if (msg_add_fileformat(fileformat))
c = TRUE;
#ifdef FEAT_CRYPT
if (cryptkey != NULL)
msg_add_lines(c, (long)linecnt, filesize
- crypt_get_header_len(crypt_get_method_nr(curbuf)));
else
#endif
msg_add_lines(c, (long)linecnt, filesize);
vim_free(keep_msg);
keep_msg = NULL;
msg_scrolled_ign = TRUE;
#ifdef ALWAYS_USE_GUI
/* Don't show the message when reading stdin, it would end up in a
* message box (which might be shown when exiting!) */
if (read_stdin || read_buffer)
p = msg_may_trunc(FALSE, IObuff);
else
#endif
p = msg_trunc_attr(IObuff, FALSE, 0);
if (read_stdin || read_buffer || restart_edit != 0
|| (msg_scrolled != 0 && !need_wait_return))
/* Need to repeat the message after redrawing when:
* - When reading from stdin (the screen will be cleared next).
* - When restart_edit is set (otherwise there will be a delay
* before redrawing).
* - When the screen was scrolled but there is no wait-return
* prompt. */
set_keep_msg(p, 0);
msg_scrolled_ign = FALSE;
}
/* with errors writing the file requires ":w!" */
if (newfile && (error
#ifdef FEAT_MBYTE
|| conv_error != 0
|| (illegal_byte > 0 && bad_char_behavior != BAD_KEEP)
#endif
))
curbuf->b_p_ro = TRUE;
u_clearline(); /* cannot use "U" command after adding lines */
/*
* In Ex mode: cursor at last new line.
* Otherwise: cursor at first new line.
*/
if (exmode_active)
curwin->w_cursor.lnum = from + linecnt;
else
curwin->w_cursor.lnum = from + 1;
check_cursor_lnum();
beginline(BL_WHITE | BL_FIX); /* on first non-blank */
/*
* Set '[ and '] marks to the newly read lines.
*/
curbuf->b_op_start.lnum = from + 1;
curbuf->b_op_start.col = 0;
curbuf->b_op_end.lnum = from + linecnt;
curbuf->b_op_end.col = 0;
#ifdef WIN32
/*
* Work around a weird problem: When a file has two links (only
* possible on NTFS) and we write through one link, then stat() it
* through the other link, the timestamp information may be wrong.
* It's correct again after reading the file, thus reset the timestamp
* here.
*/
if (newfile && !read_stdin && !read_buffer
&& mch_stat((char *)fname, &st) >= 0)
{
buf_store_time(curbuf, &st, fname);
curbuf->b_mtime_read = curbuf->b_mtime;
}
#endif
}
msg_scroll = msg_save;
#ifdef FEAT_VIMINFO
/*
* Get the marks before executing autocommands, so they can be used there.
*/
check_marks_read();
#endif
/*
* We remember if the last line of the read didn't have
* an eol even when 'binary' is off, to support turning 'fixeol' off,
* or writing the read again with 'binary' on. The latter is required
* for ":autocmd FileReadPost *.gz set bin|'[,']!gunzip" to work.
*/
curbuf->b_no_eol_lnum = read_no_eol_lnum;
/* When reloading a buffer put the cursor at the first line that is
* different. */
if (flags & READ_KEEP_UNDO)
u_find_first_changed();
#ifdef FEAT_PERSISTENT_UNDO
/*
* When opening a new file locate undo info and read it.
*/
if (read_undo_file)
{
char_u hash[UNDO_HASH_SIZE];
sha256_finish(&sha_ctx, hash);
u_read_undo(NULL, hash, fname);
}
#endif
#ifdef FEAT_AUTOCMD
if (!read_stdin && !read_fifo && (!read_buffer || sfname != NULL))
{
int m = msg_scroll;
int n = msg_scrolled;
/* Save the fileformat now, otherwise the buffer will be considered
* modified if the format/encoding was automatically detected. */
if (set_options)
save_file_ff(curbuf);
/*
* The output from the autocommands should not overwrite anything and
* should not be overwritten: Set msg_scroll, restore its value if no
* output was done.
*/
msg_scroll = TRUE;
if (filtering)
apply_autocmds_exarg(EVENT_FILTERREADPOST, NULL, sfname,
FALSE, curbuf, eap);
else if (newfile || (read_buffer && sfname != NULL))
{
apply_autocmds_exarg(EVENT_BUFREADPOST, NULL, sfname,
FALSE, curbuf, eap);
if (!au_did_filetype && *curbuf->b_p_ft != NUL)
/*
* EVENT_FILETYPE was not triggered but the buffer already has a
* filetype. Trigger EVENT_FILETYPE using the existing filetype.
*/
apply_autocmds(EVENT_FILETYPE, curbuf->b_p_ft, curbuf->b_fname,
TRUE, curbuf);
}
else
apply_autocmds_exarg(EVENT_FILEREADPOST, sfname, sfname,
FALSE, NULL, eap);
if (msg_scrolled == n)
msg_scroll = m;
# ifdef FEAT_EVAL
if (aborting()) /* autocmds may abort script processing */
return FAIL;
# endif
}
#endif
if (recoverymode && error)
return FAIL;
return OK;
}
#if defined(OPEN_CHR_FILES) || defined(PROTO)
/*
* Returns TRUE if the file name argument is of the form "/dev/fd/\d\+",
* which is the name of files used for process substitution output by
* some shells on some operating systems, e.g., bash on SunOS.
* Do not accept "/dev/fd/[012]", opening these may hang Vim.
*/
int
is_dev_fd_file(char_u *fname)
{
return (STRNCMP(fname, "/dev/fd/", 8) == 0
&& VIM_ISDIGIT(fname[8])
&& *skipdigits(fname + 9) == NUL
&& (fname[9] != NUL
|| (fname[8] != '0' && fname[8] != '1' && fname[8] != '2')));
}
#endif
#ifdef FEAT_MBYTE
/*
* From the current line count and characters read after that, estimate the
* line number where we are now.
* Used for error messages that include a line number.
*/
static linenr_T
readfile_linenr(
linenr_T linecnt, /* line count before reading more bytes */
char_u *p, /* start of more bytes read */
char_u *endp) /* end of more bytes read */
{
char_u *s;
linenr_T lnum;
lnum = curbuf->b_ml.ml_line_count - linecnt + 1;
for (s = p; s < endp; ++s)
if (*s == '\n')
++lnum;
return lnum;
}
#endif
/*
* Fill "*eap" to force the 'fileencoding', 'fileformat' and 'binary to be
* equal to the buffer "buf". Used for calling readfile().
* Returns OK or FAIL.
*/
int
prep_exarg(exarg_T *eap, buf_T *buf)
{
eap->cmd = alloc((unsigned)(STRLEN(buf->b_p_ff)
#ifdef FEAT_MBYTE
+ STRLEN(buf->b_p_fenc)
#endif
+ 15));
if (eap->cmd == NULL)
return FAIL;
#ifdef FEAT_MBYTE
sprintf((char *)eap->cmd, "e ++ff=%s ++enc=%s", buf->b_p_ff, buf->b_p_fenc);
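    /* force_enc is the offset of the 'fenc' value in eap->cmd:
     * 7 for "e ++ff=" + STRLEN(ff) + 7 for " ++enc=" == 14 + STRLEN(ff). */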
eap->force_enc = 14 + (int)STRLEN(buf->b_p_ff);
eap->bad_char = buf->b_bad_char;
#else
sprintf((char *)eap->cmd, "e ++ff=%s", buf->b_p_ff);
#endif
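    /* force_ff is the offset of the 'ff' value: just past "e ++ff=". */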
eap->force_ff = 7;
eap->force_bin = buf->b_p_bin ? FORCE_BIN : FORCE_NOBIN;
eap->read_edit = FALSE;
eap->forceit = FALSE;
return OK;
}
/*
* Set default or forced 'fileformat' and 'binary'.
*/
void
set_file_options(int set_options, exarg_T *eap)
{
/* set default 'fileformat' */
if (set_options)
{
if (eap != NULL && eap->force_ff != 0)
set_fileformat(get_fileformat_force(curbuf, eap), OPT_LOCAL);
else if (*p_ffs != NUL)
set_fileformat(default_fileformat(), OPT_LOCAL);
}
/* set or reset 'binary' */
if (eap != NULL && eap->force_bin != 0)
{
int oldval = curbuf->b_p_bin;
curbuf->b_p_bin = (eap->force_bin == FORCE_BIN);
set_options_bin(oldval, curbuf->b_p_bin, OPT_LOCAL);
}
}
#if defined(FEAT_MBYTE) || defined(PROTO)
/*
* Set forced 'fileencoding'.
*/
void
set_forced_fenc(exarg_T *eap)
{
if (eap->force_enc != 0)
{
char_u *fenc = enc_canonize(eap->cmd + eap->force_enc);
if (fenc != NULL)
set_string_option_direct((char_u *)"fenc", -1,
fenc, OPT_FREE|OPT_LOCAL, 0);
vim_free(fenc);
}
}
/*
* Find next fileencoding to use from 'fileencodings'.
* "pp" points to fenc_next. It's advanced to the next item.
* When there are no more items, an empty string is returned and *pp is set to
* NULL.
* When *pp is not set to NULL, the result is in allocated memory.
*/
static char_u *
next_fenc(char_u **pp)
{
char_u *p;
char_u *r;
if (**pp == NUL)
{
*pp = NULL;
return (char_u *)"";
}
p = vim_strchr(*pp, ',');
if (p == NULL)
{
r = enc_canonize(*pp);
*pp += STRLEN(*pp);
}
else
{
r = vim_strnsave(*pp, (int)(p - *pp));
*pp = p + 1;
if (r != NULL)
{
p = enc_canonize(r);
vim_free(r);
r = p;
}
}
if (r == NULL) /* out of memory */
{
r = (char_u *)"";
*pp = NULL;
}
return r;
}
# ifdef FEAT_EVAL
/*
* Convert a file with the 'charconvert' expression.
* This closes the file which is to be read, converts it and opens the
* resulting file for reading.
* Returns name of the resulting converted file (the caller should delete it
* after reading it).
 * Returns NULL if the conversion failed ("*fdp" is not set).
*/
static char_u *
readfile_charconvert(
char_u *fname, /* name of input file */
char_u *fenc, /* converted from */
int *fdp) /* in/out: file descriptor of file */
{
char_u *tmpname;
char_u *errmsg = NULL;
tmpname = vim_tempname('r', FALSE);
if (tmpname == NULL)
errmsg = (char_u *)_("Can't find temp file for conversion");
else
{
close(*fdp); /* close the input file, ignore errors */
*fdp = -1;
if (eval_charconvert(fenc, enc_utf8 ? (char_u *)"utf-8" : p_enc,
fname, tmpname) == FAIL)
errmsg = (char_u *)_("Conversion with 'charconvert' failed");
if (errmsg == NULL && (*fdp = mch_open((char *)tmpname,
O_RDONLY | O_EXTRA, 0)) < 0)
errmsg = (char_u *)_("can't read output of 'charconvert'");
}
if (errmsg != NULL)
{
/* Don't use emsg(), it breaks mappings, the retry with
* another type of conversion might still work. */
MSG(errmsg);
if (tmpname != NULL)
{
mch_remove(tmpname); /* delete converted file */
vim_free(tmpname);
tmpname = NULL;
}
}
/* If the input file is closed, open it (caller should check for error). */
if (*fdp < 0)
*fdp = mch_open((char *)fname, O_RDONLY | O_EXTRA, 0);
return tmpname;
}
# endif
#endif
#ifdef FEAT_VIMINFO
/*
* Read marks for the current buffer from the viminfo file, when we support
* buffer marks and the buffer has a name.
*/
static void
check_marks_read(void)
{
if (!curbuf->b_marks_read && get_viminfo_parameter('\'') > 0
&& curbuf->b_ffname != NULL)
read_viminfo(NULL, VIF_WANT_MARKS);
/* Always set b_marks_read; needed when 'viminfo' is changed to include
* the ' parameter after opening a buffer. */
curbuf->b_marks_read = TRUE;
}
#endif
#if defined(FEAT_CRYPT) || defined(PROTO)
/*
* Check for magic number used for encryption. Applies to the current buffer.
 * If found, the encryption header is removed from "ptr", and *sizep and
 * *filesizep are updated accordingly.
* Return the (new) encryption key, NULL for no encryption.
*/
static char_u *
check_for_cryptkey(
char_u *cryptkey, /* previous encryption key or NULL */
char_u *ptr, /* pointer to read bytes */
long *sizep, /* length of read bytes */
off_T *filesizep, /* nr of bytes used from file */
int newfile, /* editing a new buffer */
char_u *fname, /* file name to display */
int *did_ask) /* flag: whether already asked for key */
{
int method = crypt_method_nr_from_magic((char *)ptr, *sizep);
int b_p_ro = curbuf->b_p_ro;
if (method >= 0)
{
/* Mark the buffer as read-only until the decryption has taken place.
* Avoids accidentally overwriting the file with garbage. */
curbuf->b_p_ro = TRUE;
/* Set the cryptmethod local to the buffer. */
crypt_set_cm_option(curbuf, method);
if (cryptkey == NULL && !*did_ask)
{
if (*curbuf->b_p_key)
cryptkey = curbuf->b_p_key;
else
{
/* When newfile is TRUE, store the typed key in the 'key'
	     * option and don't free it.  The blowfish method needs the
	     * hash of the key saved.  Don't ask for the key again when
	     * Enter was hit the first time; this happens when retrying to
	     * detect the encoding. */
smsg((char_u *)_(need_key_msg), fname);
msg_scroll = TRUE;
crypt_check_method(method);
cryptkey = crypt_get_key(newfile, FALSE);
*did_ask = TRUE;
/* check if empty key entered */
if (cryptkey != NULL && *cryptkey == NUL)
{
if (cryptkey != curbuf->b_p_key)
vim_free(cryptkey);
cryptkey = NULL;
}
}
}
if (cryptkey != NULL)
{
int header_len;
curbuf->b_cryptstate = crypt_create_from_header(
method, cryptkey, ptr);
crypt_set_cm_option(curbuf, method);
/* Remove cryptmethod specific header from the text. */
header_len = crypt_get_header_len(method);
if (*sizep <= header_len)
/* invalid header, buffer can't be encrypted */
return NULL;
*filesizep += header_len;
*sizep -= header_len;
mch_memmove(ptr, ptr + header_len, (size_t)*sizep);
/* Restore the read-only flag. */
curbuf->b_p_ro = b_p_ro;
}
}
/* When starting to edit a new file which does not have encryption, clear
* the 'key' option, except when starting up (called with -x argument) */
else if (newfile && *curbuf->b_p_key != NUL && !starting)
set_option_value((char_u *)"key", 0L, (char_u *)"", OPT_LOCAL);
return cryptkey;
}
#endif /* FEAT_CRYPT */
#ifdef UNIX
static void
set_file_time(
char_u *fname,
time_t atime, /* access time */
time_t mtime) /* modification time */
{
# if defined(HAVE_UTIME) && defined(HAVE_UTIME_H)
struct utimbuf buf;
buf.actime = atime;
buf.modtime = mtime;
(void)utime((char *)fname, &buf);
# else
# if defined(HAVE_UTIMES)
struct timeval tvp[2];
tvp[0].tv_sec = atime;
tvp[0].tv_usec = 0;
tvp[1].tv_sec = mtime;
tvp[1].tv_usec = 0;
# ifdef NeXT
(void)utimes((char *)fname, tvp);
# else
(void)utimes((char *)fname, (const struct timeval *)&tvp);
# endif
# endif
# endif
}
#endif /* UNIX */
#if defined(VMS) && !defined(MIN)
/* Older DECC compiler for VAX doesn't define MIN() */
# define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif
/*
* Return TRUE if a file appears to be read-only from the file permissions.
*/
int
check_file_readonly(
char_u *fname, /* full path to file */
int perm) /* known permissions on file */
{
#ifndef USE_MCH_ACCESS
int fd = 0;
#endif
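    /* Either trust the permission bits / access(), or actually try to open
     * the file for writing; opening is the more reliable check. */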
return (
#ifdef USE_MCH_ACCESS
# ifdef UNIX
(perm & 0222) == 0 ||
# endif
mch_access((char *)fname, W_OK)
#else
(fd = mch_open((char *)fname, O_RDWR | O_EXTRA, 0)) < 0
? TRUE : (close(fd), FALSE)
#endif
);
}
/*
* buf_write() - write to file "fname" lines "start" through "end"
*
* We do our own buffering here because fwrite() is so slow.
*
* If "forceit" is true, we don't care for errors when attempting backups.
* In case of an error everything possible is done to restore the original
* file. But when "forceit" is TRUE, we risk losing it.
*
* When "reset_changed" is TRUE and "append" == FALSE and "start" == 1 and
* "end" == curbuf->b_ml.ml_line_count, reset curbuf->b_changed.
*
* This function must NOT use NameBuff (because it's called by autowrite()).
*
* return FAIL for failure, OK otherwise
*/
int
buf_write(
buf_T *buf,
char_u *fname,
char_u *sfname,
linenr_T start,
linenr_T end,
exarg_T *eap, /* for forced 'ff' and 'fenc', can be
NULL! */
int append, /* append to the file */
int forceit,
int reset_changed,
int filtering)
{
int fd;
char_u *backup = NULL;
int backup_copy = FALSE; /* copy the original file? */
int dobackup;
char_u *ffname;
char_u *wfname = NULL; /* name of file to write to */
char_u *s;
char_u *ptr;
char_u c;
int len;
linenr_T lnum;
long nchars;
char_u *errmsg = NULL;
int errmsg_allocated = FALSE;
char_u *errnum = NULL;
char_u *buffer;
char_u smallbuf[SMBUFSIZE];
char_u *backup_ext;
int bufsize;
long perm; /* file permissions */
int retval = OK;
int newfile = FALSE; /* TRUE if file doesn't exist yet */
int msg_save = msg_scroll;
int overwriting; /* TRUE if writing over original */
int no_eol = FALSE; /* no end-of-line written */
int device = FALSE; /* writing to a device */
stat_T st_old;
int prev_got_int = got_int;
int checking_conversion;
int file_readonly = FALSE; /* overwritten file is read-only */
static char *err_readonly = "is read-only (cannot override: \"W\" in 'cpoptions')";
#if defined(UNIX) /*XXX fix me sometime? */
int made_writable = FALSE; /* 'w' bit has been set */
#endif
/* writing everything */
int whole = (start == 1 && end == buf->b_ml.ml_line_count);
#ifdef FEAT_AUTOCMD
linenr_T old_line_count = buf->b_ml.ml_line_count;
#endif
int attr;
int fileformat;
int write_bin;
struct bw_info write_info; /* info for buf_write_bytes() */
#ifdef FEAT_MBYTE
int converted = FALSE;
int notconverted = FALSE;
char_u *fenc; /* effective 'fileencoding' */
char_u *fenc_tofree = NULL; /* allocated "fenc" */
#endif
#ifdef HAS_BW_FLAGS
int wb_flags = 0;
#endif
#ifdef HAVE_ACL
vim_acl_T acl = NULL; /* ACL copied from original file to
backup or new file */
#endif
#ifdef FEAT_PERSISTENT_UNDO
int write_undo_file = FALSE;
context_sha256_T sha_ctx;
#endif
unsigned int bkc = get_bkc_value(buf);
if (fname == NULL || *fname == NUL) /* safety check */
return FAIL;
if (buf->b_ml.ml_mfp == NULL)
{
/* This can happen during startup when there is a stray "w" in the
* vimrc file. */
EMSG(_(e_emptybuf));
return FAIL;
}
/*
* Disallow writing from .exrc and .vimrc in current directory for
* security reasons.
*/
if (check_secure())
return FAIL;
/* Avoid a crash for a long name. */
if (STRLEN(fname) >= MAXPATHL)
{
EMSG(_(e_longname));
return FAIL;
}
#ifdef FEAT_MBYTE
/* must init bw_conv_buf and bw_iconv_fd before jumping to "fail" */
write_info.bw_conv_buf = NULL;
write_info.bw_conv_error = FALSE;
write_info.bw_conv_error_lnum = 0;
write_info.bw_restlen = 0;
# ifdef USE_ICONV
write_info.bw_iconv_fd = (iconv_t)-1;
# endif
#endif
#ifdef FEAT_CRYPT
write_info.bw_buffer = buf;
#endif
/* After writing a file changedtick changes but we don't want to display
* the line. */
ex_no_reprint = TRUE;
/*
* If there is no file name yet, use the one for the written file.
* BF_NOTEDITED is set to reflect this (in case the write fails).
* Don't do this when the write is for a filter command.
* Don't do this when appending.
* Only do this when 'cpoptions' contains the 'F' flag.
*/
if (buf->b_ffname == NULL
&& reset_changed
&& whole
&& buf == curbuf
#ifdef FEAT_QUICKFIX
&& !bt_nofile(buf)
#endif
&& !filtering
&& (!append || vim_strchr(p_cpo, CPO_FNAMEAPP) != NULL)
&& vim_strchr(p_cpo, CPO_FNAMEW) != NULL)
{
if (set_rw_fname(fname, sfname) == FAIL)
return FAIL;
buf = curbuf; /* just in case autocmds made "buf" invalid */
}
if (sfname == NULL)
sfname = fname;
/*
* For Unix: Use the short file name whenever possible.
* Avoids problems with networks and when directory names are changed.
* Don't do this for MS-DOS, a "cd" in a sub-shell may have moved us to
     * another directory, which we don't detect.
*/
ffname = fname; /* remember full fname */
#ifdef UNIX
fname = sfname;
#endif
if (buf->b_ffname != NULL && fnamecmp(ffname, buf->b_ffname) == 0)
overwriting = TRUE;
else
overwriting = FALSE;
if (exiting)
settmode(TMODE_COOK); /* when exiting allow typeahead now */
++no_wait_return; /* don't wait for return yet */
/*
* Set '[ and '] marks to the lines to be written.
*/
buf->b_op_start.lnum = start;
buf->b_op_start.col = 0;
buf->b_op_end.lnum = end;
buf->b_op_end.col = 0;
#ifdef FEAT_AUTOCMD
{
aco_save_T aco;
int buf_ffname = FALSE;
int buf_sfname = FALSE;
int buf_fname_f = FALSE;
int buf_fname_s = FALSE;
int did_cmd = FALSE;
int nofile_err = FALSE;
int empty_memline = (buf->b_ml.ml_mfp == NULL);
bufref_T bufref;
/*
* Apply PRE autocommands.
* Set curbuf to the buffer to be written.
* Careful: The autocommands may call buf_write() recursively!
*/
if (ffname == buf->b_ffname)
buf_ffname = TRUE;
if (sfname == buf->b_sfname)
buf_sfname = TRUE;
if (fname == buf->b_ffname)
buf_fname_f = TRUE;
if (fname == buf->b_sfname)
buf_fname_s = TRUE;
/* set curwin/curbuf to buf and save a few things */
aucmd_prepbuf(&aco, buf);
set_bufref(&bufref, buf);
if (append)
{
if (!(did_cmd = apply_autocmds_exarg(EVENT_FILEAPPENDCMD,
sfname, sfname, FALSE, curbuf, eap)))
{
#ifdef FEAT_QUICKFIX
if (overwriting && bt_nofile(curbuf))
nofile_err = TRUE;
else
#endif
apply_autocmds_exarg(EVENT_FILEAPPENDPRE,
sfname, sfname, FALSE, curbuf, eap);
}
}
else if (filtering)
{
apply_autocmds_exarg(EVENT_FILTERWRITEPRE,
NULL, sfname, FALSE, curbuf, eap);
}
else if (reset_changed && whole)
{
int was_changed = curbufIsChanged();
did_cmd = apply_autocmds_exarg(EVENT_BUFWRITECMD,
sfname, sfname, FALSE, curbuf, eap);
if (did_cmd)
{
if (was_changed && !curbufIsChanged())
{
/* Written everything correctly and BufWriteCmd has reset
* 'modified': Correct the undo information so that an
* undo now sets 'modified'. */
u_unchanged(curbuf);
u_update_save_nr(curbuf);
}
}
else
{
#ifdef FEAT_QUICKFIX
if (overwriting && bt_nofile(curbuf))
nofile_err = TRUE;
else
#endif
apply_autocmds_exarg(EVENT_BUFWRITEPRE,
sfname, sfname, FALSE, curbuf, eap);
}
}
else
{
if (!(did_cmd = apply_autocmds_exarg(EVENT_FILEWRITECMD,
sfname, sfname, FALSE, curbuf, eap)))
{
#ifdef FEAT_QUICKFIX
if (overwriting && bt_nofile(curbuf))
nofile_err = TRUE;
else
#endif
apply_autocmds_exarg(EVENT_FILEWRITEPRE,
sfname, sfname, FALSE, curbuf, eap);
}
}
/* restore curwin/curbuf and a few other things */
aucmd_restbuf(&aco);
/*
* In three situations we return here and don't write the file:
* 1. the autocommands deleted or unloaded the buffer.
* 2. The autocommands abort script processing.
* 3. If one of the "Cmd" autocommands was executed.
*/
if (!bufref_valid(&bufref))
buf = NULL;
if (buf == NULL || (buf->b_ml.ml_mfp == NULL && !empty_memline)
|| did_cmd || nofile_err
#ifdef FEAT_EVAL
|| aborting()
#endif
)
{
--no_wait_return;
msg_scroll = msg_save;
if (nofile_err)
EMSG(_("E676: No matching autocommands for acwrite buffer"));
if (nofile_err
#ifdef FEAT_EVAL
|| aborting()
#endif
)
/* An aborting error, interrupt or exception in the
* autocommands. */
return FAIL;
if (did_cmd)
{
if (buf == NULL)
/* The buffer was deleted. We assume it was written
* (can't retry anyway). */
return OK;
if (overwriting)
{
/* Assume the buffer was written, update the timestamp. */
ml_timestamp(buf);
if (append)
buf->b_flags &= ~BF_NEW;
else
buf->b_flags &= ~BF_WRITE_MASK;
}
if (reset_changed && buf->b_changed && !append
&& (overwriting || vim_strchr(p_cpo, CPO_PLUS) != NULL))
/* Buffer still changed, the autocommands didn't work
* properly. */
return FAIL;
return OK;
}
#ifdef FEAT_EVAL
if (!aborting())
#endif
EMSG(_("E203: Autocommands deleted or unloaded buffer to be written"));
return FAIL;
}
/*
* The autocommands may have changed the number of lines in the file.
* When writing the whole file, adjust the end.
* When writing part of the file, assume that the autocommands only
* changed the number of lines that are to be written (tricky!).
*/
if (buf->b_ml.ml_line_count != old_line_count)
{
if (whole) /* write all */
end = buf->b_ml.ml_line_count;
else if (buf->b_ml.ml_line_count > old_line_count) /* more lines */
end += buf->b_ml.ml_line_count - old_line_count;
	    else /* fewer lines */
{
end -= old_line_count - buf->b_ml.ml_line_count;
if (end < start)
{
--no_wait_return;
msg_scroll = msg_save;
EMSG(_("E204: Autocommand changed number of lines in unexpected way"));
return FAIL;
}
}
}
/*
* The autocommands may have changed the name of the buffer, which may
* be kept in fname, ffname and sfname.
*/
if (buf_ffname)
ffname = buf->b_ffname;
if (buf_sfname)
sfname = buf->b_sfname;
if (buf_fname_f)
fname = buf->b_ffname;
if (buf_fname_s)
fname = buf->b_sfname;
}
#endif
#ifdef FEAT_NETBEANS_INTG
if (netbeans_active() && isNetbeansBuffer(buf))
{
if (whole)
{
/*
* b_changed can be 0 after an undo, but we still need to write
* the buffer to NetBeans.
*/
if (buf->b_changed || isNetbeansModified(buf))
{
--no_wait_return; /* may wait for return now */
msg_scroll = msg_save;
netbeans_save_buffer(buf); /* no error checking... */
return retval;
}
else
{
errnum = (char_u *)"E656: ";
errmsg = (char_u *)_("NetBeans disallows writes of unmodified buffers");
buffer = NULL;
goto fail;
}
}
else
{
errnum = (char_u *)"E657: ";
errmsg = (char_u *)_("Partial writes disallowed for NetBeans buffers");
buffer = NULL;
goto fail;
}
}
#endif
if (shortmess(SHM_OVER) && !exiting)
msg_scroll = FALSE; /* overwrite previous file message */
else
msg_scroll = TRUE; /* don't overwrite previous file message */
if (!filtering)
filemess(buf,
#ifndef UNIX
sfname,
#else
fname,
#endif
(char_u *)"", 0); /* show that we are busy */
msg_scroll = FALSE; /* always overwrite the file message now */
buffer = alloc(BUFSIZE);
if (buffer == NULL) /* can't allocate big buffer, use small
* one (to be able to write when out of
* memory) */
{
buffer = smallbuf;
bufsize = SMBUFSIZE;
}
else
bufsize = BUFSIZE;
/*
* Get information about original file (if there is one).
*/
#if defined(UNIX)
st_old.st_dev = 0;
st_old.st_ino = 0;
perm = -1;
if (mch_stat((char *)fname, &st_old) < 0)
newfile = TRUE;
else
{
perm = st_old.st_mode;
if (!S_ISREG(st_old.st_mode)) /* not a file */
{
if (S_ISDIR(st_old.st_mode))
{
errnum = (char_u *)"E502: ";
errmsg = (char_u *)_("is a directory");
goto fail;
}
if (mch_nodetype(fname) != NODE_WRITABLE)
{
errnum = (char_u *)"E503: ";
errmsg = (char_u *)_("is not a file or writable device");
goto fail;
}
/* It's a device of some kind (or a fifo) which we can write to
* but for which we can't make a backup. */
device = TRUE;
newfile = TRUE;
perm = -1;
}
}
#else /* !UNIX */
/*
* Check for a writable device name.
*/
c = mch_nodetype(fname);
if (c == NODE_OTHER)
{
errnum = (char_u *)"E503: ";
errmsg = (char_u *)_("is not a file or writable device");
goto fail;
}
if (c == NODE_WRITABLE)
{
# if defined(MSWIN)
/* MS-Windows allows opening a device, but we will probably get stuck
* trying to write to it. */
if (!p_odev)
{
errnum = (char_u *)"E796: ";
errmsg = (char_u *)_("writing to device disabled with 'opendevice' option");
goto fail;
}
# endif
device = TRUE;
newfile = TRUE;
perm = -1;
}
else
{
perm = mch_getperm(fname);
if (perm < 0)
newfile = TRUE;
else if (mch_isdir(fname))
{
errnum = (char_u *)"E502: ";
errmsg = (char_u *)_("is a directory");
goto fail;
}
if (overwriting)
(void)mch_stat((char *)fname, &st_old);
}
#endif /* !UNIX */
if (!device && !newfile)
{
/*
* Check if the file is really writable (when renaming the file to
* make a backup we won't discover it later).
*/
file_readonly = check_file_readonly(fname, (int)perm);
if (!forceit && file_readonly)
{
if (vim_strchr(p_cpo, CPO_FWRITE) != NULL)
{
errnum = (char_u *)"E504: ";
errmsg = (char_u *)_(err_readonly);
}
else
{
errnum = (char_u *)"E505: ";
errmsg = (char_u *)_("is read-only (add ! to override)");
}
goto fail;
}
/*
* Check if the timestamp hasn't changed since reading the file.
*/
if (overwriting)
{
retval = check_mtime(buf, &st_old);
if (retval == FAIL)
goto fail;
}
}
#ifdef HAVE_ACL
/*
* For systems that support ACL: get the ACL from the original file.
*/
if (!newfile)
acl = mch_get_acl(fname);
#endif
/*
* If 'backupskip' is not empty, don't make a backup for some files.
*/
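    /* A backup is wanted when 'writebackup' or 'backup' is set, or when
     * 'patchmode' is not empty. */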
dobackup = (p_wb || p_bk || *p_pm != NUL);
#ifdef FEAT_WILDIGN
if (dobackup && *p_bsk != NUL && match_file_list(p_bsk, sfname, ffname))
dobackup = FALSE;
#endif
/*
* Save the value of got_int and reset it. We don't want a previous
     * interruption to cancel writing; only hitting CTRL-C while writing
     * should abort it.
*/
prev_got_int = got_int;
got_int = FALSE;
/* Mark the buffer as 'being saved' to prevent changed buffer warnings */
buf->b_saving = TRUE;
/*
* If we are not appending or filtering, the file exists, and the
* 'writebackup', 'backup' or 'patchmode' option is set, need a backup.
* When 'patchmode' is set also make a backup when appending.
*
* Do not make any backup, if 'writebackup' and 'backup' are both switched
* off. This helps when editing large files on almost-full disks.
*/
if (!(append && *p_pm == NUL) && !filtering && perm >= 0 && dobackup)
{
#if defined(UNIX) || defined(WIN32)
stat_T st;
#endif
if ((bkc & BKC_YES) || append) /* "yes" */
backup_copy = TRUE;
#if defined(UNIX) || defined(WIN32)
else if ((bkc & BKC_AUTO)) /* "auto" */
{
int i;
# ifdef UNIX
/*
* Don't rename the file when:
* - it's a hard link
* - it's a symbolic link
* - we don't have write permission in the directory
* - we can't set the owner/group of the new file
*/
if (st_old.st_nlink > 1
|| mch_lstat((char *)fname, &st) < 0
|| st.st_dev != st_old.st_dev
|| st.st_ino != st_old.st_ino
# ifndef HAVE_FCHOWN
|| st.st_uid != st_old.st_uid
|| st.st_gid != st_old.st_gid
# endif
)
backup_copy = TRUE;
else
# else
# ifdef WIN32
/* On NTFS file systems hard links are possible. */
if (mch_is_linked(fname))
backup_copy = TRUE;
else
# endif
# endif
{
/*
* Check if we can create a file and set the owner/group to
* the ones from the original file.
* First find a file name that doesn't exist yet (use some
* arbitrary numbers).
*/
STRCPY(IObuff, fname);
for (i = 4913; ; i += 123)
{
sprintf((char *)gettail(IObuff), "%d", i);
if (mch_lstat((char *)IObuff, &st) < 0)
break;
}
fd = mch_open((char *)IObuff,
O_CREAT|O_WRONLY|O_EXCL|O_NOFOLLOW, perm);
if (fd < 0) /* can't write in directory */
backup_copy = TRUE;
else
{
# ifdef UNIX
# ifdef HAVE_FCHOWN
ignored = fchown(fd, st_old.st_uid, st_old.st_gid);
# endif
if (mch_stat((char *)IObuff, &st) < 0
|| st.st_uid != st_old.st_uid
|| st.st_gid != st_old.st_gid
|| (long)st.st_mode != perm)
backup_copy = TRUE;
# endif
/* Close the file before removing it, on MS-Windows we
* can't delete an open file. */
close(fd);
mch_remove(IObuff);
# ifdef MSWIN
/* MS-Windows may trigger a virus scanner to open the
* file, we can't delete it then. Keep trying for half a
* second. */
{
int try;
for (try = 0; try < 10; ++try)
{
if (mch_lstat((char *)IObuff, &st) < 0)
break;
ui_delay(50L, TRUE); /* wait 50 msec */
mch_remove(IObuff);
}
}
# endif
}
}
}
/*
* Break symlinks and/or hardlinks if we've been asked to.
*/
if ((bkc & BKC_BREAKSYMLINK) || (bkc & BKC_BREAKHARDLINK))
{
# ifdef UNIX
int lstat_res;
lstat_res = mch_lstat((char *)fname, &st);
/* Symlinks. */
if ((bkc & BKC_BREAKSYMLINK)
&& lstat_res == 0
&& st.st_ino != st_old.st_ino)
backup_copy = FALSE;
/* Hardlinks. */
if ((bkc & BKC_BREAKHARDLINK)
&& st_old.st_nlink > 1
&& (lstat_res != 0 || st.st_ino == st_old.st_ino))
backup_copy = FALSE;
# else
# if defined(WIN32)
/* Symlinks. */
if ((bkc & BKC_BREAKSYMLINK) && mch_is_symbolic_link(fname))
backup_copy = FALSE;
/* Hardlinks. */
if ((bkc & BKC_BREAKHARDLINK) && mch_is_hard_link(fname))
backup_copy = FALSE;
# endif
# endif
}
#endif
/* make sure we have a valid backup extension to use */
if (*p_bex == NUL)
backup_ext = (char_u *)".bak";
else
backup_ext = p_bex;
if (backup_copy
&& (fd = mch_open((char *)fname, O_RDONLY | O_EXTRA, 0)) >= 0)
{
int bfd;
char_u *copybuf, *wp;
int some_error = FALSE;
stat_T st_new;
char_u *dirp;
char_u *rootname;
#if defined(UNIX)
int did_set_shortname;
#endif
copybuf = alloc(BUFSIZE + 1);
if (copybuf == NULL)
{
some_error = TRUE; /* out of memory */
goto nobackup;
}
/*
* Try to make the backup in each directory in the 'bdir' option.
*
	     * Unix semantics has it that we may have a writable file
	     * that cannot be recreated with a simple open(..., O_CREAT, ...), e.g.:
* - the directory is not writable,
* - the file may be a symbolic link,
* - the file may belong to another user/group, etc.
*
* For these reasons, the existing writable file must be truncated
* and reused. Creation of a backup COPY will be attempted.
*/
dirp = p_bdir;
while (*dirp)
{
#ifdef UNIX
st_new.st_ino = 0;
st_new.st_dev = 0;
st_new.st_gid = 0;
#endif
/*
* Isolate one directory name, using an entry in 'bdir'.
*/
(void)copy_option_part(&dirp, copybuf, BUFSIZE, ",");
rootname = get_file_in_dir(fname, copybuf);
if (rootname == NULL)
{
some_error = TRUE; /* out of memory */
goto nobackup;
}
#if defined(UNIX)
did_set_shortname = FALSE;
#endif
/*
* May try twice if 'shortname' not set.
*/
for (;;)
{
/*
* Make backup file name.
*/
backup = buf_modname((buf->b_p_sn || buf->b_shortname),
rootname, backup_ext, FALSE);
if (backup == NULL)
{
vim_free(rootname);
some_error = TRUE; /* out of memory */
goto nobackup;
}
/*
* Check if backup file already exists.
*/
if (mch_stat((char *)backup, &st_new) >= 0)
{
#ifdef UNIX
/*
* Check if backup file is same as original file.
* May happen when modname() gave the same file back.
* E.g. silly link, or file name-length reached.
* If we don't check here, we either ruin the file
* when copying or erase it after writing. jw.
*/
if (st_new.st_dev == st_old.st_dev
&& st_new.st_ino == st_old.st_ino)
{
vim_free(backup);
backup = NULL; /* no backup file to delete */
/*
* may try again with 'shortname' set
*/
if (!(buf->b_shortname || buf->b_p_sn))
{
buf->b_shortname = TRUE;
did_set_shortname = TRUE;
continue;
}
/* setting shortname didn't help */
if (did_set_shortname)
buf->b_shortname = FALSE;
break;
}
#endif
/*
* If we are not going to keep the backup file, don't
* delete an existing one, try to use another name.
* Change one character, just before the extension.
*/
if (!p_bk)
{
wp = backup + STRLEN(backup) - 1
- STRLEN(backup_ext);
if (wp < backup) /* empty file name ??? */
wp = backup;
*wp = 'z';
while (*wp > 'a'
&& mch_stat((char *)backup, &st_new) >= 0)
--*wp;
/* They all exist??? Must be something wrong. */
if (*wp == 'a')
{
vim_free(backup);
backup = NULL;
}
}
}
break;
}
vim_free(rootname);
/*
* Try to create the backup file
*/
if (backup != NULL)
{
/* remove old backup, if present */
mch_remove(backup);
/* Open with O_EXCL to avoid the file being created while
* we were sleeping (symlink hacker attack?) */
bfd = mch_open((char *)backup,
O_WRONLY|O_CREAT|O_EXTRA|O_EXCL|O_NOFOLLOW,
perm & 0777);
if (bfd < 0)
{
vim_free(backup);
backup = NULL;
}
else
{
/* set file protection same as original file, but
* strip s-bit */
(void)mch_setperm(backup, perm & 0777);
#ifdef UNIX
/*
* Try to set the group of the backup same as the
* original file. If this fails, set the protection
* bits for the group same as the protection bits for
* others.
*/
if (st_new.st_gid != st_old.st_gid
# ifdef HAVE_FCHOWN /* sequent-ptx lacks fchown() */
&& fchown(bfd, (uid_t)-1, st_old.st_gid) != 0
# endif
)
mch_setperm(backup,
(perm & 0707) | ((perm & 07) << 3));
# if defined(HAVE_SELINUX) || defined(HAVE_SMACK)
mch_copy_sec(fname, backup);
# endif
#endif
/*
* copy the file.
*/
write_info.bw_fd = bfd;
write_info.bw_buf = copybuf;
#ifdef HAS_BW_FLAGS
write_info.bw_flags = FIO_NOCONVERT;
#endif
while ((write_info.bw_len = read_eintr(fd, copybuf,
BUFSIZE)) > 0)
{
if (buf_write_bytes(&write_info) == FAIL)
{
errmsg = (char_u *)_("E506: Can't write to backup file (add ! to override)");
break;
}
ui_breakcheck();
if (got_int)
{
errmsg = (char_u *)_(e_interr);
break;
}
}
if (close(bfd) < 0 && errmsg == NULL)
errmsg = (char_u *)_("E507: Close error for backup file (add ! to override)");
if (write_info.bw_len < 0)
errmsg = (char_u *)_("E508: Can't read file for backup (add ! to override)");
#ifdef UNIX
set_file_time(backup, st_old.st_atime, st_old.st_mtime);
#endif
#ifdef HAVE_ACL
mch_set_acl(backup, acl);
#endif
#if defined(HAVE_SELINUX) || defined(HAVE_SMACK)
mch_copy_sec(fname, backup);
#endif
break;
}
}
}
nobackup:
close(fd); /* ignore errors for closing read file */
vim_free(copybuf);
if (backup == NULL && errmsg == NULL)
errmsg = (char_u *)_("E509: Cannot create backup file (add ! to override)");
/* ignore errors when forceit is TRUE */
if ((some_error || errmsg != NULL) && !forceit)
{
retval = FAIL;
goto fail;
}
errmsg = NULL;
}
else
{
char_u *dirp;
char_u *p;
char_u *rootname;
/*
* Make a backup by renaming the original file.
*/
/*
* If 'cpoptions' includes the "W" flag, we don't want to
* overwrite a read-only file. But rename may be possible
* anyway, thus we need an extra check here.
*/
if (file_readonly && vim_strchr(p_cpo, CPO_FWRITE) != NULL)
{
errnum = (char_u *)"E504: ";
errmsg = (char_u *)_(err_readonly);
goto fail;
}
	    /*
	     * Form the backup file name - change path/fo.o.h to
	     * path/fo.o.h.bak.  Try all directories in 'backupdir'; the
	     * first one that works is used.
	     */
dirp = p_bdir;
while (*dirp)
{
/*
* Isolate one directory name and make the backup file name.
*/
(void)copy_option_part(&dirp, IObuff, IOSIZE, ",");
rootname = get_file_in_dir(fname, IObuff);
if (rootname == NULL)
backup = NULL;
else
{
backup = buf_modname((buf->b_p_sn || buf->b_shortname),
rootname, backup_ext, FALSE);
vim_free(rootname);
}
if (backup != NULL)
{
/*
* If we are not going to keep the backup file, don't
* delete an existing one, try to use another name.
* Change one character, just before the extension.
*/
if (!p_bk && mch_getperm(backup) >= 0)
{
p = backup + STRLEN(backup) - 1 - STRLEN(backup_ext);
if (p < backup) /* empty file name ??? */
p = backup;
*p = 'z';
while (*p > 'a' && mch_getperm(backup) >= 0)
--*p;
/* They all exist??? Must be something wrong! */
if (*p == 'a')
{
vim_free(backup);
backup = NULL;
}
}
}
if (backup != NULL)
{
/*
* Delete any existing backup and move the current version
* to the backup. For safety, we don't remove the backup
* until the write has finished successfully. And if the
* 'backup' option is set, leave it around.
*/
/*
* If the renaming of the original file to the backup file
* works, quit here.
*/
if (vim_rename(fname, backup) == 0)
break;
vim_free(backup); /* don't do the rename below */
backup = NULL;
}
}
if (backup == NULL && !forceit)
{
errmsg = (char_u *)_("E510: Can't make backup file (add ! to override)");
goto fail;
}
}
}
#if defined(UNIX)
/* When using ":w!" and the file was read-only: make it writable */
if (forceit && perm >= 0 && !(perm & 0200) && st_old.st_uid == getuid()
&& vim_strchr(p_cpo, CPO_FWRITE) == NULL)
{
perm |= 0200;
(void)mch_setperm(fname, perm);
made_writable = TRUE;
}
#endif
/* When using ":w!" and writing to the current file, 'readonly' makes no
* sense, reset it, unless 'Z' appears in 'cpoptions'. */
if (forceit && overwriting && vim_strchr(p_cpo, CPO_KEEPRO) == NULL)
{
buf->b_p_ro = FALSE;
#ifdef FEAT_TITLE
need_maketitle = TRUE; /* set window title later */
#endif
status_redraw_all(); /* redraw status lines later */
}
if (end > buf->b_ml.ml_line_count)
end = buf->b_ml.ml_line_count;
if (buf->b_ml.ml_flags & ML_EMPTY)
start = end + 1;
/*
* If the original file is being overwritten, there is a small chance that
* we crash in the middle of writing. Therefore the file is preserved now.
* This makes all block numbers positive so that recovery does not need
* the original file.
* Don't do this if there is a backup file and we are exiting.
*/
if (reset_changed && !newfile && overwriting
&& !(exiting && backup != NULL))
{
ml_preserve(buf, FALSE);
if (got_int)
{
errmsg = (char_u *)_(e_interr);
goto restore_backup;
}
}
#ifdef VMS
vms_remove_version(fname); /* remove version */
#endif
/* Default: write the file directly. May write to a temp file for
* multi-byte conversion. */
wfname = fname;
#ifdef FEAT_MBYTE
/* Check for forced 'fileencoding' from "++opt=val" argument. */
if (eap != NULL && eap->force_enc != 0)
{
fenc = eap->cmd + eap->force_enc;
fenc = enc_canonize(fenc);
fenc_tofree = fenc;
}
else
fenc = buf->b_p_fenc;
/*
* Check if the file needs to be converted.
*/
converted = need_conversion(fenc);
/*
* Check if UTF-8 to UCS-2/4 or Latin1 conversion needs to be done. Or
* Latin1 to Unicode conversion. This is handled in buf_write_bytes().
* Prepare the flags for it and allocate bw_conv_buf when needed.
*/
if (converted && (enc_utf8 || STRCMP(p_enc, "latin1") == 0))
{
wb_flags = get_fio_flags(fenc);
if (wb_flags & (FIO_UCS2 | FIO_UCS4 | FIO_UTF16 | FIO_UTF8))
{
/* Need to allocate a buffer to translate into. */
if (wb_flags & (FIO_UCS2 | FIO_UTF16 | FIO_UTF8))
write_info.bw_conv_buflen = bufsize * 2;
else /* FIO_UCS4 */
write_info.bw_conv_buflen = bufsize * 4;
write_info.bw_conv_buf
= lalloc((long_u)write_info.bw_conv_buflen, TRUE);
if (write_info.bw_conv_buf == NULL)
end = 0;
}
}
# ifdef WIN3264
if (converted && wb_flags == 0 && (wb_flags = get_win_fio_flags(fenc)) != 0)
{
/* Convert UTF-8 -> UCS-2 and UCS-2 -> DBCS. Worst-case * 4: */
write_info.bw_conv_buflen = bufsize * 4;
write_info.bw_conv_buf
= lalloc((long_u)write_info.bw_conv_buflen, TRUE);
if (write_info.bw_conv_buf == NULL)
end = 0;
}
# endif
# ifdef MACOS_CONVERT
if (converted && wb_flags == 0 && (wb_flags = get_mac_fio_flags(fenc)) != 0)
{
write_info.bw_conv_buflen = bufsize * 3;
write_info.bw_conv_buf
= lalloc((long_u)write_info.bw_conv_buflen, TRUE);
if (write_info.bw_conv_buf == NULL)
end = 0;
}
# endif
# if defined(FEAT_EVAL) || defined(USE_ICONV)
if (converted && wb_flags == 0)
{
# ifdef USE_ICONV
/*
* Use iconv() conversion when conversion is needed and it's not done
* internally.
*/
write_info.bw_iconv_fd = (iconv_t)my_iconv_open(fenc,
enc_utf8 ? (char_u *)"utf-8" : p_enc);
if (write_info.bw_iconv_fd != (iconv_t)-1)
{
/* We're going to use iconv(), allocate a buffer to convert in. */
write_info.bw_conv_buflen = bufsize * ICONV_MULT;
write_info.bw_conv_buf
= lalloc((long_u)write_info.bw_conv_buflen, TRUE);
if (write_info.bw_conv_buf == NULL)
end = 0;
write_info.bw_first = TRUE;
}
# ifdef FEAT_EVAL
else
# endif
# endif
# ifdef FEAT_EVAL
/*
* When the file needs to be converted with 'charconvert' after
* writing, write to a temp file instead and let the conversion
* overwrite the original file.
*/
if (*p_ccv != NUL)
{
wfname = vim_tempname('w', FALSE);
if (wfname == NULL) /* Can't write without a tempfile! */
{
errmsg = (char_u *)_("E214: Can't find temp file for writing");
goto restore_backup;
}
}
# endif
}
# endif
if (converted && wb_flags == 0
# ifdef USE_ICONV
&& write_info.bw_iconv_fd == (iconv_t)-1
# endif
# ifdef FEAT_EVAL
&& wfname == fname
# endif
)
{
if (!forceit)
{
errmsg = (char_u *)_("E213: Cannot convert (add ! to write without conversion)");
goto restore_backup;
}
notconverted = TRUE;
}
#endif
/*
* If conversion is taking place, we may first pretend to write and check
* for conversion errors. Then loop again to write for real.
* When not doing conversion this writes for real right away.
*/
for (checking_conversion = TRUE; ; checking_conversion = FALSE)
{
/*
* There is no need to check conversion when:
* - there is no conversion
* - we make a backup file, that can be restored in case of conversion
* failure.
*/
#ifdef FEAT_MBYTE
if (!converted || dobackup)
#endif
checking_conversion = FALSE;
if (checking_conversion)
{
/* Make sure we don't write anything. */
fd = -1;
write_info.bw_fd = fd;
}
else
{
/*
* Open the file "wfname" for writing.
* We may try to open the file twice: If we can't write to the file
* and forceit is TRUE we delete the existing file and try to
* create a new one. If this still fails we may have lost the
		     * original file!  (This may happen when the user has
		     * reached the quota for the number of files.)
* Appending will fail if the file does not exist and forceit is
* FALSE.
*/
while ((fd = mch_open((char *)wfname, O_WRONLY | O_EXTRA | (append
? (forceit ? (O_APPEND | O_CREAT) : O_APPEND)
: (O_CREAT | O_TRUNC))
, perm < 0 ? 0666 : (perm & 0777))) < 0)
{
/*
* A forced write will try to create a new file if the old one
* is still readonly. This may also happen when the directory
* is read-only. In that case the mch_remove() will fail.
*/
if (errmsg == NULL)
{
#ifdef UNIX
stat_T st;
/* Don't delete the file when it's a hard or symbolic link.
*/
if ((!newfile && st_old.st_nlink > 1)
|| (mch_lstat((char *)fname, &st) == 0
&& (st.st_dev != st_old.st_dev
|| st.st_ino != st_old.st_ino)))
errmsg = (char_u *)_("E166: Can't open linked file for writing");
else
#endif
{
errmsg = (char_u *)_("E212: Can't open file for writing");
if (forceit && vim_strchr(p_cpo, CPO_FWRITE) == NULL
&& perm >= 0)
{
#ifdef UNIX
/* we write to the file, thus it should be marked
writable after all */
if (!(perm & 0200))
made_writable = TRUE;
perm |= 0200;
if (st_old.st_uid != getuid()
|| st_old.st_gid != getgid())
perm &= 0777;
#endif
if (!append) /* don't remove when appending */
mch_remove(wfname);
continue;
}
}
}
restore_backup:
{
stat_T st;
/*
* If we failed to open the file, we don't need a backup.
* Throw it away. If we moved or removed the original file
* try to put the backup in its place.
*/
if (backup != NULL && wfname == fname)
{
if (backup_copy)
{
/*
* There is a small chance that we removed the
* original, try to move the copy in its place.
* This may not work if the vim_rename() fails.
* In that case we leave the copy around.
*/
/* If file does not exist, put the copy in its
* place */
if (mch_stat((char *)fname, &st) < 0)
vim_rename(backup, fname);
/* if original file does exist throw away the copy
*/
if (mch_stat((char *)fname, &st) >= 0)
mch_remove(backup);
}
else
{
/* try to put the original file back */
vim_rename(backup, fname);
}
}
/* if original file no longer exists give an extra warning
*/
if (!newfile && mch_stat((char *)fname, &st) < 0)
end = 0;
}
#ifdef FEAT_MBYTE
if (wfname != fname)
vim_free(wfname);
#endif
goto fail;
}
write_info.bw_fd = fd;
#if defined(WIN3264)
if (backup != NULL && overwriting && !append)
{
if (backup_copy)
(void)mch_copy_file_attribute(wfname, backup);
else
(void)mch_copy_file_attribute(backup, wfname);
}
if (!overwriting && !append)
{
if (buf->b_ffname != NULL)
(void)mch_copy_file_attribute(buf->b_ffname, wfname);
/* Should copy resource fork */
}
#endif
#ifdef FEAT_CRYPT
if (*buf->b_p_key != NUL && !filtering)
{
char_u *header;
int header_len;
buf->b_cryptstate = crypt_create_for_writing(
crypt_get_method_nr(buf),
buf->b_p_key, &header, &header_len);
if (buf->b_cryptstate == NULL || header == NULL)
end = 0;
else
{
/* Write magic number, so that Vim knows how this file is
* encrypted when reading it back. */
write_info.bw_buf = header;
write_info.bw_len = header_len;
write_info.bw_flags = FIO_NOCONVERT;
if (buf_write_bytes(&write_info) == FAIL)
end = 0;
wb_flags |= FIO_ENCRYPTED;
vim_free(header);
}
}
#endif
}
errmsg = NULL;
write_info.bw_buf = buffer;
nchars = 0;
/* use "++bin", "++nobin" or 'binary' */
if (eap != NULL && eap->force_bin != 0)
write_bin = (eap->force_bin == FORCE_BIN);
else
write_bin = buf->b_p_bin;
#ifdef FEAT_MBYTE
/*
* The BOM is written just after the encryption magic number.
* Skip it when appending and the file already existed, the BOM only
* makes sense at the start of the file.
*/
if (buf->b_p_bomb && !write_bin && (!append || perm < 0))
{
write_info.bw_len = make_bom(buffer, fenc);
if (write_info.bw_len > 0)
{
/* don't convert, do encryption */
write_info.bw_flags = FIO_NOCONVERT | wb_flags;
if (buf_write_bytes(&write_info) == FAIL)
end = 0;
else
nchars += write_info.bw_len;
}
}
write_info.bw_start_lnum = start;
#endif
#ifdef FEAT_PERSISTENT_UNDO
write_undo_file = (buf->b_p_udf
&& overwriting
&& !append
&& !filtering
&& reset_changed
&& !checking_conversion);
if (write_undo_file)
/* Prepare for computing the hash value of the text. */
sha256_start(&sha_ctx);
#endif
write_info.bw_len = bufsize;
#ifdef HAS_BW_FLAGS
write_info.bw_flags = wb_flags;
#endif
fileformat = get_fileformat_force(buf, eap);
s = buffer;
len = 0;
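    /* "s" walks through "buffer"; it is flushed with buf_write_bytes()
     * whenever "len" reaches "bufsize". */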
for (lnum = start; lnum <= end; ++lnum)
{
/*
* The next while loop is done once for each character written.
* Keep it fast!
*/
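	/* Point one byte before the line text, so that "*++ptr" below reads
	 * the first byte. */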
ptr = ml_get_buf(buf, lnum, FALSE) - 1;
#ifdef FEAT_PERSISTENT_UNDO
if (write_undo_file)
sha256_update(&sha_ctx, ptr + 1,
(UINT32_T)(STRLEN(ptr + 1) + 1));
#endif
while ((c = *++ptr) != NUL)
{
if (c == NL)
*s = NUL; /* replace newlines with NULs */
else if (c == CAR && fileformat == EOL_MAC)
*s = NL; /* Mac: replace CRs with NLs */
else
*s = c;
++s;
if (++len != bufsize)
continue;
if (buf_write_bytes(&write_info) == FAIL)
{
end = 0; /* write error: break loop */
break;
}
nchars += bufsize;
s = buffer;
len = 0;
#ifdef FEAT_MBYTE
write_info.bw_start_lnum = lnum;
#endif
}
/* write failed or last line has no EOL: stop here */
if (end == 0
|| (lnum == end
&& (write_bin || !buf->b_p_fixeol)
&& (lnum == buf->b_no_eol_lnum
|| (lnum == buf->b_ml.ml_line_count
&& !buf->b_p_eol))))
{
++lnum; /* written the line, count it */
no_eol = TRUE;
break;
}
if (fileformat == EOL_UNIX)
*s++ = NL;
else
{
*s++ = CAR; /* EOL_MAC or EOL_DOS: write CR */
if (fileformat == EOL_DOS) /* write CR-NL */
{
if (++len == bufsize)
{
if (buf_write_bytes(&write_info) == FAIL)
{
end = 0; /* write error: break loop */
break;
}
nchars += bufsize;
s = buffer;
len = 0;
}
*s++ = NL;
}
}
if (++len == bufsize && end)
{
if (buf_write_bytes(&write_info) == FAIL)
{
end = 0; /* write error: break loop */
break;
}
nchars += bufsize;
s = buffer;
len = 0;
ui_breakcheck();
if (got_int)
{
end = 0; /* Interrupted, break loop */
break;
}
}
#ifdef VMS
/*
* On VMS there is a problem: newlines get added when writing
* blocks at a time. Fix it by writing a line at a time.
* This is much slower!
* Explanation: VAX/DECC RTL insists that records in some RMS
* structures end with a newline (carriage return) character, and
* if they don't it adds one.
	 * With other RMS structures it works perfectly without this fix.
*/
if (buf->b_fab_rfm == FAB$C_VFC
|| ((buf->b_fab_rat & (FAB$M_FTN | FAB$M_CR)) != 0))
{
int b2write;
buf->b_fab_mrs = (buf->b_fab_mrs == 0
? MIN(4096, bufsize)
: MIN(buf->b_fab_mrs, bufsize));
b2write = len;
while (b2write > 0)
{
write_info.bw_len = MIN(b2write, buf->b_fab_mrs);
if (buf_write_bytes(&write_info) == FAIL)
{
end = 0;
break;
}
b2write -= MIN(b2write, buf->b_fab_mrs);
}
write_info.bw_len = bufsize;
nchars += len;
s = buffer;
len = 0;
}
#endif
}
if (len > 0 && end > 0)
{
write_info.bw_len = len;
if (buf_write_bytes(&write_info) == FAIL)
end = 0; /* write error */
nchars += len;
}
/* Stop when writing done or an error was encountered. */
if (!checking_conversion || end == 0)
break;
/* If no error happened until now, writing should be ok, so loop to
* really write the buffer. */
}
/* If we started writing, finish writing. Also when an error was
* encountered. */
if (!checking_conversion)
{
#if defined(UNIX) && defined(HAVE_FSYNC)
/*
* On many journalling file systems there is a bug that causes both the
* original and the backup file to be lost when halting the system
* right after writing the file. That's because only the meta-data is
* journalled. Syncing the file slows down the system, but assures it
* has been written to disk and we don't lose it.
* For a device do try the fsync() but don't complain if it does not
* work (could be a pipe).
* If the 'fsync' option is FALSE, don't fsync(). Useful for laptops.
*/
if (p_fs && fsync(fd) != 0 && !device)
{
errmsg = (char_u *)_("E667: Fsync failed");
end = 0;
}
#endif
#if defined(HAVE_SELINUX) || defined(HAVE_SMACK)
/* Probably need to set the security context. */
if (!backup_copy)
mch_copy_sec(backup, wfname);
#endif
#ifdef UNIX
/* When creating a new file, set its owner/group to that of the
* original file. Get the new device and inode number. */
if (backup != NULL && !backup_copy)
{
# ifdef HAVE_FCHOWN
stat_T st;
/* don't change the owner when it's already OK, some systems remove
* permission or ACL stuff */
if (mch_stat((char *)wfname, &st) < 0
|| st.st_uid != st_old.st_uid
|| st.st_gid != st_old.st_gid)
{
ignored = fchown(fd, st_old.st_uid, st_old.st_gid);
if (perm >= 0) /* set permission again, may have changed */
(void)mch_setperm(wfname, perm);
}
# endif
buf_setino(buf);
}
else if (!buf->b_dev_valid)
/* Set the inode when creating a new file. */
buf_setino(buf);
#endif
if (close(fd) != 0)
{
errmsg = (char_u *)_("E512: Close failed");
end = 0;
}
#ifdef UNIX
if (made_writable)
perm &= ~0200; /* reset 'w' bit for security reasons */
#endif
if (perm >= 0) /* set perm. of new file same as old file */
(void)mch_setperm(wfname, perm);
#ifdef HAVE_ACL
/*
* Probably need to set the ACL before changing the user (can't set the
* ACL on a file the user doesn't own).
* On Solaris, with ZFS and the aclmode property set to "discard" (the
     * default), chmod() discards all parts of a file's ACL that don't
* represent the mode of the file. It's non-trivial for us to discover
* whether we're in that situation, so we simply always re-set the ACL.
*/
# ifndef HAVE_SOLARIS_ZFS_ACL
if (!backup_copy)
# endif
mch_set_acl(wfname, acl);
#endif
#ifdef FEAT_CRYPT
if (buf->b_cryptstate != NULL)
{
crypt_free_state(buf->b_cryptstate);
buf->b_cryptstate = NULL;
}
#endif
#if defined(FEAT_MBYTE) && defined(FEAT_EVAL)
if (wfname != fname)
{
/*
* The file was written to a temp file, now it needs to be
* converted with 'charconvert' to (overwrite) the output file.
*/
if (end != 0)
{
if (eval_charconvert(enc_utf8 ? (char_u *)"utf-8" : p_enc,
fenc, wfname, fname) == FAIL)
{
write_info.bw_conv_error = TRUE;
end = 0;
}
}
mch_remove(wfname);
vim_free(wfname);
}
#endif
}
if (end == 0)
{
/*
* Error encountered.
*/
if (errmsg == NULL)
{
#ifdef FEAT_MBYTE
if (write_info.bw_conv_error)
{
if (write_info.bw_conv_error_lnum == 0)
errmsg = (char_u *)_("E513: write error, conversion failed (make 'fenc' empty to override)");
else
{
errmsg_allocated = TRUE;
errmsg = alloc(300);
vim_snprintf((char *)errmsg, 300, _("E513: write error, conversion failed in line %ld (make 'fenc' empty to override)"),
(long)write_info.bw_conv_error_lnum);
}
}
else
#endif
if (got_int)
errmsg = (char_u *)_(e_interr);
else
errmsg = (char_u *)_("E514: write error (file system full?)");
}
/*
* If we have a backup file, try to put it in place of the new file,
* because the new file is probably corrupt. This avoids losing the
* original file when trying to make a backup when writing the file a
* second time.
* When "backup_copy" is set we need to copy the backup over the new
* file. Otherwise rename the backup file.
* If this is OK, don't give the extra warning message.
*/
if (backup != NULL)
{
if (backup_copy)
{
/* This may take a while, if we were interrupted let the user
* know we got the message. */
if (got_int)
{
MSG(_(e_interr));
out_flush();
}
if ((fd = mch_open((char *)backup, O_RDONLY | O_EXTRA, 0)) >= 0)
{
if ((write_info.bw_fd = mch_open((char *)fname,
O_WRONLY | O_CREAT | O_TRUNC | O_EXTRA,
perm & 0777)) >= 0)
{
/* copy the file. */
write_info.bw_buf = smallbuf;
#ifdef HAS_BW_FLAGS
write_info.bw_flags = FIO_NOCONVERT;
#endif
while ((write_info.bw_len = read_eintr(fd, smallbuf,
SMBUFSIZE)) > 0)
if (buf_write_bytes(&write_info) == FAIL)
break;
if (close(write_info.bw_fd) >= 0
&& write_info.bw_len == 0)
end = 1; /* success */
}
close(fd); /* ignore errors for closing read file */
}
}
else
{
if (vim_rename(backup, fname) == 0)
end = 1;
}
}
goto fail;
}
lnum -= start; /* compute number of written lines */
--no_wait_return; /* may wait for return now */
#if !(defined(UNIX) || defined(VMS))
fname = sfname; /* use shortname now, for the messages */
#endif
if (!filtering)
{
msg_add_fname(buf, fname); /* put fname in IObuff with quotes */
c = FALSE;
#ifdef FEAT_MBYTE
if (write_info.bw_conv_error)
{
STRCAT(IObuff, _(" CONVERSION ERROR"));
c = TRUE;
if (write_info.bw_conv_error_lnum != 0)
vim_snprintf_add((char *)IObuff, IOSIZE, _(" in line %ld;"),
(long)write_info.bw_conv_error_lnum);
}
else if (notconverted)
{
STRCAT(IObuff, _("[NOT converted]"));
c = TRUE;
}
else if (converted)
{
STRCAT(IObuff, _("[converted]"));
c = TRUE;
}
#endif
if (device)
{
STRCAT(IObuff, _("[Device]"));
c = TRUE;
}
else if (newfile)
{
STRCAT(IObuff, shortmess(SHM_NEW) ? _("[New]") : _("[New File]"));
c = TRUE;
}
if (no_eol)
{
msg_add_eol();
c = TRUE;
}
/* may add [unix/dos/mac] */
if (msg_add_fileformat(fileformat))
c = TRUE;
#ifdef FEAT_CRYPT
if (wb_flags & FIO_ENCRYPTED)
{
crypt_append_msg(buf);
c = TRUE;
}
#endif
msg_add_lines(c, (long)lnum, nchars); /* add line/char count */
if (!shortmess(SHM_WRITE))
{
if (append)
STRCAT(IObuff, shortmess(SHM_WRI) ? _(" [a]") : _(" appended"));
else
STRCAT(IObuff, shortmess(SHM_WRI) ? _(" [w]") : _(" written"));
}
set_keep_msg(msg_trunc_attr(IObuff, FALSE, 0), 0);
}
/* When written everything correctly: reset 'modified'. Unless not
* writing to the original file and '+' is not in 'cpoptions'. */
if (reset_changed && whole && !append
#ifdef FEAT_MBYTE
&& !write_info.bw_conv_error
#endif
&& (overwriting || vim_strchr(p_cpo, CPO_PLUS) != NULL)
)
{
unchanged(buf, TRUE);
#ifdef FEAT_AUTOCMD
/* b:changedtick is always incremented in unchanged() but that
* should not trigger a TextChanged event. */
if (last_changedtick + 1 == CHANGEDTICK(buf)
&& last_changedtick_buf == buf)
last_changedtick = CHANGEDTICK(buf);
#endif
u_unchanged(buf);
u_update_save_nr(buf);
}
/*
* If written to the current file, update the timestamp of the swap file
* and reset the BF_WRITE_MASK flags. Also sets buf->b_mtime.
*/
if (overwriting)
{
ml_timestamp(buf);
if (append)
buf->b_flags &= ~BF_NEW;
else
buf->b_flags &= ~BF_WRITE_MASK;
}
/*
* If we kept a backup until now, and we are in patch mode, then we make
* the backup file our 'original' file.
*/
if (*p_pm && dobackup)
{
char *org = (char *)buf_modname((buf->b_p_sn || buf->b_shortname),
fname, p_pm, FALSE);
if (backup != NULL)
{
stat_T st;
/*
* If the original file does not exist yet
* the current backup file becomes the original file
*/
if (org == NULL)
EMSG(_("E205: Patchmode: can't save original file"));
else if (mch_stat(org, &st) < 0)
{
vim_rename(backup, (char_u *)org);
vim_free(backup); /* don't delete the file */
backup = NULL;
#ifdef UNIX
set_file_time((char_u *)org, st_old.st_atime, st_old.st_mtime);
#endif
}
}
/*
* If there is no backup file, remember that a (new) file was
* created.
*/
else
{
int empty_fd;
if (org == NULL
|| (empty_fd = mch_open(org,
O_CREAT | O_EXTRA | O_EXCL | O_NOFOLLOW,
perm < 0 ? 0666 : (perm & 0777))) < 0)
EMSG(_("E206: patchmode: can't touch empty original file"));
else
close(empty_fd);
}
if (org != NULL)
{
mch_setperm((char_u *)org, mch_getperm(fname) & 0777);
vim_free(org);
}
}
/*
* Remove the backup unless 'backup' option is set
*/
if (!p_bk && backup != NULL && mch_remove(backup) != 0)
EMSG(_("E207: Can't delete backup file"));
#ifdef FEAT_SUN_WORKSHOP
if (usingSunWorkShop)
workshop_file_saved((char *) ffname);
#endif
goto nofail;
/*
* Finish up. We get here either after failure or success.
*/
fail:
--no_wait_return; /* may wait for return now */
nofail:
/* Done saving, we accept changed buffer warnings again */
buf->b_saving = FALSE;
vim_free(backup);
if (buffer != smallbuf)
vim_free(buffer);
#ifdef FEAT_MBYTE
vim_free(fenc_tofree);
vim_free(write_info.bw_conv_buf);
# ifdef USE_ICONV
if (write_info.bw_iconv_fd != (iconv_t)-1)
{
iconv_close(write_info.bw_iconv_fd);
write_info.bw_iconv_fd = (iconv_t)-1;
}
# endif
#endif
#ifdef HAVE_ACL
mch_free_acl(acl);
#endif
if (errmsg != NULL)
{
int numlen = errnum != NULL ? (int)STRLEN(errnum) : 0;
attr = HL_ATTR(HLF_E); /* set highlight for error messages */
msg_add_fname(buf,
#ifndef UNIX
sfname
#else
fname
#endif
); /* put file name in IObuff with quotes */
if (STRLEN(IObuff) + STRLEN(errmsg) + numlen >= IOSIZE)
IObuff[IOSIZE - STRLEN(errmsg) - numlen - 1] = NUL;
/* If the error message has the form "is ...", put the error number in
* front of the file name. */
if (errnum != NULL)
{
STRMOVE(IObuff + numlen, IObuff);
mch_memmove(IObuff, errnum, (size_t)numlen);
}
STRCAT(IObuff, errmsg);
emsg(IObuff);
if (errmsg_allocated)
vim_free(errmsg);
retval = FAIL;
if (end == 0)
{
MSG_PUTS_ATTR(_("\nWARNING: Original file may be lost or damaged\n"),
attr | MSG_HIST);
MSG_PUTS_ATTR(_("don't quit the editor until the file is successfully written!"),
attr | MSG_HIST);
/* Update the timestamp to avoid an "overwrite changed file"
* prompt when writing again. */
if (mch_stat((char *)fname, &st_old) >= 0)
{
buf_store_time(buf, &st_old, fname);
buf->b_mtime_read = buf->b_mtime;
}
}
}
msg_scroll = msg_save;
#ifdef FEAT_PERSISTENT_UNDO
/*
* When writing the whole file and 'undofile' is set, also write the undo
* file.
*/
if (retval == OK && write_undo_file)
{
char_u hash[UNDO_HASH_SIZE];
sha256_finish(&sha_ctx, hash);
u_write_undo(NULL, FALSE, buf, hash);
}
#endif
#ifdef FEAT_AUTOCMD
#ifdef FEAT_EVAL
if (!should_abort(retval))
#else
if (!got_int)
#endif
{
aco_save_T aco;
curbuf->b_no_eol_lnum = 0; /* in case it was set by the previous read */
/*
* Apply POST autocommands.
* Careful: The autocommands may call buf_write() recursively!
*/
aucmd_prepbuf(&aco, buf);
if (append)
apply_autocmds_exarg(EVENT_FILEAPPENDPOST, fname, fname,
FALSE, curbuf, eap);
else if (filtering)
apply_autocmds_exarg(EVENT_FILTERWRITEPOST, NULL, fname,
FALSE, curbuf, eap);
else if (reset_changed && whole)
apply_autocmds_exarg(EVENT_BUFWRITEPOST, fname, fname,
FALSE, curbuf, eap);
else
apply_autocmds_exarg(EVENT_FILEWRITEPOST, fname, fname,
FALSE, curbuf, eap);
/* restore curwin/curbuf and a few other things */
aucmd_restbuf(&aco);
#ifdef FEAT_EVAL
if (aborting()) /* autocmds may abort script processing */
retval = FALSE;
#endif
}
#endif
got_int |= prev_got_int;
return retval;
}
/*
* Set the name of the current buffer. Use when the buffer doesn't have a
* name and a ":r" or ":w" command with a file name is used.
*/
static int
set_rw_fname(char_u *fname, char_u *sfname)
{
#ifdef FEAT_AUTOCMD
buf_T *buf = curbuf;
/* It's like the unnamed buffer is deleted.... */
if (curbuf->b_p_bl)
apply_autocmds(EVENT_BUFDELETE, NULL, NULL, FALSE, curbuf);
apply_autocmds(EVENT_BUFWIPEOUT, NULL, NULL, FALSE, curbuf);
# ifdef FEAT_EVAL
if (aborting()) /* autocmds may abort script processing */
return FAIL;
# endif
if (curbuf != buf)
{
/* We are in another buffer now, don't do the renaming. */
EMSG(_(e_auchangedbuf));
return FAIL;
}
#endif
if (setfname(curbuf, fname, sfname, FALSE) == OK)
curbuf->b_flags |= BF_NOTEDITED;
#ifdef FEAT_AUTOCMD
/* ....and a new named one is created */
apply_autocmds(EVENT_BUFNEW, NULL, NULL, FALSE, curbuf);
if (curbuf->b_p_bl)
apply_autocmds(EVENT_BUFADD, NULL, NULL, FALSE, curbuf);
# ifdef FEAT_EVAL
if (aborting()) /* autocmds may abort script processing */
return FAIL;
# endif
/* Do filetype detection now if 'filetype' is empty. */
if (*curbuf->b_p_ft == NUL)
{
if (au_has_group((char_u *)"filetypedetect"))
(void)do_doautocmd((char_u *)"filetypedetect BufRead", FALSE, NULL);
do_modelines(0);
}
#endif
return OK;
}
/*
* Put file name into IObuff with quotes.
*/
void
msg_add_fname(buf_T *buf, char_u *fname)
{
if (fname == NULL)
fname = (char_u *)"-stdin-";
home_replace(buf, fname, IObuff + 1, IOSIZE - 4, TRUE);
IObuff[0] = '"';
STRCAT(IObuff, "\" ");
}
/*
* Append message for text mode to IObuff.
* Return TRUE if something appended.
*/
static int
msg_add_fileformat(int eol_type)
{
#ifndef USE_CRNL
if (eol_type == EOL_DOS)
{
STRCAT(IObuff, shortmess(SHM_TEXT) ? _("[dos]") : _("[dos format]"));
return TRUE;
}
#endif
#ifndef USE_CR
if (eol_type == EOL_MAC)
{
STRCAT(IObuff, shortmess(SHM_TEXT) ? _("[mac]") : _("[mac format]"));
return TRUE;
}
#endif
#if defined(USE_CRNL) || defined(USE_CR)
if (eol_type == EOL_UNIX)
{
STRCAT(IObuff, shortmess(SHM_TEXT) ? _("[unix]") : _("[unix format]"));
return TRUE;
}
#endif
return FALSE;
}
/*
* Append line and character count to IObuff.
*/
void
msg_add_lines(
int insert_space,
long lnum,
off_T nchars)
{
char_u *p;
p = IObuff + STRLEN(IObuff);
if (insert_space)
*p++ = ' ';
if (shortmess(SHM_LINES))
vim_snprintf((char *)p, IOSIZE - (p - IObuff),
"%ldL, %lldC", lnum, (varnumber_T)nchars);
else
{
if (lnum == 1)
STRCPY(p, _("1 line, "));
else
sprintf((char *)p, _("%ld lines, "), lnum);
p += STRLEN(p);
if (nchars == 1)
STRCPY(p, _("1 character"));
else
vim_snprintf((char *)p, IOSIZE - (p - IObuff),
_("%lld characters"), (varnumber_T)nchars);
}
}
/*
* Append message for missing line separator to IObuff.
*/
static void
msg_add_eol(void)
{
STRCAT(IObuff, shortmess(SHM_LAST) ? _("[noeol]") : _("[Incomplete last line]"));
}
/*
* Check modification time of file, before writing to it.
* The size isn't checked, because using a tool like "gzip" takes care of
* using the same timestamp but can't set the size.
*/
static int
check_mtime(buf_T *buf, stat_T *st)
{
if (buf->b_mtime_read != 0
&& time_differs((long)st->st_mtime, buf->b_mtime_read))
{
msg_scroll = TRUE; /* don't overwrite messages here */
msg_silent = 0; /* must give this prompt */
/* don't use emsg() here, don't want to flush the buffers */
MSG_ATTR(_("WARNING: The file has been changed since reading it!!!"),
HL_ATTR(HLF_E));
if (ask_yesno((char_u *)_("Do you really want to write to it"),
TRUE) == 'n')
return FAIL;
msg_scroll = FALSE; /* always overwrite the file message now */
}
return OK;
}
static int
time_differs(long t1, long t2)
{
#if defined(__linux__) || defined(MSWIN)
    /* On a FAT filesystem, esp. under Linux, there are only 5 bits to store
     * the seconds, giving a 2-second resolution.  Since the rounding is done
     * when flushing the inode, the time may change unexpectedly by one
     * second!!! */
return (t1 - t2 > 1 || t2 - t1 > 1);
#else
return (t1 != t2);
#endif
}
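/*
 * Illustrative example: with t1 == 100 and t2 == 101 this reports "no
 * difference" in Linux/MS-Windows builds (FAT keeps timestamps with a
 * 2-second granularity), but a difference everywhere else.
 */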
/*
* Call write() to write a number of bytes to the file.
* Handles encryption and 'encoding' conversion.
*
* Return FAIL for failure, OK otherwise.
*/
static int
buf_write_bytes(struct bw_info *ip)
{
int wlen;
char_u *buf = ip->bw_buf; /* data to write */
int len = ip->bw_len; /* length of data */
#ifdef HAS_BW_FLAGS
int flags = ip->bw_flags; /* extra flags */
#endif
#ifdef FEAT_MBYTE
/*
* Skip conversion when writing the crypt magic number or the BOM.
*/
if (!(flags & FIO_NOCONVERT))
{
char_u *p;
unsigned c;
int n;
if (flags & FIO_UTF8)
{
/*
* Convert latin1 in the buffer to UTF-8 in the file.
*/
p = ip->bw_conv_buf; /* translate to buffer */
for (wlen = 0; wlen < len; ++wlen)
p += utf_char2bytes(buf[wlen], p);
buf = ip->bw_conv_buf;
len = (int)(p - ip->bw_conv_buf);
}
else if (flags & (FIO_UCS4 | FIO_UTF16 | FIO_UCS2 | FIO_LATIN1))
{
/*
* Convert UTF-8 bytes in the buffer to UCS-2, UCS-4, UTF-16 or
* Latin1 chars in the file.
*/
if (flags & FIO_LATIN1)
p = buf; /* translate in-place (can only get shorter) */
else
p = ip->bw_conv_buf; /* translate to buffer */
for (wlen = 0; wlen < len; wlen += n)
{
if (wlen == 0 && ip->bw_restlen != 0)
{
int l;
/* Use remainder of previous call. Append the start of
* buf[] to get a full sequence. Might still be too
* short! */
l = CONV_RESTLEN - ip->bw_restlen;
if (l > len)
l = len;
mch_memmove(ip->bw_rest + ip->bw_restlen, buf, (size_t)l);
n = utf_ptr2len_len(ip->bw_rest, ip->bw_restlen + l);
if (n > ip->bw_restlen + len)
{
/* We have an incomplete byte sequence at the end to
* be written. We can't convert it without the
* remaining bytes. Keep them for the next call. */
if (ip->bw_restlen + len > CONV_RESTLEN)
return FAIL;
ip->bw_restlen += len;
break;
}
if (n > 1)
c = utf_ptr2char(ip->bw_rest);
else
c = ip->bw_rest[0];
if (n >= ip->bw_restlen)
{
n -= ip->bw_restlen;
ip->bw_restlen = 0;
}
else
{
ip->bw_restlen -= n;
mch_memmove(ip->bw_rest, ip->bw_rest + n,
(size_t)ip->bw_restlen);
n = 0;
}
}
else
{
n = utf_ptr2len_len(buf + wlen, len - wlen);
if (n > len - wlen)
{
/* We have an incomplete byte sequence at the end to
* be written. We can't convert it without the
* remaining bytes. Keep them for the next call. */
if (len - wlen > CONV_RESTLEN)
return FAIL;
ip->bw_restlen = len - wlen;
mch_memmove(ip->bw_rest, buf + wlen,
(size_t)ip->bw_restlen);
break;
}
if (n > 1)
c = utf_ptr2char(buf + wlen);
else
c = buf[wlen];
}
if (ucs2bytes(c, &p, flags) && !ip->bw_conv_error)
{
ip->bw_conv_error = TRUE;
ip->bw_conv_error_lnum = ip->bw_start_lnum;
}
if (c == NL)
++ip->bw_start_lnum;
}
if (flags & FIO_LATIN1)
len = (int)(p - buf);
else
{
buf = ip->bw_conv_buf;
len = (int)(p - ip->bw_conv_buf);
}
}
# ifdef WIN3264
else if (flags & FIO_CODEPAGE)
{
/*
* Convert UTF-8 or codepage to UCS-2 and then to MS-Windows
* codepage.
*/
char_u *from;
size_t fromlen;
char_u *to;
int u8c;
BOOL bad = FALSE;
int needed;
if (ip->bw_restlen > 0)
{
/* Need to concatenate the remainder of the previous call and
* the bytes of the current call. Use the end of the
* conversion buffer for this. */
fromlen = len + ip->bw_restlen;
from = ip->bw_conv_buf + ip->bw_conv_buflen - fromlen;
mch_memmove(from, ip->bw_rest, (size_t)ip->bw_restlen);
mch_memmove(from + ip->bw_restlen, buf, (size_t)len);
}
else
{
from = buf;
fromlen = len;
}
to = ip->bw_conv_buf;
if (enc_utf8)
{
/* Convert from UTF-8 to UCS-2, to the start of the buffer.
* The buffer has been allocated to be big enough. */
while (fromlen > 0)
{
n = (int)utf_ptr2len_len(from, (int)fromlen);
if (n > (int)fromlen) /* incomplete byte sequence */
break;
u8c = utf_ptr2char(from);
*to++ = (u8c & 0xff);
*to++ = (u8c >> 8);
fromlen -= n;
from += n;
}
/* Copy remainder to ip->bw_rest[] to be used for the next
* call. */
if (fromlen > CONV_RESTLEN)
{
/* weird overlong sequence */
ip->bw_conv_error = TRUE;
return FAIL;
}
mch_memmove(ip->bw_rest, from, fromlen);
ip->bw_restlen = (int)fromlen;
}
else
{
/* Convert from enc_codepage to UCS-2, to the start of the
* buffer. The buffer has been allocated to be big enough. */
ip->bw_restlen = 0;
needed = MultiByteToWideChar(enc_codepage,
MB_ERR_INVALID_CHARS, (LPCSTR)from, (int)fromlen,
NULL, 0);
if (needed == 0)
{
/* When conversion fails there may be a trailing byte. */
needed = MultiByteToWideChar(enc_codepage,
MB_ERR_INVALID_CHARS, (LPCSTR)from, (int)fromlen - 1,
NULL, 0);
if (needed == 0)
{
/* Conversion doesn't work. */
ip->bw_conv_error = TRUE;
return FAIL;
}
/* Save the trailing byte for the next call. */
ip->bw_rest[0] = from[fromlen - 1];
ip->bw_restlen = 1;
}
needed = MultiByteToWideChar(enc_codepage, MB_ERR_INVALID_CHARS,
(LPCSTR)from, (int)(fromlen - ip->bw_restlen),
(LPWSTR)to, needed);
if (needed == 0)
{
/* Safety check: Conversion doesn't work. */
ip->bw_conv_error = TRUE;
return FAIL;
}
to += needed * 2;
}
fromlen = to - ip->bw_conv_buf;
buf = to;
# ifdef CP_UTF8 /* VC 4.1 doesn't define CP_UTF8 */
if (FIO_GET_CP(flags) == CP_UTF8)
{
/* Convert from UCS-2 to UTF-8, using the remainder of the
* conversion buffer. Fails when out of space. */
for (from = ip->bw_conv_buf; fromlen > 1; fromlen -= 2)
{
u8c = *from++;
u8c += (*from++ << 8);
to += utf_char2bytes(u8c, to);
if (to + 6 >= ip->bw_conv_buf + ip->bw_conv_buflen)
{
ip->bw_conv_error = TRUE;
return FAIL;
}
}
len = (int)(to - buf);
}
else
# endif
{
/* Convert from UCS-2 to the codepage, using the remainder of
* the conversion buffer. If the conversion uses the default
* character "0", the data doesn't fit in this encoding, so
* fail. */
len = WideCharToMultiByte(FIO_GET_CP(flags), 0,
(LPCWSTR)ip->bw_conv_buf, (int)fromlen / sizeof(WCHAR),
(LPSTR)to, (int)(ip->bw_conv_buflen - fromlen), 0,
&bad);
if (bad)
{
ip->bw_conv_error = TRUE;
return FAIL;
}
}
}
# endif
# ifdef MACOS_CONVERT
else if (flags & FIO_MACROMAN)
{
/*
* Convert UTF-8 or latin1 to Apple MacRoman.
*/
char_u *from;
size_t fromlen;
if (ip->bw_restlen > 0)
{
/* Need to concatenate the remainder of the previous call and
* the bytes of the current call. Use the end of the
* conversion buffer for this. */
fromlen = len + ip->bw_restlen;
from = ip->bw_conv_buf + ip->bw_conv_buflen - fromlen;
mch_memmove(from, ip->bw_rest, (size_t)ip->bw_restlen);
mch_memmove(from + ip->bw_restlen, buf, (size_t)len);
}
else
{
from = buf;
fromlen = len;
}
if (enc2macroman(from, fromlen,
ip->bw_conv_buf, &len, ip->bw_conv_buflen,
ip->bw_rest, &ip->bw_restlen) == FAIL)
{
ip->bw_conv_error = TRUE;
return FAIL;
}
buf = ip->bw_conv_buf;
}
# endif
# ifdef USE_ICONV
if (ip->bw_iconv_fd != (iconv_t)-1)
{
const char *from;
size_t fromlen;
char *to;
size_t tolen;
/* Convert with iconv(). */
if (ip->bw_restlen > 0)
{
char *fp;
/* Need to concatenate the remainder of the previous call and
* the bytes of the current call. Use the end of the
* conversion buffer for this. */
fromlen = len + ip->bw_restlen;
fp = (char *)ip->bw_conv_buf + ip->bw_conv_buflen - fromlen;
mch_memmove(fp, ip->bw_rest, (size_t)ip->bw_restlen);
mch_memmove(fp + ip->bw_restlen, buf, (size_t)len);
from = fp;
tolen = ip->bw_conv_buflen - fromlen;
}
else
{
from = (const char *)buf;
fromlen = len;
tolen = ip->bw_conv_buflen;
}
to = (char *)ip->bw_conv_buf;
if (ip->bw_first)
{
size_t save_len = tolen;
/* output the initial shift state sequence */
(void)iconv(ip->bw_iconv_fd, NULL, NULL, &to, &tolen);
	    /* There is a bug in iconv() on Linux (which appears to be
	     * widespread) that sets "to" to NULL and messes up "tolen".
	     */
if (to == NULL)
{
to = (char *)ip->bw_conv_buf;
tolen = save_len;
}
ip->bw_first = FALSE;
}
/*
* If iconv() has an error or there is not enough room, fail.
*/
if ((iconv(ip->bw_iconv_fd, (void *)&from, &fromlen, &to, &tolen)
== (size_t)-1 && ICONV_ERRNO != ICONV_EINVAL)
|| fromlen > CONV_RESTLEN)
{
ip->bw_conv_error = TRUE;
return FAIL;
}
/* copy remainder to ip->bw_rest[] to be used for the next call. */
if (fromlen > 0)
mch_memmove(ip->bw_rest, (void *)from, fromlen);
ip->bw_restlen = (int)fromlen;
buf = ip->bw_conv_buf;
len = (int)((char_u *)to - ip->bw_conv_buf);
}
# endif
}
#endif /* FEAT_MBYTE */
if (ip->bw_fd < 0)
/* Only checking conversion, which is OK if we get here. */
return OK;
#ifdef FEAT_CRYPT
if (flags & FIO_ENCRYPTED)
{
/* Encrypt the data. Do it in-place if possible, otherwise use an
* allocated buffer. */
if (crypt_works_inplace(ip->bw_buffer->b_cryptstate))
{
crypt_encode_inplace(ip->bw_buffer->b_cryptstate, buf, len);
}
else
{
char_u *outbuf;
len = crypt_encode_alloc(curbuf->b_cryptstate, buf, len, &outbuf);
if (len == 0)
return OK; /* Crypt layer is buffering, will flush later. */
wlen = write_eintr(ip->bw_fd, outbuf, len);
vim_free(outbuf);
return (wlen < len) ? FAIL : OK;
}
}
#endif
wlen = write_eintr(ip->bw_fd, buf, len);
return (wlen < len) ? FAIL : OK;
}
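/*
 * Minimal usage sketch (illustrative only, not part of the build): a caller
 * fills a bw_info and feeds the data in chunks, as buf_write() does above:
 *
 *	write_info.bw_fd = fd;
 *	write_info.bw_buf = smallbuf;
 *	write_info.bw_len = len;	(number of bytes in smallbuf)
 *	if (buf_write_bytes(&write_info) == FAIL)
 *	    handle the write/conversion error
 */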
#ifdef FEAT_MBYTE
/*
* Convert a Unicode character to bytes.
* Return TRUE for an error, FALSE when it's OK.
*/
static int
ucs2bytes(
unsigned c, /* in: character */
char_u **pp, /* in/out: pointer to result */
int flags) /* FIO_ flags */
{
char_u *p = *pp;
int error = FALSE;
int cc;
if (flags & FIO_UCS4)
{
if (flags & FIO_ENDIAN_L)
{
*p++ = c;
*p++ = (c >> 8);
*p++ = (c >> 16);
*p++ = (c >> 24);
}
else
{
*p++ = (c >> 24);
*p++ = (c >> 16);
*p++ = (c >> 8);
*p++ = c;
}
}
else if (flags & (FIO_UCS2 | FIO_UTF16))
{
if (c >= 0x10000)
{
if (flags & FIO_UTF16)
{
/* Make two words, ten bits of the character in each. First
* word is 0xd800 - 0xdbff, second one 0xdc00 - 0xdfff */
c -= 0x10000;
if (c >= 0x100000)
error = TRUE;
cc = ((c >> 10) & 0x3ff) + 0xd800;
if (flags & FIO_ENDIAN_L)
{
*p++ = cc;
*p++ = ((unsigned)cc >> 8);
}
else
{
*p++ = ((unsigned)cc >> 8);
*p++ = cc;
}
c = (c & 0x3ff) + 0xdc00;
}
else
error = TRUE;
}
if (flags & FIO_ENDIAN_L)
{
*p++ = c;
*p++ = (c >> 8);
}
else
{
*p++ = (c >> 8);
*p++ = c;
}
}
else /* Latin1 */
{
if (c >= 0x100)
{
error = TRUE;
*p++ = 0xBF;
}
else
*p++ = c;
}
*pp = p;
return error;
}
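/*
 * Worked example (illustrative): encoding U+1F600 as UTF-16LE.
 *	c  = 0x1F600 - 0x10000		= 0xF600
 *	hi = (0xF600 >> 10) + 0xd800	= 0xd83d  -> bytes 3D D8
 *	lo = (0xF600 & 0x3ff) + 0xdc00	= 0xde00  -> bytes 00 DE
 * For Latin1 output an unencodable character produces the 0xBF fallback
 * byte and sets the error flag.
 */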
/*
* Return TRUE if file encoding "fenc" requires conversion from or to
* 'encoding'.
*/
static int
need_conversion(char_u *fenc)
{
int same_encoding;
int enc_flags;
int fenc_flags;
if (*fenc == NUL || STRCMP(p_enc, fenc) == 0)
{
same_encoding = TRUE;
fenc_flags = 0;
}
else
{
/* Ignore difference between "ansi" and "latin1", "ucs-4" and
* "ucs-4be", etc. */
enc_flags = get_fio_flags(p_enc);
fenc_flags = get_fio_flags(fenc);
same_encoding = (enc_flags != 0 && fenc_flags == enc_flags);
}
if (same_encoding)
{
/* Specified encoding matches with 'encoding'. This requires
* conversion when 'encoding' is Unicode but not UTF-8. */
return enc_unicode != 0;
}
/* Encodings differ. However, conversion is not needed when 'enc' is any
* Unicode encoding and the file is UTF-8. */
return !(enc_utf8 && fenc_flags == FIO_UTF8);
}
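/*
 * Illustrative examples: with 'encoding' "utf-8", fenc "" or "utf-8" needs
 * no conversion (enc_unicode is zero for utf-8), while fenc "latin1" does.
 * With 'encoding' "ucs-2" even fenc "ucs-2" needs conversion, because the
 * buffer text is kept in utf-8 internally.
 */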
/*
* Check "ptr" for a unicode encoding and return the FIO_ flags needed for the
* internal conversion.
* if "ptr" is an empty string, use 'encoding'.
*/
static int
get_fio_flags(char_u *ptr)
{
int prop;
if (*ptr == NUL)
ptr = p_enc;
prop = enc_canon_props(ptr);
if (prop & ENC_UNICODE)
{
if (prop & ENC_2BYTE)
{
if (prop & ENC_ENDIAN_L)
return FIO_UCS2 | FIO_ENDIAN_L;
return FIO_UCS2;
}
if (prop & ENC_4BYTE)
{
if (prop & ENC_ENDIAN_L)
return FIO_UCS4 | FIO_ENDIAN_L;
return FIO_UCS4;
}
if (prop & ENC_2WORD)
{
if (prop & ENC_ENDIAN_L)
return FIO_UTF16 | FIO_ENDIAN_L;
return FIO_UTF16;
}
return FIO_UTF8;
}
if (prop & ENC_LATIN1)
return FIO_LATIN1;
/* must be ENC_DBCS, requires iconv() */
return 0;
}
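/*
 * Illustrative mapping, assuming the usual canonical encoding names:
 *	"ucs-2le" -> FIO_UCS2 | FIO_ENDIAN_L
 *	"utf-16"  -> FIO_UTF16
 *	"utf-8"   -> FIO_UTF8
 *	"latin1"  -> FIO_LATIN1
 *	a DBCS name such as "cp936" -> 0 (conversion requires iconv())
 */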
#ifdef WIN3264
/*
* Check "ptr" for a MS-Windows codepage name and return the FIO_ flags needed
* for the conversion MS-Windows can do for us. Also accept "utf-8".
* Used for conversion between 'encoding' and 'fileencoding'.
*/
static int
get_win_fio_flags(char_u *ptr)
{
int cp;
/* Cannot do this when 'encoding' is not utf-8 and not a codepage. */
if (!enc_utf8 && enc_codepage <= 0)
return 0;
cp = encname2codepage(ptr);
if (cp == 0)
{
# ifdef CP_UTF8 /* VC 4.1 doesn't define CP_UTF8 */
if (STRCMP(ptr, "utf-8") == 0)
cp = CP_UTF8;
else
# endif
return 0;
}
return FIO_PUT_CP(cp) | FIO_CODEPAGE;
}
#endif
#ifdef MACOS_CONVERT
/*
* Check "ptr" for a Carbon supported encoding and return the FIO_ flags
* needed for the internal conversion to/from utf-8 or latin1.
*/
static int
get_mac_fio_flags(char_u *ptr)
{
if ((enc_utf8 || STRCMP(p_enc, "latin1") == 0)
&& (enc_canon_props(ptr) & ENC_MACROMAN))
return FIO_MACROMAN;
return 0;
}
#endif
/*
* Check for a Unicode BOM (Byte Order Mark) at the start of p[size].
* "size" must be at least 2.
* Return the name of the encoding and set "*lenp" to the length.
* Returns NULL when no BOM found.
*/
static char_u *
check_for_bom(
char_u *p,
long size,
int *lenp,
int flags)
{
char *name = NULL;
int len = 2;
if (p[0] == 0xef && p[1] == 0xbb && size >= 3 && p[2] == 0xbf
&& (flags == FIO_ALL || flags == FIO_UTF8 || flags == 0))
{
name = "utf-8"; /* EF BB BF */
len = 3;
}
else if (p[0] == 0xff && p[1] == 0xfe)
{
if (size >= 4 && p[2] == 0 && p[3] == 0
&& (flags == FIO_ALL || flags == (FIO_UCS4 | FIO_ENDIAN_L)))
{
name = "ucs-4le"; /* FF FE 00 00 */
len = 4;
}
else if (flags == (FIO_UCS2 | FIO_ENDIAN_L))
name = "ucs-2le"; /* FF FE */
else if (flags == FIO_ALL || flags == (FIO_UTF16 | FIO_ENDIAN_L))
/* utf-16le is preferred, it also works for ucs-2le text */
name = "utf-16le"; /* FF FE */
}
else if (p[0] == 0xfe && p[1] == 0xff
&& (flags == FIO_ALL || flags == FIO_UCS2 || flags == FIO_UTF16))
{
/* Default to utf-16, it works also for ucs-2 text. */
if (flags == FIO_UCS2)
name = "ucs-2"; /* FE FF */
else
name = "utf-16"; /* FE FF */
}
else if (size >= 4 && p[0] == 0 && p[1] == 0 && p[2] == 0xfe
&& p[3] == 0xff && (flags == FIO_ALL || flags == FIO_UCS4))
{
name = "ucs-4"; /* 00 00 FE FF */
len = 4;
}
*lenp = len;
return (char_u *)name;
}
/*
* Generate a BOM in "buf[4]" for encoding "name".
* Return the length of the BOM (zero when no BOM).
*/
static int
make_bom(char_u *buf, char_u *name)
{
int flags;
char_u *p;
flags = get_fio_flags(name);
/* Can't put a BOM in a non-Unicode file. */
if (flags == FIO_LATIN1 || flags == 0)
return 0;
if (flags == FIO_UTF8) /* UTF-8 */
{
buf[0] = 0xef;
buf[1] = 0xbb;
buf[2] = 0xbf;
return 3;
}
p = buf;
(void)ucs2bytes(0xfeff, &p, flags);
return (int)(p - buf);
}
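/*
 * Illustrative: make_bom(buf, (char_u *)"utf-8") stores EF BB BF and returns
 * 3; for "ucs-2le" the ucs2bytes() call stores FF FE and 2 is returned; for
 * "latin1" nothing is stored and 0 is returned.
 */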
#endif
#if defined(FEAT_VIMINFO) || defined(FEAT_BROWSE) || \
defined(FEAT_QUICKFIX) || defined(FEAT_AUTOCMD) || defined(PROTO)
/*
* Try to find a shortname by comparing the fullname with the current
* directory.
* Returns "full_path" or pointer into "full_path" if shortened.
*/
char_u *
shorten_fname1(char_u *full_path)
{
char_u *dirname;
char_u *p = full_path;
dirname = alloc(MAXPATHL);
if (dirname == NULL)
return full_path;
if (mch_dirname(dirname, MAXPATHL) == OK)
{
p = shorten_fname(full_path, dirname);
if (p == NULL || *p == NUL)
p = full_path;
}
vim_free(dirname);
return p;
}
#endif
/*
* Try to find a shortname by comparing the fullname with the current
* directory.
 * Returns NULL if no shorter name is possible, a pointer into "full_path"
 * otherwise.
*/
char_u *
shorten_fname(char_u *full_path, char_u *dir_name)
{
int len;
char_u *p;
if (full_path == NULL)
return NULL;
len = (int)STRLEN(dir_name);
if (fnamencmp(dir_name, full_path, len) == 0)
{
p = full_path + len;
#if defined(MSWIN)
/*
* MSWIN: when a file is in the root directory, dir_name will end in a
* slash, since C: by itself does not define a specific dir. In this
* case p may already be correct. <negri>
*/
if (!((len > 2) && (*(p - 2) == ':')))
#endif
{
if (vim_ispathsep(*p))
++p;
#ifndef VMS /* the path separator is always part of the path */
else
p = NULL;
#endif
}
}
#if defined(MSWIN)
/*
* When using a file in the current drive, remove the drive name:
* "A:\dir\file" -> "\dir\file". This helps when moving a session file on
* a floppy from "A:\dir" to "B:\dir".
*/
else if (len > 3
&& TOUPPER_LOC(full_path[0]) == TOUPPER_LOC(dir_name[0])
&& full_path[1] == ':'
&& vim_ispathsep(full_path[2]))
p = full_path + 2;
#endif
else
p = NULL;
return p;
}
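/*
 * Illustrative: shorten_fname((char_u *)"/home/user/src/main.c",
 * (char_u *)"/home/user") returns a pointer to "src/main.c", while with
 * dir_name "/tmp" it returns NULL because the file is not below it.
 */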
/*
* Shorten filenames for all buffers.
* When "force" is TRUE: Use full path from now on for files currently being
* edited, both for file name and swap file name. Try to shorten the file
* names a bit, if safe to do so.
* When "force" is FALSE: Only try to shorten absolute file names.
* For buffers that have buftype "nofile" or "scratch": never change the file
* name.
*/
void
shorten_fnames(int force)
{
char_u dirname[MAXPATHL];
buf_T *buf;
char_u *p;
mch_dirname(dirname, MAXPATHL);
FOR_ALL_BUFFERS(buf)
{
if (buf->b_fname != NULL
#ifdef FEAT_QUICKFIX
&& !bt_nofile(buf)
#endif
&& !path_with_url(buf->b_fname)
&& (force
|| buf->b_sfname == NULL
|| mch_isFullName(buf->b_sfname)))
{
vim_free(buf->b_sfname);
buf->b_sfname = NULL;
p = shorten_fname(buf->b_ffname, dirname);
if (p != NULL)
{
buf->b_sfname = vim_strsave(p);
buf->b_fname = buf->b_sfname;
}
if (p == NULL || buf->b_fname == NULL)
buf->b_fname = buf->b_ffname;
}
/* Always make the swap file name a full path, a "nofile" buffer may
* also have a swap file. */
mf_fullname(buf->b_ml.ml_mfp);
}
status_redraw_all();
redraw_tabline = TRUE;
}
#if (defined(FEAT_DND) && defined(FEAT_GUI_GTK)) \
|| defined(FEAT_GUI_MSWIN) \
|| defined(FEAT_GUI_MAC) \
|| defined(PROTO)
/*
* Shorten all filenames in "fnames[count]" by current directory.
*/
void
shorten_filenames(char_u **fnames, int count)
{
int i;
char_u dirname[MAXPATHL];
char_u *p;
if (fnames == NULL || count < 1)
return;
mch_dirname(dirname, sizeof(dirname));
for (i = 0; i < count; ++i)
{
if ((p = shorten_fname(fnames[i], dirname)) != NULL)
{
	    /* shorten_fname() returns a pointer into the given "fnames[i]".
	     * If we freed "fnames[i]" first, "p" would become invalid, so
	     * copy "p" first and then free "fnames[i]". */
p = vim_strsave(p);
vim_free(fnames[i]);
fnames[i] = p;
}
}
}
#endif
/*
 * Add an extension to a file name: change path/fo.o.h to path/fo.o.h.ext, or
 * to fo_o_h.ext for MSDOS or when the shortname option is set.
 *
 * Assuming that "fname" is a valid name found in the filesystem, we ensure
 * that the return value is a different name and ends in "ext".
* "ext" MUST be at most 4 characters long if it starts with a dot, 3
* characters otherwise.
* Space for the returned name is allocated, must be freed later.
* Returns NULL when out of memory.
*/
char_u *
modname(
char_u *fname,
char_u *ext,
int prepend_dot) /* may prepend a '.' to file name */
{
return buf_modname((curbuf->b_p_sn || curbuf->b_shortname),
fname, ext, prepend_dot);
}
char_u *
buf_modname(
int shortname, /* use 8.3 file name */
char_u *fname,
char_u *ext,
int prepend_dot) /* may prepend a '.' to file name */
{
char_u *retval;
char_u *s;
char_u *e;
char_u *ptr;
int fnamelen, extlen;
extlen = (int)STRLEN(ext);
/*
* If there is no file name we must get the name of the current directory
* (we need the full path in case :cd is used).
*/
if (fname == NULL || *fname == NUL)
{
retval = alloc((unsigned)(MAXPATHL + extlen + 3));
if (retval == NULL)
return NULL;
if (mch_dirname(retval, MAXPATHL) == FAIL ||
(fnamelen = (int)STRLEN(retval)) == 0)
{
vim_free(retval);
return NULL;
}
if (!after_pathsep(retval, retval + fnamelen))
{
retval[fnamelen++] = PATHSEP;
retval[fnamelen] = NUL;
}
prepend_dot = FALSE; /* nothing to prepend a dot to */
}
else
{
fnamelen = (int)STRLEN(fname);
retval = alloc((unsigned)(fnamelen + extlen + 3));
if (retval == NULL)
return NULL;
STRCPY(retval, fname);
#ifdef VMS
vms_remove_version(retval); /* we do not need versions here */
#endif
}
/*
     * Search backwards until we hit a '/', '\' or ':', replacing all '.'
     * by '_' for MSDOS or when the shortname option is set and "ext" starts
     * with a dot.  Then truncate what is after the '/', '\' or ':' to 8
     * characters for MSDOS and 26 characters for AMIGA, a lot more for UNIX.
*/
for (ptr = retval + fnamelen; ptr > retval; MB_PTR_BACK(retval, ptr))
{
if (*ext == '.'
#ifdef USE_LONG_FNAME
&& (!USE_LONG_FNAME || shortname)
#else
&& shortname
#endif
)
if (*ptr == '.') /* replace '.' by '_' */
*ptr = '_';
if (vim_ispathsep(*ptr))
{
++ptr;
break;
}
}
    /* Limit the file name to at most BASENAMELEN characters. */
if (STRLEN(ptr) > (unsigned)BASENAMELEN)
ptr[BASENAMELEN] = '\0';
s = ptr + STRLEN(ptr);
/*
* For 8.3 file names we may have to reduce the length.
*/
#ifdef USE_LONG_FNAME
if (!USE_LONG_FNAME || shortname)
#else
if (shortname)
#endif
{
/*
* If there is no file name, or the file name ends in '/', and the
* extension starts with '.', put a '_' before the dot, because just
* ".ext" is invalid.
*/
if (fname == NULL || *fname == NUL
|| vim_ispathsep(fname[STRLEN(fname) - 1]))
{
if (*ext == '.')
*s++ = '_';
}
/*
* If the extension starts with '.', truncate the base name at 8
* characters
*/
else if (*ext == '.')
{
if ((size_t)(s - ptr) > (size_t)8)
{
s = ptr + 8;
*s = '\0';
}
}
/*
* If the extension doesn't start with '.', and the file name
* doesn't have an extension yet, append a '.'
*/
else if ((e = vim_strchr(ptr, '.')) == NULL)
*s++ = '.';
/*
* If the extension doesn't start with '.', and there already is an
* extension, it may need to be truncated
*/
else if ((int)STRLEN(e) + extlen > 4)
s = e + 4 - extlen;
}
#if defined(USE_LONG_FNAME) || defined(WIN3264)
/*
* If there is no file name, and the extension starts with '.', put a
* '_' before the dot, because just ".ext" may be invalid if it's on a
* FAT partition, and on HPFS it doesn't matter.
*/
else if ((fname == NULL || *fname == NUL) && *ext == '.')
*s++ = '_';
#endif
/*
* Append the extension.
* ext can start with '.' and cannot exceed 3 more characters.
*/
STRCPY(s, ext);
/*
* Prepend the dot.
*/
if (prepend_dot && !shortname && *(e = gettail(retval)) != '.'
#ifdef USE_LONG_FNAME
&& USE_LONG_FNAME
#endif
)
{
STRMOVE(e + 1, e);
*e = '.';
}
/*
* Check that, after appending the extension, the file name is really
* different.
*/
if (fname != NULL && STRCMP(fname, retval) == 0)
{
/* we search for a character that can be replaced by '_' */
while (--s >= ptr)
{
if (*s != '_')
{
*s = '_';
break;
}
}
if (s < ptr) /* fname was "________.<ext>", how tricky! */
*ptr = 'v';
}
return retval;
}
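/*
 * Illustrative: buf_modname(FALSE, (char_u *)"src/main.c", (char_u *)".bak",
 * FALSE) allocates "src/main.c.bak"; with "shortname" set the dot in the
 * base name is replaced, giving the 8.3-safe "src/main_c.bak".
 */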
/*
* Like fgets(), but if the file line is too long, it is truncated and the
* rest of the line is thrown away. Returns TRUE for end-of-file.
*/
int
vim_fgets(char_u *buf, int size, FILE *fp)
{
char *eof;
#define FGETS_SIZE 200
char tbuf[FGETS_SIZE];
buf[size - 2] = NUL;
#ifdef USE_CR
eof = fgets_cr((char *)buf, size, fp);
#else
eof = fgets((char *)buf, size, fp);
#endif
if (buf[size - 2] != NUL && buf[size - 2] != '\n')
{
buf[size - 1] = NUL; /* Truncate the line */
/* Now throw away the rest of the line: */
do
{
tbuf[FGETS_SIZE - 2] = NUL;
#ifdef USE_CR
ignoredp = fgets_cr((char *)tbuf, FGETS_SIZE, fp);
#else
ignoredp = fgets((char *)tbuf, FGETS_SIZE, fp);
#endif
} while (tbuf[FGETS_SIZE - 2] != NUL && tbuf[FGETS_SIZE - 2] != '\n');
}
return (eof == NULL);
}
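/*
 * Illustrative: vim_fgets(buf, 10, fp) on a 20-character line keeps the
 * first 9 characters plus a terminating NUL, silently discards the rest of
 * the line, and returns FALSE (not at end-of-file yet).
 */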
#if defined(USE_CR) || defined(PROTO)
/*
* Like vim_fgets(), but accept any line terminator: CR, CR-LF or LF.
* Returns TRUE for end-of-file.
* Only used for the Mac, because it's much slower than vim_fgets().
*/
int
tag_fgets(char_u *buf, int size, FILE *fp)
{
int i = 0;
int c;
int eof = FALSE;
for (;;)
{
c = fgetc(fp);
if (c == EOF)
{
eof = TRUE;
break;
}
if (c == '\r')
{
/* Always store a NL for end-of-line. */
if (i < size - 1)
buf[i++] = '\n';
c = fgetc(fp);
if (c != '\n') /* Macintosh format: single CR. */
ungetc(c, fp);
break;
}
if (i < size - 1)
buf[i++] = c;
if (c == '\n')
break;
}
buf[i] = NUL;
return eof;
}
#endif
/*
 * rename() only works if both files are on the same file system; this
 * function attempts to copy the file across if rename fails -- webb
* Return -1 for failure, 0 for success.
*/
int
vim_rename(char_u *from, char_u *to)
{
int fd_in;
int fd_out;
int n;
char *errmsg = NULL;
char *buffer;
#ifdef AMIGA
BPTR flock;
#endif
stat_T st;
long perm;
#ifdef HAVE_ACL
vim_acl_T acl; /* ACL from original file */
#endif
int use_tmp_file = FALSE;
/*
* When the names are identical, there is nothing to do. When they refer
* to the same file (ignoring case and slash/backslash differences) but
* the file name differs we need to go through a temp file.
*/
if (fnamecmp(from, to) == 0)
{
if (p_fic && STRCMP(gettail(from), gettail(to)) != 0)
use_tmp_file = TRUE;
else
return 0;
}
/*
* Fail if the "from" file doesn't exist. Avoids that "to" is deleted.
*/
if (mch_stat((char *)from, &st) < 0)
return -1;
#ifdef UNIX
{
stat_T st_to;
/* It's possible for the source and destination to be the same file.
* This happens when "from" and "to" differ in case and are on a FAT32
* filesystem. In that case go through a temp file name. */
if (mch_stat((char *)to, &st_to) >= 0
&& st.st_dev == st_to.st_dev
&& st.st_ino == st_to.st_ino)
use_tmp_file = TRUE;
}
#endif
#ifdef WIN3264
{
BY_HANDLE_FILE_INFORMATION info1, info2;
/* It's possible for the source and destination to be the same file.
* In that case go through a temp file name. This makes rename("foo",
* "./foo") a no-op (in a complicated way). */
if (win32_fileinfo(from, &info1) == FILEINFO_OK
&& win32_fileinfo(to, &info2) == FILEINFO_OK
&& info1.dwVolumeSerialNumber == info2.dwVolumeSerialNumber
&& info1.nFileIndexHigh == info2.nFileIndexHigh
&& info1.nFileIndexLow == info2.nFileIndexLow)
use_tmp_file = TRUE;
}
#endif
if (use_tmp_file)
{
char tempname[MAXPATHL + 1];
/*
* Find a name that doesn't exist and is in the same directory.
* Rename "from" to "tempname" and then rename "tempname" to "to".
*/
if (STRLEN(from) >= MAXPATHL - 5)
return -1;
STRCPY(tempname, from);
for (n = 123; n < 99999; ++n)
{
sprintf((char *)gettail((char_u *)tempname), "%d", n);
if (mch_stat(tempname, &st) < 0)
{
if (mch_rename((char *)from, tempname) == 0)
{
if (mch_rename(tempname, (char *)to) == 0)
return 0;
/* Strange, the second step failed. Try moving the
* file back and return failure. */
mch_rename(tempname, (char *)from);
return -1;
}
/* If it fails for one temp name it will most likely fail
* for any temp name, give up. */
return -1;
}
}
return -1;
}
/*
* Delete the "to" file, this is required on some systems to make the
* mch_rename() work, on other systems it makes sure that we don't have
* two files when the mch_rename() fails.
*/
#ifdef AMIGA
/*
* With MSDOS-compatible filesystems (crossdos, messydos) it is possible
* that the name of the "to" file is the same as the "from" file, even
* though the names are different. To avoid the chance of accidentally
* deleting the "from" file (horror!) we lock it during the remove.
*
* When used for making a backup before writing the file: This should not
* happen with ":w", because startscript() should detect this problem and
* set buf->b_shortname, causing modname() to return a correct ".bak" file
* name. This problem does exist with ":w filename", but then the
* original file will be somewhere else so the backup isn't really
* important. If autoscripting is off the rename may fail.
*/
flock = Lock((UBYTE *)from, (long)ACCESS_READ);
#endif
mch_remove(to);
#ifdef AMIGA
if (flock)
UnLock(flock);
#endif
/*
* First try a normal rename, return if it works.
*/
if (mch_rename((char *)from, (char *)to) == 0)
return 0;
/*
* Rename() failed, try copying the file.
*/
perm = mch_getperm(from);
#ifdef HAVE_ACL
/* For systems that support ACL: get the ACL from the original file. */
acl = mch_get_acl(from);
#endif
fd_in = mch_open((char *)from, O_RDONLY|O_EXTRA, 0);
if (fd_in == -1)
{
#ifdef HAVE_ACL
mch_free_acl(acl);
#endif
return -1;
}
/* Create the new file with same permissions as the original. */
fd_out = mch_open((char *)to,
O_CREAT|O_EXCL|O_WRONLY|O_EXTRA|O_NOFOLLOW, (int)perm);
if (fd_out == -1)
{
close(fd_in);
#ifdef HAVE_ACL
mch_free_acl(acl);
#endif
return -1;
}
buffer = (char *)alloc(BUFSIZE);
if (buffer == NULL)
{
close(fd_out);
close(fd_in);
#ifdef HAVE_ACL
mch_free_acl(acl);
#endif
return -1;
}
while ((n = read_eintr(fd_in, buffer, BUFSIZE)) > 0)
if (write_eintr(fd_out, buffer, n) != n)
{
errmsg = _("E208: Error writing to \"%s\"");
break;
}
vim_free(buffer);
close(fd_in);
if (close(fd_out) < 0)
errmsg = _("E209: Error closing \"%s\"");
if (n < 0)
{
errmsg = _("E210: Error reading \"%s\"");
to = from;
}
#ifndef UNIX /* for Unix mch_open() already set the permission */
mch_setperm(to, perm);
#endif
#ifdef HAVE_ACL
mch_set_acl(to, acl);
mch_free_acl(acl);
#endif
#if defined(HAVE_SELINUX) || defined(HAVE_SMACK)
mch_copy_sec(from, to);
#endif
if (errmsg != NULL)
{
EMSG2(errmsg, to);
return -1;
}
mch_remove(from);
return 0;
}
static int already_warned = FALSE;
/*
 * Check if any buffer that is not hidden has been changed.
* Postpone the check if there are characters in the stuff buffer, a global
* command is being executed, a mapping is being executed or an autocommand is
* busy.
* Returns TRUE if some message was written (screen should be redrawn and
* cursor positioned).
*/
int
check_timestamps(
int focus) /* called for GUI focus event */
{
buf_T *buf;
int didit = 0;
int n;
/* Don't check timestamps while system() or another low-level function may
* cause us to lose and gain focus. */
if (no_check_timestamps > 0)
return FALSE;
/* Avoid doing a check twice. The OK/Reload dialog can cause a focus
* event and we would keep on checking if the file is steadily growing.
* Do check again after typing something. */
if (focus && did_check_timestamps)
{
need_check_timestamps = TRUE;
return FALSE;
}
if (!stuff_empty() || global_busy || !typebuf_typed()
#ifdef FEAT_AUTOCMD
|| autocmd_busy || curbuf_lock > 0 || allbuf_lock > 0
#endif
)
need_check_timestamps = TRUE; /* check later */
else
{
++no_wait_return;
did_check_timestamps = TRUE;
already_warned = FALSE;
FOR_ALL_BUFFERS(buf)
{
/* Only check buffers in a window. */
if (buf->b_nwindows > 0)
{
bufref_T bufref;
set_bufref(&bufref, buf);
n = buf_check_timestamp(buf, focus);
if (didit < n)
didit = n;
if (n > 0 && !bufref_valid(&bufref))
{
/* Autocommands have removed the buffer, start at the
* first one again. */
buf = firstbuf;
continue;
}
}
}
--no_wait_return;
need_check_timestamps = FALSE;
if (need_wait_return && didit == 2)
{
/* make sure msg isn't overwritten */
msg_puts((char_u *)"\n");
out_flush();
}
}
return didit;
}
/*
* Move all the lines from buffer "frombuf" to buffer "tobuf".
 * Return OK or FAIL.  When it fails, "tobuf" is incomplete and/or "frombuf"
 * is not empty.
*/
static int
move_lines(buf_T *frombuf, buf_T *tobuf)
{
buf_T *tbuf = curbuf;
int retval = OK;
linenr_T lnum;
char_u *p;
/* Copy the lines in "frombuf" to "tobuf". */
curbuf = tobuf;
for (lnum = 1; lnum <= frombuf->b_ml.ml_line_count; ++lnum)
{
p = vim_strsave(ml_get_buf(frombuf, lnum, FALSE));
if (p == NULL || ml_append(lnum - 1, p, 0, FALSE) == FAIL)
{
vim_free(p);
retval = FAIL;
break;
}
vim_free(p);
}
/* Delete all the lines in "frombuf". */
if (retval != FAIL)
{
curbuf = frombuf;
for (lnum = curbuf->b_ml.ml_line_count; lnum > 0; --lnum)
if (ml_delete(lnum, FALSE) == FAIL)
{
/* Oops! We could try putting back the saved lines, but that
* might fail again... */
retval = FAIL;
break;
}
}
curbuf = tbuf;
return retval;
}
/*
* Check if buffer "buf" has been changed.
* Also check if the file for a new buffer unexpectedly appeared.
* return 1 if a changed buffer was found.
* return 2 if a message has been displayed.
* return 0 otherwise.
*/
int
buf_check_timestamp(
buf_T *buf,
int focus UNUSED) /* called for GUI focus event */
{
stat_T st;
int stat_res;
int retval = 0;
char_u *path;
char_u *tbuf;
char *mesg = NULL;
char *mesg2 = "";
int helpmesg = FALSE;
int reload = FALSE;
char *reason;
#if defined(FEAT_CON_DIALOG) || defined(FEAT_GUI_DIALOG)
int can_reload = FALSE;
#endif
off_T orig_size = buf->b_orig_size;
int orig_mode = buf->b_orig_mode;
#ifdef FEAT_GUI
int save_mouse_correct = need_mouse_correct;
#endif
#ifdef FEAT_AUTOCMD
static int busy = FALSE;
int n;
char_u *s;
bufref_T bufref;
set_bufref(&bufref, buf);
#endif
    /* If there is no file name, the buffer is not loaded, 'buftype' is
     * set, we are in the middle of a save, or we are being called
     * recursively: ignore this buffer. */
if (buf->b_ffname == NULL
|| buf->b_ml.ml_mfp == NULL
|| *buf->b_p_bt != NUL
|| buf->b_saving
#ifdef FEAT_AUTOCMD
|| busy
#endif
#ifdef FEAT_NETBEANS_INTG
|| isNetbeansBuffer(buf)
#endif
#ifdef FEAT_TERMINAL
|| buf->b_term != NULL
#endif
)
return 0;
if ( !(buf->b_flags & BF_NOTEDITED)
&& buf->b_mtime != 0
&& ((stat_res = mch_stat((char *)buf->b_ffname, &st)) < 0
|| time_differs((long)st.st_mtime, buf->b_mtime)
|| st.st_size != buf->b_orig_size
#ifdef HAVE_ST_MODE
|| (int)st.st_mode != buf->b_orig_mode
#else
|| mch_getperm(buf->b_ffname) != buf->b_orig_mode
#endif
))
{
retval = 1;
/* set b_mtime to stop further warnings (e.g., when executing
* FileChangedShell autocmd) */
if (stat_res < 0)
{
buf->b_mtime = 0;
buf->b_orig_size = 0;
buf->b_orig_mode = 0;
}
else
buf_store_time(buf, &st, buf->b_ffname);
/* Don't do anything for a directory. Might contain the file
* explorer. */
if (mch_isdir(buf->b_fname))
;
/*
* If 'autoread' is set, the buffer has no changes and the file still
* exists, reload the buffer. Use the buffer-local option value if it
* was set, the global option value otherwise.
*/
else if ((buf->b_p_ar >= 0 ? buf->b_p_ar : p_ar)
&& !bufIsChanged(buf) && stat_res >= 0)
reload = TRUE;
else
{
if (stat_res < 0)
reason = "deleted";
else if (bufIsChanged(buf))
reason = "conflict";
else if (orig_size != buf->b_orig_size || buf_contents_changed(buf))
reason = "changed";
else if (orig_mode != buf->b_orig_mode)
reason = "mode";
else
reason = "time";
#ifdef FEAT_AUTOCMD
/*
* Only give the warning if there are no FileChangedShell
* autocommands.
* Avoid being called recursively by setting "busy".
*/
busy = TRUE;
# ifdef FEAT_EVAL
set_vim_var_string(VV_FCS_REASON, (char_u *)reason, -1);
set_vim_var_string(VV_FCS_CHOICE, (char_u *)"", -1);
# endif
++allbuf_lock;
n = apply_autocmds(EVENT_FILECHANGEDSHELL,
buf->b_fname, buf->b_fname, FALSE, buf);
--allbuf_lock;
busy = FALSE;
if (n)
{
if (!bufref_valid(&bufref))
EMSG(_("E246: FileChangedShell autocommand deleted buffer"));
# ifdef FEAT_EVAL
s = get_vim_var_str(VV_FCS_CHOICE);
if (STRCMP(s, "reload") == 0 && *reason != 'd')
reload = TRUE;
else if (STRCMP(s, "ask") == 0)
n = FALSE;
else
# endif
return 2;
}
if (!n)
#endif
{
if (*reason == 'd')
mesg = _("E211: File \"%s\" no longer available");
else
{
helpmesg = TRUE;
#if defined(FEAT_CON_DIALOG) || defined(FEAT_GUI_DIALOG)
can_reload = TRUE;
#endif
/*
* Check if the file contents really changed to avoid
* giving a warning when only the timestamp was set (e.g.,
* checked out of CVS). Always warn when the buffer was
* changed.
*/
if (reason[2] == 'n')
{
mesg = _("W12: Warning: File \"%s\" has changed and the buffer was changed in Vim as well");
mesg2 = _("See \":help W12\" for more info.");
}
else if (reason[1] == 'h')
{
mesg = _("W11: Warning: File \"%s\" has changed since editing started");
mesg2 = _("See \":help W11\" for more info.");
}
else if (*reason == 'm')
{
mesg = _("W16: Warning: Mode of file \"%s\" has changed since editing started");
mesg2 = _("See \":help W16\" for more info.");
}
else
/* Only timestamp changed, store it to avoid a warning
* in check_mtime() later. */
buf->b_mtime_read = buf->b_mtime;
}
}
}
}
else if ((buf->b_flags & BF_NEW) && !(buf->b_flags & BF_NEW_W)
&& vim_fexists(buf->b_ffname))
{
retval = 1;
mesg = _("W13: Warning: File \"%s\" has been created after editing started");
buf->b_flags |= BF_NEW_W;
#if defined(FEAT_CON_DIALOG) || defined(FEAT_GUI_DIALOG)
can_reload = TRUE;
#endif
}
if (mesg != NULL)
{
path = home_replace_save(buf, buf->b_fname);
if (path != NULL)
{
if (!helpmesg)
mesg2 = "";
tbuf = alloc((unsigned)(STRLEN(path) + STRLEN(mesg)
+ STRLEN(mesg2) + 2));
sprintf((char *)tbuf, mesg, path);
#ifdef FEAT_EVAL
/* Set warningmsg here, before the unimportant and output-specific
* mesg2 has been appended. */
set_vim_var_string(VV_WARNINGMSG, tbuf, -1);
#endif
#if defined(FEAT_CON_DIALOG) || defined(FEAT_GUI_DIALOG)
if (can_reload)
{
if (*mesg2 != NUL)
{
STRCAT(tbuf, "\n");
STRCAT(tbuf, mesg2);
}
if (do_dialog(VIM_WARNING, (char_u *)_("Warning"), tbuf,
(char_u *)_("&OK\n&Load File"), 1, NULL, TRUE) == 2)
reload = TRUE;
}
else
#endif
if (State > NORMAL_BUSY || (State & CMDLINE) || already_warned)
{
if (*mesg2 != NUL)
{
STRCAT(tbuf, "; ");
STRCAT(tbuf, mesg2);
}
EMSG(tbuf);
retval = 2;
}
else
{
# ifdef FEAT_AUTOCMD
if (!autocmd_busy)
# endif
{
msg_start();
msg_puts_attr(tbuf, HL_ATTR(HLF_E) + MSG_HIST);
if (*mesg2 != NUL)
msg_puts_attr((char_u *)mesg2,
HL_ATTR(HLF_W) + MSG_HIST);
msg_clr_eos();
(void)msg_end();
if (emsg_silent == 0)
{
out_flush();
# ifdef FEAT_GUI
if (!focus)
# endif
/* give the user some time to think about it */
ui_delay(1000L, TRUE);
/* don't redraw and erase the message */
redraw_cmdline = FALSE;
}
}
already_warned = TRUE;
}
vim_free(path);
vim_free(tbuf);
}
}
if (reload)
{
/* Reload the buffer. */
buf_reload(buf, orig_mode);
#ifdef FEAT_PERSISTENT_UNDO
if (buf->b_p_udf && buf->b_ffname != NULL)
{
char_u hash[UNDO_HASH_SIZE];
buf_T *save_curbuf = curbuf;
/* Any existing undo file is unusable, write it now. */
curbuf = buf;
u_compute_hash(hash);
u_write_undo(NULL, FALSE, buf, hash);
curbuf = save_curbuf;
}
#endif
}
#ifdef FEAT_AUTOCMD
/* Trigger FileChangedShell when the file was changed in any way. */
if (bufref_valid(&bufref) && retval != 0)
(void)apply_autocmds(EVENT_FILECHANGEDSHELLPOST,
buf->b_fname, buf->b_fname, FALSE, buf);
#endif
#ifdef FEAT_GUI
/* restore this in case an autocommand has set it; it would break
* 'mousefocus' */
need_mouse_correct = save_mouse_correct;
#endif
return retval;
}
/*
* Reload a buffer that is already loaded.
* Used when the file was changed outside of Vim.
* "orig_mode" is buf->b_orig_mode before the need for reloading was detected.
* buf->b_orig_mode may have been reset already.
*/
void
buf_reload(buf_T *buf, int orig_mode)
{
exarg_T ea;
pos_T old_cursor;
linenr_T old_topline;
int old_ro = buf->b_p_ro;
buf_T *savebuf;
bufref_T bufref;
int saved = OK;
aco_save_T aco;
int flags = READ_NEW;
/* set curwin/curbuf for "buf" and save some things */
aucmd_prepbuf(&aco, buf);
/* We only want to read the text from the file, not reset the syntax
* highlighting, clear marks, diff status, etc. Force the fileformat
* and encoding to be the same. */
if (prep_exarg(&ea, buf) == OK)
{
old_cursor = curwin->w_cursor;
old_topline = curwin->w_topline;
if (p_ur < 0 || curbuf->b_ml.ml_line_count <= p_ur)
{
/* Save all the text, so that the reload can be undone.
* Sync first so that this is a separate undo-able action. */
u_sync(FALSE);
saved = u_savecommon(0, curbuf->b_ml.ml_line_count + 1, 0, TRUE);
flags |= READ_KEEP_UNDO;
}
/*
* To behave like when a new file is edited (matters for
* BufReadPost autocommands) we first need to delete the current
* buffer contents. But if reading the file fails we should keep
* the old contents. Can't use memory only, the file might be
* too big. Use a hidden buffer to move the buffer contents to.
*/
if (BUFEMPTY() || saved == FAIL)
savebuf = NULL;
else
{
/* Allocate a buffer without putting it in the buffer list. */
	    savebuf = buflist_new(NULL, NULL, (linenr_T)1, BLN_DUMMY);
	    if (savebuf != NULL)    /* may fail when out of memory */
		set_bufref(&bufref, savebuf);
if (savebuf != NULL && buf == curbuf)
{
/* Open the memline. */
curbuf = savebuf;
curwin->w_buffer = savebuf;
saved = ml_open(curbuf);
curbuf = buf;
curwin->w_buffer = buf;
}
if (savebuf == NULL || saved == FAIL || buf != curbuf
|| move_lines(buf, savebuf) == FAIL)
{
EMSG2(_("E462: Could not prepare for reloading \"%s\""),
buf->b_fname);
saved = FAIL;
}
}
if (saved == OK)
{
curbuf->b_flags |= BF_CHECK_RO; /* check for RO again */
#ifdef FEAT_AUTOCMD
keep_filetype = TRUE; /* don't detect 'filetype' */
#endif
if (readfile(buf->b_ffname, buf->b_fname, (linenr_T)0,
(linenr_T)0,
(linenr_T)MAXLNUM, &ea, flags) != OK)
{
#if defined(FEAT_AUTOCMD) && defined(FEAT_EVAL)
if (!aborting())
#endif
EMSG2(_("E321: Could not reload \"%s\""), buf->b_fname);
if (savebuf != NULL && bufref_valid(&bufref) && buf == curbuf)
{
/* Put the text back from the save buffer. First
* delete any lines that readfile() added. */
while (!BUFEMPTY())
if (ml_delete(buf->b_ml.ml_line_count, FALSE) == FAIL)
break;
(void)move_lines(savebuf, buf);
}
}
else if (buf == curbuf) /* "buf" still valid */
{
/* Mark the buffer as unmodified and free undo info. */
unchanged(buf, TRUE);
if ((flags & READ_KEEP_UNDO) == 0)
{
u_blockfree(buf);
u_clearall(buf);
}
else
{
/* Mark all undo states as changed. */
u_unchanged(curbuf);
}
}
}
vim_free(ea.cmd);
if (savebuf != NULL && bufref_valid(&bufref))
wipe_buffer(savebuf, FALSE);
#ifdef FEAT_DIFF
/* Invalidate diff info if necessary. */
diff_invalidate(curbuf);
#endif
/* Restore the topline and cursor position and check it (lines may
* have been removed). */
if (old_topline > curbuf->b_ml.ml_line_count)
curwin->w_topline = curbuf->b_ml.ml_line_count;
else
curwin->w_topline = old_topline;
curwin->w_cursor = old_cursor;
check_cursor();
update_topline();
#ifdef FEAT_AUTOCMD
keep_filetype = FALSE;
#endif
#ifdef FEAT_FOLDING
{
win_T *wp;
tabpage_T *tp;
/* Update folds unless they are defined manually. */
FOR_ALL_TAB_WINDOWS(tp, wp)
if (wp->w_buffer == curwin->w_buffer
&& !foldmethodIsManual(wp))
foldUpdateAll(wp);
}
#endif
/* If the mode didn't change and 'readonly' was set, keep the old
* value; the user probably used the ":view" command. But don't
* reset it, might have had a read error. */
if (orig_mode == curbuf->b_orig_mode)
curbuf->b_p_ro |= old_ro;
/* Modelines must override settings done by autocommands. */
do_modelines(0);
}
/* restore curwin/curbuf and a few other things */
aucmd_restbuf(&aco);
/* Careful: autocommands may have made "buf" invalid! */
}
void
buf_store_time(buf_T *buf, stat_T *st, char_u *fname UNUSED)
{
buf->b_mtime = (long)st->st_mtime;
buf->b_orig_size = st->st_size;
#ifdef HAVE_ST_MODE
buf->b_orig_mode = (int)st->st_mode;
#else
buf->b_orig_mode = mch_getperm(fname);
#endif
}
/*
 * Adjust the line number of the line with the missing EOL, used for the next
 * write.
* Used for do_filter(), when the input lines for the filter are deleted.
*/
void
write_lnum_adjust(linenr_T offset)
{
if (curbuf->b_no_eol_lnum != 0) /* only if there is a missing eol */
curbuf->b_no_eol_lnum += offset;
}
#if defined(TEMPDIRNAMES) || defined(FEAT_EVAL) || defined(PROTO)
/*
* Delete "name" and everything in it, recursively.
 * Return 0 for success, -1 if some file was not deleted.
*/
int
delete_recursive(char_u *name)
{
int result = 0;
char_u **files;
int file_count;
int i;
char_u *exp;
/* A symbolic link to a directory itself is deleted, not the directory it
* points to. */
if (
# if defined(UNIX) || defined(WIN32)
mch_isrealdir(name)
# else
mch_isdir(name)
# endif
)
{
vim_snprintf((char *)NameBuff, MAXPATHL, "%s/*", name);
exp = vim_strsave(NameBuff);
if (exp == NULL)
return -1;
if (gen_expand_wildcards(1, &exp, &file_count, &files,
EW_DIR|EW_FILE|EW_SILENT|EW_ALLLINKS|EW_DODOT|EW_EMPTYOK) == OK)
{
for (i = 0; i < file_count; ++i)
if (delete_recursive(files[i]) != 0)
result = -1;
FreeWild(file_count, files);
}
else
result = -1;
vim_free(exp);
(void)mch_rmdir(name);
}
else
result = mch_remove(name) == 0 ? 0 : -1;
return result;
}
#endif
#if defined(TEMPDIRNAMES) || defined(PROTO)
static long temp_count = 0; /* Temp filename counter. */
/*
* Delete the temp directory and all files it contains.
*/
void
vim_deltempdir(void)
{
if (vim_tempdir != NULL)
{
/* remove the trailing path separator */
gettail(vim_tempdir)[-1] = NUL;
delete_recursive(vim_tempdir);
vim_free(vim_tempdir);
vim_tempdir = NULL;
}
}
/*
* Directory "tempdir" was created. Expand this name to a full path and put
* it in "vim_tempdir". This avoids that using ":cd" would confuse us.
* "tempdir" must be no longer than MAXPATHL.
*/
static void
vim_settempdir(char_u *tempdir)
{
char_u *buf;
buf = alloc((unsigned)MAXPATHL + 2);
if (buf != NULL)
{
if (vim_FullName(tempdir, buf, MAXPATHL, FALSE) == FAIL)
STRCPY(buf, tempdir);
add_pathsep(buf);
vim_tempdir = vim_strsave(buf);
vim_free(buf);
}
}
#endif
/*
* vim_tempname(): Return a unique name that can be used for a temp file.
*
* The temp file is NOT guaranteed to be created. If "keep" is FALSE it is
* guaranteed to NOT be created.
*
* The returned pointer is to allocated memory.
* The returned pointer is NULL if no valid name was found.
*/
char_u *
vim_tempname(
int extra_char UNUSED, /* char to use in the name instead of '?' */
int keep UNUSED)
{
#ifdef USE_TMPNAM
char_u itmp[L_tmpnam]; /* use tmpnam() */
#else
char_u itmp[TEMPNAMELEN];
#endif
#ifdef TEMPDIRNAMES
static char *(tempdirs[]) = {TEMPDIRNAMES};
int i;
# ifndef EEXIST
stat_T st;
# endif
/*
* This will create a directory for private use by this instance of Vim.
* This is done once, and the same directory is used for all temp files.
* This method avoids security problems because of symlink attacks et al.
* It's also a bit faster, because we only need to check for an existing
* file when creating the directory and not for each temp file.
*/
if (vim_tempdir == NULL)
{
/*
* Try the entries in TEMPDIRNAMES to create the temp directory.
*/
for (i = 0; i < (int)(sizeof(tempdirs) / sizeof(char *)); ++i)
{
# ifndef HAVE_MKDTEMP
size_t itmplen;
long nr;
long off;
# endif
/* Expand $TMP, leave room for "/v1100000/999999999".
* Skip the directory check if the expansion fails. */
expand_env((char_u *)tempdirs[i], itmp, TEMPNAMELEN - 20);
if (itmp[0] != '$' && mch_isdir(itmp))
{
/* directory exists */
add_pathsep(itmp);
# ifdef HAVE_MKDTEMP
{
# if defined(UNIX) || defined(VMS)
/* Make sure the umask doesn't remove the executable bit.
* "repl" has been reported to use "177". */
mode_t umask_save = umask(077);
# endif
/* Leave room for filename */
STRCAT(itmp, "vXXXXXX");
if (mkdtemp((char *)itmp) != NULL)
vim_settempdir(itmp);
# if defined(UNIX) || defined(VMS)
(void)umask(umask_save);
# endif
}
# else
/* Get an arbitrary number of up to 6 digits. When it's
* unlikely that it already exists it will be faster,
* otherwise it doesn't matter. The use of mkdir() avoids any
* security problems because of the predictable number. */
nr = (mch_get_pid() + (long)time(NULL)) % 1000000L;
itmplen = STRLEN(itmp);
/* Try up to 10000 different values until we find a name that
* doesn't exist. */
for (off = 0; off < 10000L; ++off)
{
int r;
# if defined(UNIX) || defined(VMS)
mode_t umask_save;
# endif
sprintf((char *)itmp + itmplen, "v%ld", nr + off);
# ifndef EEXIST
/* If mkdir() does not set errno to EEXIST, check for
* existing file here. There is a race condition then,
* although it's fail-safe. */
if (mch_stat((char *)itmp, &st) >= 0)
continue;
# endif
# if defined(UNIX) || defined(VMS)
/* Make sure the umask doesn't remove the executable bit.
* "repl" has been reported to use "177". */
umask_save = umask(077);
# endif
r = vim_mkdir(itmp, 0700);
# if defined(UNIX) || defined(VMS)
(void)umask(umask_save);
# endif
if (r == 0)
{
vim_settempdir(itmp);
break;
}
# ifdef EEXIST
/* If the mkdir() didn't fail because the file/dir exists,
* we probably can't create any dir here, try another
* place. */
if (errno != EEXIST)
# endif
break;
}
# endif /* HAVE_MKDTEMP */
if (vim_tempdir != NULL)
break;
}
}
}
if (vim_tempdir != NULL)
{
/* There is no need to check if the file exists, because we own the
* directory and nobody else creates a file in it. */
sprintf((char *)itmp, "%s%ld", vim_tempdir, temp_count++);
return vim_strsave(itmp);
}
return NULL;
#else /* TEMPDIRNAMES */
# ifdef WIN3264
char szTempFile[_MAX_PATH + 1];
char buf4[4];
char_u *retval;
char_u *p;
STRCPY(itmp, "");
if (GetTempPath(_MAX_PATH, szTempFile) == 0)
{
szTempFile[0] = '.'; /* GetTempPath() failed, use current dir */
szTempFile[1] = NUL;
}
strcpy(buf4, "VIM");
buf4[2] = extra_char; /* make it "VIa", "VIb", etc. */
if (GetTempFileName(szTempFile, buf4, 0, (LPSTR)itmp) == 0)
return NULL;
if (!keep)
/* GetTempFileName() will create the file, we don't want that */
(void)DeleteFile((LPSTR)itmp);
/* Backslashes in a temp file name cause problems when filtering with
* "sh". NOTE: This also checks 'shellcmdflag' to help those people who
* didn't set 'shellslash'. */
retval = vim_strsave(itmp);
if (*p_shcf == '-' || p_ssl)
for (p = retval; *p; ++p)
if (*p == '\\')
*p = '/';
return retval;
# else /* WIN3264 */
# ifdef USE_TMPNAM
char_u *p;
/* tmpnam() will make its own name */
p = tmpnam((char *)itmp);
if (p == NULL || *p == NUL)
return NULL;
# else
char_u *p;
# ifdef VMS_TEMPNAM
    /* mktemp() does not work on VMS; it seems to be a do-nothing
     * function.  Therefore we use tempnam().
     */
sprintf((char *)itmp, "VIM%c", extra_char);
p = (char_u *)tempnam("tmp:", (char *)itmp);
if (p != NULL)
{
/* VMS will use '.LIS' if we don't explicitly specify an extension,
* and VIM will then be unable to find the file later */
STRCPY(itmp, p);
STRCAT(itmp, ".txt");
free(p);
}
else
return NULL;
# else
STRCPY(itmp, TEMPNAME);
if ((p = vim_strchr(itmp, '?')) != NULL)
*p = extra_char;
if (mktemp((char *)itmp) == NULL)
return NULL;
# endif
# endif
return vim_strsave(itmp);
# endif /* WIN3264 */
#endif /* TEMPDIRNAMES */
}
#if defined(BACKSLASH_IN_FILENAME) || defined(PROTO)
/*
 * Convert all backslashes in fname to forward slashes in place, unless it
 * looks like a URL.
*/
void
forward_slash(char_u *fname)
{
char_u *p;
if (path_with_url(fname))
return;
for (p = fname; *p != NUL; ++p)
# ifdef FEAT_MBYTE
/* The Big5 encoding can have '\' in the trail byte. */
if (enc_dbcs != 0 && (*mb_ptr2len)(p) > 1)
++p;
else
# endif
if (*p == '\\')
*p = '/';
}
#endif
/*
* Code for automatic commands.
*
* Only included when "FEAT_AUTOCMD" has been defined.
*/
#if defined(FEAT_AUTOCMD) || defined(PROTO)
/*
* The autocommands are stored in a list for each event.
* Autocommands for the same pattern, that are consecutive, are joined
* together, to avoid having to match the pattern too often.
* The result is an array of Autopat lists, which point to AutoCmd lists:
*
* first_autopat[0] --> Autopat.next --> Autopat.next --> NULL
* Autopat.cmds Autopat.cmds
* | |
* V V
* AutoCmd.next AutoCmd.next
* | |
* V V
* AutoCmd.next NULL
* |
* V
* NULL
*
* first_autopat[1] --> Autopat.next --> NULL
* Autopat.cmds
* |
* V
* AutoCmd.next
* |
* V
* NULL
* etc.
*
* The order of AutoCmds is important, this is the order in which they were
* defined and will have to be executed.
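 *
 * For example (illustrative only), after:
 *	:autocmd BufRead *.c set cindent
 *	:autocmd BufRead *.c set tw=0
 * first_autopat[EVENT_BUFREADPOST] holds a single AutoPat for "*.c" whose
 * "cmds" list contains two AutoCmd entries, executed in definition order.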
*/
typedef struct AutoCmd
{
char_u *cmd; /* The command to be executed (NULL
when command has been removed) */
char nested; /* If autocommands nest here */
char last; /* last command in list */
#ifdef FEAT_EVAL
scid_T scriptID; /* script ID where defined */
#endif
struct AutoCmd *next; /* Next AutoCmd in list */
} AutoCmd;
typedef struct AutoPat
{
char_u *pat; /* pattern as typed (NULL when pattern
has been removed) */
regprog_T *reg_prog; /* compiled regprog for pattern */
AutoCmd *cmds; /* list of commands to do */
struct AutoPat *next; /* next AutoPat in AutoPat list */
int group; /* group ID */
int patlen; /* strlen() of pat */
int buflocal_nr; /* !=0 for buffer-local AutoPat */
char allow_dirs; /* Pattern may match whole path */
char last; /* last pattern for apply_autocmds() */
} AutoPat;
static struct event_name
{
char *name; /* event name */
event_T event; /* event number */
} event_names[] =
{
{"BufAdd", EVENT_BUFADD},
{"BufCreate", EVENT_BUFADD},
{"BufDelete", EVENT_BUFDELETE},
{"BufEnter", EVENT_BUFENTER},
{"BufFilePost", EVENT_BUFFILEPOST},
{"BufFilePre", EVENT_BUFFILEPRE},
{"BufHidden", EVENT_BUFHIDDEN},
{"BufLeave", EVENT_BUFLEAVE},
{"BufNew", EVENT_BUFNEW},
{"BufNewFile", EVENT_BUFNEWFILE},
{"BufRead", EVENT_BUFREADPOST},
{"BufReadCmd", EVENT_BUFREADCMD},
{"BufReadPost", EVENT_BUFREADPOST},
{"BufReadPre", EVENT_BUFREADPRE},
{"BufUnload", EVENT_BUFUNLOAD},
{"BufWinEnter", EVENT_BUFWINENTER},
{"BufWinLeave", EVENT_BUFWINLEAVE},
{"BufWipeout", EVENT_BUFWIPEOUT},
{"BufWrite", EVENT_BUFWRITEPRE},
{"BufWritePost", EVENT_BUFWRITEPOST},
{"BufWritePre", EVENT_BUFWRITEPRE},
{"BufWriteCmd", EVENT_BUFWRITECMD},
{"CmdlineEnter", EVENT_CMDLINEENTER},
{"CmdlineLeave", EVENT_CMDLINELEAVE},
{"CmdwinEnter", EVENT_CMDWINENTER},
{"CmdwinLeave", EVENT_CMDWINLEAVE},
{"CmdUndefined", EVENT_CMDUNDEFINED},
{"ColorScheme", EVENT_COLORSCHEME},
{"CompleteDone", EVENT_COMPLETEDONE},
{"CursorHold", EVENT_CURSORHOLD},
{"CursorHoldI", EVENT_CURSORHOLDI},
{"CursorMoved", EVENT_CURSORMOVED},
{"CursorMovedI", EVENT_CURSORMOVEDI},
{"EncodingChanged", EVENT_ENCODINGCHANGED},
{"FileEncoding", EVENT_ENCODINGCHANGED},
{"FileAppendPost", EVENT_FILEAPPENDPOST},
{"FileAppendPre", EVENT_FILEAPPENDPRE},
{"FileAppendCmd", EVENT_FILEAPPENDCMD},
{"FileChangedShell",EVENT_FILECHANGEDSHELL},
{"FileChangedShellPost",EVENT_FILECHANGEDSHELLPOST},
{"FileChangedRO", EVENT_FILECHANGEDRO},
{"FileReadPost", EVENT_FILEREADPOST},
{"FileReadPre", EVENT_FILEREADPRE},
{"FileReadCmd", EVENT_FILEREADCMD},
{"FileType", EVENT_FILETYPE},
{"FileWritePost", EVENT_FILEWRITEPOST},
{"FileWritePre", EVENT_FILEWRITEPRE},
{"FileWriteCmd", EVENT_FILEWRITECMD},
{"FilterReadPost", EVENT_FILTERREADPOST},
{"FilterReadPre", EVENT_FILTERREADPRE},
{"FilterWritePost", EVENT_FILTERWRITEPOST},
{"FilterWritePre", EVENT_FILTERWRITEPRE},
{"FocusGained", EVENT_FOCUSGAINED},
{"FocusLost", EVENT_FOCUSLOST},
{"FuncUndefined", EVENT_FUNCUNDEFINED},
{"GUIEnter", EVENT_GUIENTER},
{"GUIFailed", EVENT_GUIFAILED},
{"InsertChange", EVENT_INSERTCHANGE},
{"InsertEnter", EVENT_INSERTENTER},
{"InsertLeave", EVENT_INSERTLEAVE},
{"InsertCharPre", EVENT_INSERTCHARPRE},
{"MenuPopup", EVENT_MENUPOPUP},
{"OptionSet", EVENT_OPTIONSET},
{"QuickFixCmdPost", EVENT_QUICKFIXCMDPOST},
{"QuickFixCmdPre", EVENT_QUICKFIXCMDPRE},
{"QuitPre", EVENT_QUITPRE},
{"RemoteReply", EVENT_REMOTEREPLY},
{"SessionLoadPost", EVENT_SESSIONLOADPOST},
{"ShellCmdPost", EVENT_SHELLCMDPOST},
{"ShellFilterPost", EVENT_SHELLFILTERPOST},
{"SourcePre", EVENT_SOURCEPRE},
{"SourceCmd", EVENT_SOURCECMD},
{"SpellFileMissing",EVENT_SPELLFILEMISSING},
{"StdinReadPost", EVENT_STDINREADPOST},
{"StdinReadPre", EVENT_STDINREADPRE},
{"SwapExists", EVENT_SWAPEXISTS},
{"Syntax", EVENT_SYNTAX},
{"TabNew", EVENT_TABNEW},
{"TabClosed", EVENT_TABCLOSED},
{"TabEnter", EVENT_TABENTER},
{"TabLeave", EVENT_TABLEAVE},
{"TermChanged", EVENT_TERMCHANGED},
{"TermResponse", EVENT_TERMRESPONSE},
{"TextChanged", EVENT_TEXTCHANGED},
{"TextChangedI", EVENT_TEXTCHANGEDI},
{"User", EVENT_USER},
{"VimEnter", EVENT_VIMENTER},
{"VimLeave", EVENT_VIMLEAVE},
{"VimLeavePre", EVENT_VIMLEAVEPRE},
{"WinNew", EVENT_WINNEW},
{"WinEnter", EVENT_WINENTER},
{"WinLeave", EVENT_WINLEAVE},
{"VimResized", EVENT_VIMRESIZED},
{NULL, (event_T)0}
};
static AutoPat *first_autopat[NUM_EVENTS] =
{
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
};
/*
* struct used to keep status while executing autocommands for an event.
*/
typedef struct AutoPatCmd
{
AutoPat *curpat; /* next AutoPat to examine */
AutoCmd *nextcmd; /* next AutoCmd to execute */
int group; /* group being used */
char_u *fname; /* fname to match with */
char_u *sfname; /* sfname to match with */
char_u *tail; /* tail of fname */
event_T event; /* current event */
int arg_bufnr; /* initially equal to <abuf>, set to zero when
buf is deleted */
struct AutoPatCmd *next; /* chain of active apc-s for auto-invalidation*/
} AutoPatCmd;
static AutoPatCmd *active_apc_list = NULL; /* stack of active autocommands */
/*
* augroups stores a list of autocmd group names.
*/
static garray_T augroups = {0, 0, sizeof(char_u *), 10, NULL};
#define AUGROUP_NAME(i) (((char_u **)augroups.ga_data)[i])
/* use get_deleted_augroup() to get this */
static char_u *deleted_augroup = NULL;
/*
* The ID of the current group. Group 0 is the default one.
*/
static int current_augroup = AUGROUP_DEFAULT;
static int au_need_clean = FALSE; /* need to delete marked patterns */
static void show_autocmd(AutoPat *ap, event_T event);
static void au_remove_pat(AutoPat *ap);
static void au_remove_cmds(AutoPat *ap);
static void au_cleanup(void);
static int au_new_group(char_u *name);
static void au_del_group(char_u *name);
static event_T event_name2nr(char_u *start, char_u **end);
static char_u *event_nr2name(event_T event);
static char_u *find_end_event(char_u *arg, int have_group);
static int event_ignored(event_T event);
static int au_get_grouparg(char_u **argp);
static int do_autocmd_event(event_T event, char_u *pat, int nested, char_u *cmd, int forceit, int group);
static int apply_autocmds_group(event_T event, char_u *fname, char_u *fname_io, int force, int group, buf_T *buf, exarg_T *eap);
static void auto_next_pat(AutoPatCmd *apc, int stop_at_last);
#if defined(FEAT_AUTOCMD) || defined(FEAT_WILDIGN)
static int match_file_pat(char_u *pattern, regprog_T **prog, char_u *fname, char_u *sfname, char_u *tail, int allow_dirs);
#endif
static event_T last_event;
static int last_group;
static int autocmd_blocked = 0; /* block all autocmds */
static char_u *
get_deleted_augroup(void)
{
if (deleted_augroup == NULL)
deleted_augroup = (char_u *)_("--Deleted--");
return deleted_augroup;
}
/*
* Show the autocommands for one AutoPat.
*/
static void
show_autocmd(AutoPat *ap, event_T event)
{
AutoCmd *ac;
/* Check for "got_int" (here and at various places below), which is set
* when "q" has been hit for the "--more--" prompt */
if (got_int)
return;
if (ap->pat == NULL) /* pattern has been removed */
return;
msg_putchar('\n');
if (got_int)
return;
if (event != last_event || ap->group != last_group)
{
if (ap->group != AUGROUP_DEFAULT)
{
if (AUGROUP_NAME(ap->group) == NULL)
msg_puts_attr(get_deleted_augroup(), HL_ATTR(HLF_E));
else
msg_puts_attr(AUGROUP_NAME(ap->group), HL_ATTR(HLF_T));
msg_puts((char_u *)" ");
}
msg_puts_attr(event_nr2name(event), HL_ATTR(HLF_T));
last_event = event;
last_group = ap->group;
msg_putchar('\n');
if (got_int)
return;
}
msg_col = 4;
msg_outtrans(ap->pat);
for (ac = ap->cmds; ac != NULL; ac = ac->next)
{
if (ac->cmd != NULL) /* skip removed commands */
{
if (msg_col >= 14)
msg_putchar('\n');
msg_col = 14;
if (got_int)
return;
msg_outtrans(ac->cmd);
#ifdef FEAT_EVAL
if (p_verbose > 0)
last_set_msg(ac->scriptID);
#endif
if (got_int)
return;
if (ac->next != NULL)
{
msg_putchar('\n');
if (got_int)
return;
}
}
}
}
/*
* Mark an autocommand pattern for deletion.
*/
static void
au_remove_pat(AutoPat *ap)
{
vim_free(ap->pat);
ap->pat = NULL;
ap->buflocal_nr = -1;
au_need_clean = TRUE;
}
/*
* Mark all commands for a pattern for deletion.
*/
static void
au_remove_cmds(AutoPat *ap)
{
AutoCmd *ac;
for (ac = ap->cmds; ac != NULL; ac = ac->next)
{
vim_free(ac->cmd);
ac->cmd = NULL;
}
au_need_clean = TRUE;
}
/*
* Cleanup autocommands and patterns that have been deleted.
* This is only done when not executing autocommands.
*/
static void
au_cleanup(void)
{
AutoPat *ap, **prev_ap;
AutoCmd *ac, **prev_ac;
event_T event;
if (autocmd_busy || !au_need_clean)
return;
/* loop over all events */
for (event = (event_T)0; (int)event < (int)NUM_EVENTS;
event = (event_T)((int)event + 1))
{
/* loop over all autocommand patterns */
prev_ap = &(first_autopat[(int)event]);
for (ap = *prev_ap; ap != NULL; ap = *prev_ap)
{
/* loop over all commands for this pattern */
prev_ac = &(ap->cmds);
for (ac = *prev_ac; ac != NULL; ac = *prev_ac)
{
/* remove the command if the pattern is to be deleted or when
* the command has been marked for deletion */
if (ap->pat == NULL || ac->cmd == NULL)
{
*prev_ac = ac->next;
vim_free(ac->cmd);
vim_free(ac);
}
else
prev_ac = &(ac->next);
}
/* remove the pattern if it has been marked for deletion */
if (ap->pat == NULL)
{
*prev_ap = ap->next;
vim_regfree(ap->reg_prog);
vim_free(ap);
}
else
prev_ap = &(ap->next);
}
}
au_need_clean = FALSE;
}
/*
* Called when buffer is freed, to remove/invalidate related buffer-local
* autocmds.
*/
void
aubuflocal_remove(buf_T *buf)
{
AutoPat *ap;
event_T event;
AutoPatCmd *apc;
/* invalidate currently executing autocommands */
for (apc = active_apc_list; apc; apc = apc->next)
if (buf->b_fnum == apc->arg_bufnr)
apc->arg_bufnr = 0;
/* invalidate buflocals looping through events */
for (event = (event_T)0; (int)event < (int)NUM_EVENTS;
event = (event_T)((int)event + 1))
/* loop over all autocommand patterns */
for (ap = first_autopat[(int)event]; ap != NULL; ap = ap->next)
if (ap->buflocal_nr == buf->b_fnum)
{
au_remove_pat(ap);
if (p_verbose >= 6)
{
verbose_enter();
smsg((char_u *)
_("auto-removing autocommand: %s <buffer=%d>"),
event_nr2name(event), buf->b_fnum);
verbose_leave();
}
}
au_cleanup();
}
/*
* Add an autocmd group name.
 * Return its ID.  Returns AUGROUP_ERROR (< 0) for error.
*/
static int
au_new_group(char_u *name)
{
int i;
i = au_find_group(name);
if (i == AUGROUP_ERROR) /* the group doesn't exist yet, add it */
{
/* First try using a free entry. */
for (i = 0; i < augroups.ga_len; ++i)
if (AUGROUP_NAME(i) == NULL)
break;
if (i == augroups.ga_len && ga_grow(&augroups, 1) == FAIL)
return AUGROUP_ERROR;
AUGROUP_NAME(i) = vim_strsave(name);
if (AUGROUP_NAME(i) == NULL)
return AUGROUP_ERROR;
if (i == augroups.ga_len)
++augroups.ga_len;
}
return i;
}
static void
au_del_group(char_u *name)
{
int i;
i = au_find_group(name);
if (i == AUGROUP_ERROR) /* the group doesn't exist */
EMSG2(_("E367: No such group: \"%s\""), name);
else if (i == current_augroup)
EMSG(_("E936: Cannot delete the current group"));
else
{
event_T event;
AutoPat *ap;
int in_use = FALSE;
for (event = (event_T)0; (int)event < (int)NUM_EVENTS;
event = (event_T)((int)event + 1))
{
for (ap = first_autopat[(int)event]; ap != NULL; ap = ap->next)
if (ap->group == i && ap->pat != NULL)
{
give_warning((char_u *)_("W19: Deleting augroup that is still in use"), TRUE);
in_use = TRUE;
event = NUM_EVENTS;
break;
}
}
vim_free(AUGROUP_NAME(i));
if (in_use)
{
AUGROUP_NAME(i) = get_deleted_augroup();
}
else
AUGROUP_NAME(i) = NULL;
}
}
/*
* Find the ID of an autocmd group name.
 * Return its ID.  Returns AUGROUP_ERROR (< 0) for error.
*/
static int
au_find_group(char_u *name)
{
int i;
for (i = 0; i < augroups.ga_len; ++i)
if (AUGROUP_NAME(i) != NULL && AUGROUP_NAME(i) != get_deleted_augroup()
&& STRCMP(AUGROUP_NAME(i), name) == 0)
return i;
return AUGROUP_ERROR;
}
/*
* Return TRUE if augroup "name" exists.
*/
int
au_has_group(char_u *name)
{
return au_find_group(name) != AUGROUP_ERROR;
}
/*
* ":augroup {name}".
*/
void
do_augroup(char_u *arg, int del_group)
{
int i;
if (del_group)
{
if (*arg == NUL)
EMSG(_(e_argreq));
else
au_del_group(arg);
}
else if (STRICMP(arg, "end") == 0) /* ":aug end": back to group 0 */
current_augroup = AUGROUP_DEFAULT;
else if (*arg) /* ":aug xxx": switch to group xxx */
{
i = au_new_group(arg);
if (i != AUGROUP_ERROR)
current_augroup = i;
}
else /* ":aug": list the group names */
{
msg_start();
for (i = 0; i < augroups.ga_len; ++i)
{
if (AUGROUP_NAME(i) != NULL)
{
msg_puts(AUGROUP_NAME(i));
msg_puts((char_u *)" ");
}
}
msg_clr_eos();
msg_end();
}
}
#if defined(EXITFREE) || defined(PROTO)
void
free_all_autocmds(void)
{
int i;
char_u *s;
for (current_augroup = -1; current_augroup < augroups.ga_len;
++current_augroup)
do_autocmd((char_u *)"", TRUE);
for (i = 0; i < augroups.ga_len; ++i)
{
s = ((char_u **)(augroups.ga_data))[i];
if (s != get_deleted_augroup())
vim_free(s);
}
ga_clear(&augroups);
}
#endif
/*
* Return the event number for event name "start".
* Return NUM_EVENTS if the event name was not found.
* Return a pointer to the next event name in "end".
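 * For example (illustrative): for "BufRead,BufNewFile *.c" this returns
 * EVENT_BUFREADPOST and leaves "end" pointing at "BufNewFile *.c".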
*/
static event_T
event_name2nr(char_u *start, char_u **end)
{
char_u *p;
int i;
int len;
/* the event name ends with end of line, '|', a blank or a comma */
for (p = start; *p && !VIM_ISWHITE(*p) && *p != ',' && *p != '|'; ++p)
;
for (i = 0; event_names[i].name != NULL; ++i)
{
len = (int)STRLEN(event_names[i].name);
if (len == p - start && STRNICMP(event_names[i].name, start, len) == 0)
break;
}
if (*p == ',')
++p;
*end = p;
if (event_names[i].name == NULL)
return NUM_EVENTS;
return event_names[i].event;
}
/*
* Return the name for event "event".
*/
static char_u *
event_nr2name(event_T event)
{
int i;
for (i = 0; event_names[i].name != NULL; ++i)
if (event_names[i].event == event)
return (char_u *)event_names[i].name;
return (char_u *)"Unknown";
}
/*
* Scan over the events. "*" stands for all events.
*/
static char_u *
find_end_event(
char_u *arg,
int have_group) /* TRUE when group name was found */
{
char_u *pat;
char_u *p;
if (*arg == '*')
{
if (arg[1] && !VIM_ISWHITE(arg[1]))
{
EMSG2(_("E215: Illegal character after *: %s"), arg);
return NULL;
}
pat = arg + 1;
}
else
{
for (pat = arg; *pat && *pat != '|' && !VIM_ISWHITE(*pat); pat = p)
{
if ((int)event_name2nr(pat, &p) >= (int)NUM_EVENTS)
{
if (have_group)
EMSG2(_("E216: No such event: %s"), pat);
else
EMSG2(_("E216: No such group or event: %s"), pat);
return NULL;
}
}
}
return pat;
}
/*
* Return TRUE if "event" is included in 'eventignore'.
*/
static int
event_ignored(event_T event)
{
char_u *p = p_ei;
while (*p != NUL)
{
if (STRNICMP(p, "all", 3) == 0 && (p[3] == NUL || p[3] == ','))
return TRUE;
if (event_name2nr(p, &p) == event)
return TRUE;
}
return FALSE;
}
/*
* Return OK when the contents of p_ei is valid, FAIL otherwise.
*/
int
check_ei(void)
{
char_u *p = p_ei;
while (*p)
{
if (STRNICMP(p, "all", 3) == 0 && (p[3] == NUL || p[3] == ','))
{
p += 3;
if (*p == ',')
++p;
}
else if (event_name2nr(p, &p) == NUM_EVENTS)
return FAIL;
}
return OK;
}
# if defined(FEAT_SYN_HL) || defined(PROTO)
/*
* Add "what" to 'eventignore' to skip loading syntax highlighting for every
* buffer loaded into the window. "what" must start with a comma.
* Returns the old value of 'eventignore' in allocated memory.
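 *
 * Typical usage (illustrative sketch):
 *	char_u *save_ei = au_event_disable(",Syntax");
 *	... load buffers without triggering Syntax autocommands ...
 *	au_event_restore(save_ei);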
*/
char_u *
au_event_disable(char *what)
{
char_u *new_ei;
char_u *save_ei;
save_ei = vim_strsave(p_ei);
if (save_ei != NULL)
{
new_ei = vim_strnsave(p_ei, (int)(STRLEN(p_ei) + STRLEN(what)));
if (new_ei != NULL)
{
if (*what == ',' && *p_ei == NUL)
STRCPY(new_ei, what + 1);
else
STRCAT(new_ei, what);
set_string_option_direct((char_u *)"ei", -1, new_ei,
OPT_FREE, SID_NONE);
vim_free(new_ei);
}
}
return save_ei;
}
void
au_event_restore(char_u *old_ei)
{
if (old_ei != NULL)
{
set_string_option_direct((char_u *)"ei", -1, old_ei,
OPT_FREE, SID_NONE);
vim_free(old_ei);
}
}
# endif /* FEAT_SYN_HL */
/*
* do_autocmd() -- implements the :autocmd command. Can be used in the
* following ways:
*
* :autocmd <event> <pat> <cmd> Add <cmd> to the list of commands that
* will be automatically executed for <event>
* when editing a file matching <pat>, in
* the current group.
* :autocmd <event> <pat> Show the auto-commands associated with
* <event> and <pat>.
* :autocmd <event> Show the auto-commands associated with
* <event>.
* :autocmd Show all auto-commands.
* :autocmd! <event> <pat> <cmd> Remove all auto-commands associated with
* <event> and <pat>, and add the command
* <cmd>, for the current group.
* :autocmd! <event> <pat> Remove all auto-commands associated with
* <event> and <pat> for the current group.
* :autocmd! <event> Remove all auto-commands associated with
* <event> for the current group.
* :autocmd! Remove ALL auto-commands for the current
* group.
*
* Multiple events and patterns may be given separated by commas. Here are
* some examples:
* :autocmd bufread,bufenter *.c,*.h set tw=0 smartindent noic
* :autocmd bufleave * set tw=79 nosmartindent ic infercase
*
* :autocmd * *.c show all autocommands for *.c files.
*
 * An optional {group} argument can appear before <event>.
*/
void
do_autocmd(char_u *arg_in, int forceit)
{
char_u *arg = arg_in;
char_u *pat;
char_u *envpat = NULL;
char_u *cmd;
event_T event;
int need_free = FALSE;
int nested = FALSE;
int group;
if (*arg == '|')
{
arg = (char_u *)"";
group = AUGROUP_ALL; /* no argument, use all groups */
}
else
{
/*
* Check for a legal group name. If not, use AUGROUP_ALL.
*/
group = au_get_grouparg(&arg);
if (arg == NULL) /* out of memory */
return;
}
/*
* Scan over the events.
* If we find an illegal name, return here, don't do anything.
*/
pat = find_end_event(arg, group != AUGROUP_ALL);
if (pat == NULL)
return;
pat = skipwhite(pat);
if (*pat == '|')
{
pat = (char_u *)"";
cmd = (char_u *)"";
}
else
{
/*
* Scan over the pattern. Put a NUL at the end.
*/
cmd = pat;
while (*cmd && (!VIM_ISWHITE(*cmd) || cmd[-1] == '\\'))
cmd++;
if (*cmd)
*cmd++ = NUL;
/* Expand environment variables in the pattern. Set 'shellslash', we want
* forward slashes here. */
if (vim_strchr(pat, '$') != NULL || vim_strchr(pat, '~') != NULL)
{
#ifdef BACKSLASH_IN_FILENAME
int p_ssl_save = p_ssl;
p_ssl = TRUE;
#endif
envpat = expand_env_save(pat);
#ifdef BACKSLASH_IN_FILENAME
p_ssl = p_ssl_save;
#endif
if (envpat != NULL)
pat = envpat;
}
/*
* Check for "nested" flag.
*/
cmd = skipwhite(cmd);
if (*cmd != NUL && STRNCMP(cmd, "nested", 6) == 0 && VIM_ISWHITE(cmd[6]))
{
nested = TRUE;
cmd = skipwhite(cmd + 6);
}
/*
* Find the start of the commands.
* Expand <sfile> in it.
*/
if (*cmd != NUL)
{
cmd = expand_sfile(cmd);
if (cmd == NULL) /* some error */
return;
need_free = TRUE;
}
}
/*
* Print header when showing autocommands.
*/
if (!forceit && *cmd == NUL)
{
/* Highlight title */
MSG_PUTS_TITLE(_("\n--- Auto-Commands ---"));
}
/*
* Loop over the events.
*/
last_event = (event_T)-1; /* for listing the event name */
last_group = AUGROUP_ERROR; /* for listing the group name */
if (*arg == '*' || *arg == NUL || *arg == '|')
{
for (event = (event_T)0; (int)event < (int)NUM_EVENTS;
event = (event_T)((int)event + 1))
if (do_autocmd_event(event, pat,
nested, cmd, forceit, group) == FAIL)
break;
}
else
{
while (*arg && *arg != '|' && !VIM_ISWHITE(*arg))
if (do_autocmd_event(event_name2nr(arg, &arg), pat,
nested, cmd, forceit, group) == FAIL)
break;
}
if (need_free)
vim_free(cmd);
vim_free(envpat);
}
/*
* Find the group ID in a ":autocmd" or ":doautocmd" argument.
* The "argp" argument is advanced to the following argument.
*
* Returns the group ID, AUGROUP_ERROR for error (out of memory).
*/
static int
au_get_grouparg(char_u **argp)
{
char_u *group_name;
char_u *p;
char_u *arg = *argp;
int group = AUGROUP_ALL;
for (p = arg; *p && !VIM_ISWHITE(*p) && *p != '|'; ++p)
;
if (p > arg)
{
group_name = vim_strnsave(arg, (int)(p - arg));
if (group_name == NULL) /* out of memory */
return AUGROUP_ERROR;
group = au_find_group(group_name);
if (group == AUGROUP_ERROR)
group = AUGROUP_ALL; /* no match, use all groups */
else
*argp = skipwhite(p); /* match, skip over group name */
vim_free(group_name);
}
return group;
}
/*
* do_autocmd() for one event.
* If *pat == NUL do for all patterns.
* If *cmd == NUL show entries.
* If forceit == TRUE delete entries.
* If group is not AUGROUP_ALL, only use this group.
*/
static int
do_autocmd_event(
event_T event,
char_u *pat,
int nested,
char_u *cmd,
int forceit,
int group)
{
AutoPat *ap;
AutoPat **prev_ap;
AutoCmd *ac;
AutoCmd **prev_ac;
int brace_level;
char_u *endpat;
int findgroup;
int allgroups;
int patlen;
int is_buflocal;
int buflocal_nr;
char_u buflocal_pat[25]; /* for "<buffer=X>" */
if (group == AUGROUP_ALL)
findgroup = current_augroup;
else
findgroup = group;
allgroups = (group == AUGROUP_ALL && !forceit && *cmd == NUL);
/*
* Show or delete all patterns for an event.
*/
if (*pat == NUL)
{
for (ap = first_autopat[(int)event]; ap != NULL; ap = ap->next)
{
if (forceit) /* delete the AutoPat, if it's in the current group */
{
if (ap->group == findgroup)
au_remove_pat(ap);
}
else if (group == AUGROUP_ALL || ap->group == group)
show_autocmd(ap, event);
}
}
/*
* Loop through all the specified patterns.
*/
for ( ; *pat; pat = (*endpat == ',' ? endpat + 1 : endpat))
{
/*
* Find end of the pattern.
* Watch out for a comma in braces, like "*.\{obj,o\}".
*/
brace_level = 0;
for (endpat = pat; *endpat && (*endpat != ',' || brace_level
|| (endpat > pat && endpat[-1] == '\\')); ++endpat)
{
if (*endpat == '{')
brace_level++;
else if (*endpat == '}')
brace_level--;
}
if (pat == endpat) /* ignore single comma */
continue;
patlen = (int)(endpat - pat);
/*
* detect special <buflocal[=X]> buffer-local patterns
*/
is_buflocal = FALSE;
buflocal_nr = 0;
if (patlen >= 8 && STRNCMP(pat, "<buffer", 7) == 0
&& pat[patlen - 1] == '>')
{
/* "<buffer...>": Error will be printed only for addition.
* printing and removing will proceed silently. */
is_buflocal = TRUE;
if (patlen == 8)
/* "<buffer>" */
buflocal_nr = curbuf->b_fnum;
else if (patlen > 9 && pat[7] == '=')
{
if (patlen == 13 && STRNICMP(pat, "<buffer=abuf>", 13) == 0)
/* "<buffer=abuf>" */
buflocal_nr = autocmd_bufnr;
else if (skipdigits(pat + 8) == pat + patlen - 1)
/* "<buffer=123>" */
buflocal_nr = atoi((char *)pat + 8);
}
}
if (is_buflocal)
{
/* normalize pat into standard "<buffer>#N" form */
sprintf((char *)buflocal_pat, "<buffer=%d>", buflocal_nr);
pat = buflocal_pat; /* can modify pat and patlen */
patlen = (int)STRLEN(buflocal_pat); /* but not endpat */
}
/*
* Find AutoPat entries with this pattern.
*/
prev_ap = &first_autopat[(int)event];
while ((ap = *prev_ap) != NULL)
{
if (ap->pat != NULL)
{
/* Accept a pattern when:
* - a group was specified and it's that group, or a group was
* not specified and it's the current group, or a group was
* not specified and we are listing
* - the length of the pattern matches
* - the pattern matches.
* For <buffer[=X]>, this condition works because we normalize
* all buffer-local patterns.
*/
if ((allgroups || ap->group == findgroup)
&& ap->patlen == patlen
&& STRNCMP(pat, ap->pat, patlen) == 0)
{
/*
* Remove existing autocommands.
		     * If adding any new autocmds for this AutoPat, don't
* delete the pattern from the autopat list, append to
* this list.
*/
if (forceit)
{
if (*cmd != NUL && ap->next == NULL)
{
au_remove_cmds(ap);
break;
}
au_remove_pat(ap);
}
/*
		     * Show autocmds for this autopat, or buflocals <buffer=X>
*/
else if (*cmd == NUL)
show_autocmd(ap, event);
/*
* Add autocmd to this autopat, if it's the last one.
*/
else if (ap->next == NULL)
break;
}
}
prev_ap = &ap->next;
}
/*
* Add a new command.
*/
if (*cmd != NUL)
{
/*
	     * If the pattern we want to add a command to does not appear at
	     * the end of the list (or is not in the list at all), add the
	     * pattern at the end of the list.
*/
if (ap == NULL)
{
/* refuse to add buffer-local ap if buffer number is invalid */
if (is_buflocal && (buflocal_nr == 0
|| buflist_findnr(buflocal_nr) == NULL))
{
EMSGN(_("E680: <buffer=%d>: invalid buffer number "),
buflocal_nr);
return FAIL;
}
ap = (AutoPat *)alloc((unsigned)sizeof(AutoPat));
if (ap == NULL)
return FAIL;
ap->pat = vim_strnsave(pat, patlen);
ap->patlen = patlen;
if (ap->pat == NULL)
{
vim_free(ap);
return FAIL;
}
if (is_buflocal)
{
ap->buflocal_nr = buflocal_nr;
ap->reg_prog = NULL;
}
else
{
char_u *reg_pat;
ap->buflocal_nr = 0;
reg_pat = file_pat_to_reg_pat(pat, endpat,
&ap->allow_dirs, TRUE);
if (reg_pat != NULL)
ap->reg_prog = vim_regcomp(reg_pat, RE_MAGIC);
vim_free(reg_pat);
if (reg_pat == NULL || ap->reg_prog == NULL)
{
vim_free(ap->pat);
vim_free(ap);
return FAIL;
}
}
ap->cmds = NULL;
*prev_ap = ap;
ap->next = NULL;
if (group == AUGROUP_ALL)
ap->group = current_augroup;
else
ap->group = group;
}
/*
* Add the autocmd at the end of the AutoCmd list.
*/
prev_ac = &(ap->cmds);
while ((ac = *prev_ac) != NULL)
prev_ac = &ac->next;
ac = (AutoCmd *)alloc((unsigned)sizeof(AutoCmd));
if (ac == NULL)
return FAIL;
ac->cmd = vim_strsave(cmd);
#ifdef FEAT_EVAL
ac->scriptID = current_SID;
#endif
if (ac->cmd == NULL)
{
vim_free(ac);
return FAIL;
}
ac->next = NULL;
*prev_ac = ac;
ac->nested = nested;
}
}
au_cleanup(); /* may really delete removed patterns/commands now */
return OK;
}
/*
* Implementation of ":doautocmd [group] event [fname]".
 * Return OK for success, FAIL for failure.
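 * For example (illustrative), ":doautocmd BufRead x.c" applies the BufRead
 * autocommands as if "x.c" had just been read.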
*/
int
do_doautocmd(
char_u *arg,
int do_msg, /* give message for no matching autocmds? */
int *did_something)
{
char_u *fname;
int nothing_done = TRUE;
int group;
if (did_something != NULL)
*did_something = FALSE;
/*
* Check for a legal group name. If not, use AUGROUP_ALL.
*/
group = au_get_grouparg(&arg);
if (arg == NULL) /* out of memory */
return FAIL;
if (*arg == '*')
{
EMSG(_("E217: Can't execute autocommands for ALL events"));
return FAIL;
}
/*
* Scan over the events.
* If we find an illegal name, return here, don't do anything.
*/
fname = find_end_event(arg, group != AUGROUP_ALL);
if (fname == NULL)
return FAIL;
fname = skipwhite(fname);
/*
* Loop over the events.
*/
while (*arg && !ends_excmd(*arg) && !VIM_ISWHITE(*arg))
if (apply_autocmds_group(event_name2nr(arg, &arg),
fname, NULL, TRUE, group, curbuf, NULL))
nothing_done = FALSE;
if (nothing_done && do_msg)
MSG(_("No matching autocommands"));
if (did_something != NULL)
*did_something = !nothing_done;
#ifdef FEAT_EVAL
return aborting() ? FAIL : OK;
#else
return OK;
#endif
}
/*
* ":doautoall": execute autocommands for each loaded buffer.
*/
void
ex_doautoall(exarg_T *eap)
{
int retval;
aco_save_T aco;
buf_T *buf;
bufref_T bufref;
char_u *arg = eap->arg;
int call_do_modelines = check_nomodeline(&arg);
int did_aucmd;
/*
* This is a bit tricky: For some commands curwin->w_buffer needs to be
* equal to curbuf, but for some buffers there may not be a window.
* So we change the buffer for the current window for a moment. This
* gives problems when the autocommands make changes to the list of
* buffers or windows...
*/
FOR_ALL_BUFFERS(buf)
{
if (buf->b_ml.ml_mfp != NULL)
{
/* find a window for this buffer and save some values */
aucmd_prepbuf(&aco, buf);
set_bufref(&bufref, buf);
/* execute the autocommands for this buffer */
retval = do_doautocmd(arg, FALSE, &did_aucmd);
if (call_do_modelines && did_aucmd)
{
/* Execute the modeline settings, but don't set window-local
* options if we are using the current window for another
* buffer. */
do_modelines(curwin == aucmd_win ? OPT_NOWIN : 0);
}
/* restore the current window */
aucmd_restbuf(&aco);
/* stop if there is some error or buffer was deleted */
if (retval == FAIL || !bufref_valid(&bufref))
break;
}
}
check_cursor(); /* just in case lines got deleted */
}
/*
 * Check *argp for <nomodeline>.  When it is present, advance *argp to after
 * it and return FALSE; otherwise return TRUE.
 * Thus return TRUE when do_modelines() should be called.
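 * For example (illustrative): for ":doautoall <nomodeline> BufRead" this
 * returns FALSE and leaves *argp pointing at "BufRead".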
*/
int
check_nomodeline(char_u **argp)
{
if (STRNCMP(*argp, "<nomodeline>", 12) == 0)
{
*argp = skipwhite(*argp + 12);
return FALSE;
}
return TRUE;
}
/*
* Prepare for executing autocommands for (hidden) buffer "buf".
* Search for a visible window containing the current buffer. If there isn't
* one then use "aucmd_win".
* Set "curbuf" and "curwin" to match "buf".
* When FEAT_AUTOCMD is not defined another version is used, see below.
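 *
 * Typical usage (illustrative sketch, as in ex_doautoall() above):
 *	aco_save_T aco;
 *	aucmd_prepbuf(&aco, buf);
 *	... execute something with "buf" temporarily the current buffer ...
 *	aucmd_restbuf(&aco);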
*/
void
aucmd_prepbuf(
aco_save_T *aco, /* structure to save values in */
buf_T *buf) /* new curbuf */
{
win_T *win;
int save_ea;
#ifdef FEAT_AUTOCHDIR
int save_acd;
#endif
/* Find a window that is for the new buffer */
if (buf == curbuf) /* be quick when buf is curbuf */
win = curwin;
else
FOR_ALL_WINDOWS(win)
if (win->w_buffer == buf)
break;
/* Allocate "aucmd_win" when needed. If this fails (out of memory) fall
* back to using the current window. */
if (win == NULL && aucmd_win == NULL)
{
win_alloc_aucmd_win();
if (aucmd_win == NULL)
win = curwin;
}
if (win == NULL && aucmd_win_used)
/* Strange recursive autocommand, fall back to using the current
* window. Expect a few side effects... */
win = curwin;
aco->save_curwin = curwin;
aco->save_curbuf = curbuf;
if (win != NULL)
{
/* There is a window for "buf" in the current tab page, make it the
* curwin. This is preferred, it has the least side effects (esp. if
* "buf" is curbuf). */
aco->use_aucmd_win = FALSE;
curwin = win;
}
else
{
/* There is no window for "buf", use "aucmd_win". To minimize the side
* effects, insert it in the current tab page.
* Anything related to a window (e.g., setting folds) may have
* unexpected results. */
aco->use_aucmd_win = TRUE;
aucmd_win_used = TRUE;
aucmd_win->w_buffer = buf;
aucmd_win->w_s = &buf->b_s;
++buf->b_nwindows;
win_init_empty(aucmd_win); /* set cursor and topline to safe values */
/* Make sure w_localdir and globaldir are NULL to avoid a chdir() in
* win_enter_ext(). */
vim_free(aucmd_win->w_localdir);
aucmd_win->w_localdir = NULL;
aco->globaldir = globaldir;
globaldir = NULL;
/* Split the current window, put the aucmd_win in the upper half.
* We don't want the BufEnter or WinEnter autocommands. */
block_autocmds();
make_snapshot(SNAP_AUCMD_IDX);
save_ea = p_ea;
p_ea = FALSE;
#ifdef FEAT_AUTOCHDIR
/* Prevent chdir() call in win_enter_ext(), through do_autochdir(). */
save_acd = p_acd;
p_acd = FALSE;
#endif
(void)win_split_ins(0, WSP_TOP, aucmd_win, 0);
(void)win_comp_pos(); /* recompute window positions */
p_ea = save_ea;
#ifdef FEAT_AUTOCHDIR
p_acd = save_acd;
#endif
unblock_autocmds();
curwin = aucmd_win;
}
curbuf = buf;
aco->new_curwin = curwin;
set_bufref(&aco->new_curbuf, curbuf);
}
/*
* Cleanup after executing autocommands for a (hidden) buffer.
* Restore the window as it was (if possible).
* When FEAT_AUTOCMD is not defined another version is used, see below.
*/
void
aucmd_restbuf(
aco_save_T *aco) /* structure holding saved values */
{
int dummy;
if (aco->use_aucmd_win)
{
--curbuf->b_nwindows;
/* Find "aucmd_win", it can't be closed, but it may be in another tab
* page. Do not trigger autocommands here. */
block_autocmds();
if (curwin != aucmd_win)
{
tabpage_T *tp;
win_T *wp;
FOR_ALL_TAB_WINDOWS(tp, wp)
{
if (wp == aucmd_win)
{
if (tp != curtab)
goto_tabpage_tp(tp, TRUE, TRUE);
win_goto(aucmd_win);
goto win_found;
}
}
}
win_found:
/* Remove the window and frame from the tree of frames. */
(void)winframe_remove(curwin, &dummy, NULL);
win_remove(curwin, NULL);
aucmd_win_used = FALSE;
last_status(FALSE); /* may need to remove last status line */
if (!valid_tabpage_win(curtab))
/* no valid window in current tabpage */
close_tabpage(curtab);
restore_snapshot(SNAP_AUCMD_IDX, FALSE);
(void)win_comp_pos(); /* recompute window positions */
unblock_autocmds();
if (win_valid(aco->save_curwin))
curwin = aco->save_curwin;
else
/* Hmm, original window disappeared. Just use the first one. */
curwin = firstwin;
#ifdef FEAT_EVAL
vars_clear(&aucmd_win->w_vars->dv_hashtab); /* free all w: variables */
hash_init(&aucmd_win->w_vars->dv_hashtab); /* re-use the hashtab */
#endif
curbuf = curwin->w_buffer;
vim_free(globaldir);
globaldir = aco->globaldir;
/* the buffer contents may have changed */
check_cursor();
if (curwin->w_topline > curbuf->b_ml.ml_line_count)
{
curwin->w_topline = curbuf->b_ml.ml_line_count;
#ifdef FEAT_DIFF
curwin->w_topfill = 0;
#endif
}
#if defined(FEAT_GUI)
/* Hide the scrollbars from the aucmd_win and update. */
gui_mch_enable_scrollbar(&aucmd_win->w_scrollbars[SBAR_LEFT], FALSE);
gui_mch_enable_scrollbar(&aucmd_win->w_scrollbars[SBAR_RIGHT], FALSE);
gui_may_update_scrollbars();
#endif
}
else
{
/* restore curwin */
if (win_valid(aco->save_curwin))
{
/* Restore the buffer which was previously edited by curwin, if
* it was changed, we are still the same window and the buffer is
* valid. */
if (curwin == aco->new_curwin
&& curbuf != aco->new_curbuf.br_buf
&& bufref_valid(&aco->new_curbuf)
&& aco->new_curbuf.br_buf->b_ml.ml_mfp != NULL)
{
# if defined(FEAT_SYN_HL) || defined(FEAT_SPELL)
if (curwin->w_s == &curbuf->b_s)
curwin->w_s = &aco->new_curbuf.br_buf->b_s;
# endif
--curbuf->b_nwindows;
curbuf = aco->new_curbuf.br_buf;
curwin->w_buffer = curbuf;
++curbuf->b_nwindows;
}
curwin = aco->save_curwin;
curbuf = curwin->w_buffer;
	    /* In case the autocommands moved the cursor to a position that
	     * does not exist in curbuf. */
check_cursor();
}
}
}
static int autocmd_nested = FALSE;
/*
* Execute autocommands for "event" and file name "fname".
* Return TRUE if some commands were executed.
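 * A typical call (illustrative) looks like:
 *	apply_autocmds(EVENT_BUFENTER, NULL, NULL, FALSE, curbuf);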
*/
int
apply_autocmds(
event_T event,
char_u *fname, /* NULL or empty means use actual file name */
char_u *fname_io, /* fname to use for <afile> on cmdline */
int force, /* when TRUE, ignore autocmd_busy */
buf_T *buf) /* buffer for <abuf> */
{
return apply_autocmds_group(event, fname, fname_io, force,
AUGROUP_ALL, buf, NULL);
}
/*
* Like apply_autocmds(), but with extra "eap" argument. This takes care of
 * setting v:cmdarg.
*/
static int
apply_autocmds_exarg(
event_T event,
char_u *fname,
char_u *fname_io,
int force,
buf_T *buf,
exarg_T *eap)
{
return apply_autocmds_group(event, fname, fname_io, force,
AUGROUP_ALL, buf, eap);
}
/*
* Like apply_autocmds(), but handles the caller's retval. If the script
* processing is being aborted or if retval is FAIL when inside a try
 * conditional, no autocommands are executed.  Otherwise, if the autocommands
 * cause the script to be aborted, retval is set to FAIL.
*/
int
apply_autocmds_retval(
event_T event,
char_u *fname, /* NULL or empty means use actual file name */
char_u *fname_io, /* fname to use for <afile> on cmdline */
int force, /* when TRUE, ignore autocmd_busy */
buf_T *buf, /* buffer for <abuf> */
int *retval) /* pointer to caller's retval */
{
int did_cmd;
#ifdef FEAT_EVAL
if (should_abort(*retval))
return FALSE;
#endif
did_cmd = apply_autocmds_group(event, fname, fname_io, force,
AUGROUP_ALL, buf, NULL);
if (did_cmd
#ifdef FEAT_EVAL
&& aborting()
#endif
)
*retval = FAIL;
return did_cmd;
}
/*
* Return TRUE when there is a CursorHold autocommand defined.
*/
int
has_cursorhold(void)
{
return (first_autopat[(int)(get_real_state() == NORMAL_BUSY
? EVENT_CURSORHOLD : EVENT_CURSORHOLDI)] != NULL);
}
/*
* Return TRUE if the CursorHold event can be triggered.
*/
int
trigger_cursorhold(void)
{
int state;
if (!did_cursorhold
&& has_cursorhold()
&& !Recording
&& typebuf.tb_len == 0
#ifdef FEAT_INS_EXPAND
&& !ins_compl_active()
#endif
)
{
state = get_real_state();
if (state == NORMAL_BUSY || (state & INSERT) != 0)
return TRUE;
}
return FALSE;
}
/*
* Return TRUE when there is a CursorMoved autocommand defined.
*/
int
has_cursormoved(void)
{
return (first_autopat[(int)EVENT_CURSORMOVED] != NULL);
}
/*
* Return TRUE when there is a CursorMovedI autocommand defined.
*/
int
has_cursormovedI(void)
{
return (first_autopat[(int)EVENT_CURSORMOVEDI] != NULL);
}
/*
* Return TRUE when there is a TextChanged autocommand defined.
*/
int
has_textchanged(void)
{
return (first_autopat[(int)EVENT_TEXTCHANGED] != NULL);
}
/*
* Return TRUE when there is a TextChangedI autocommand defined.
*/
int
has_textchangedI(void)
{
return (first_autopat[(int)EVENT_TEXTCHANGEDI] != NULL);
}
/*
* Return TRUE when there is an InsertCharPre autocommand defined.
*/
int
has_insertcharpre(void)
{
return (first_autopat[(int)EVENT_INSERTCHARPRE] != NULL);
}
/*
 * Return TRUE when there is a CmdUndefined autocommand defined.
*/
int
has_cmdundefined(void)
{
return (first_autopat[(int)EVENT_CMDUNDEFINED] != NULL);
}
/*
 * Return TRUE when there is a FuncUndefined autocommand defined.
*/
int
has_funcundefined(void)
{
return (first_autopat[(int)EVENT_FUNCUNDEFINED] != NULL);
}
/*
* Execute autocommands for "event" and file name "fname".
* Return TRUE if some commands were executed.
*/
static int
apply_autocmds_group(
event_T event,
char_u *fname, /* NULL or empty means use actual file name */
char_u *fname_io, /* fname to use for <afile> on cmdline, NULL means
use fname */
int force, /* when TRUE, ignore autocmd_busy */
int group, /* group ID, or AUGROUP_ALL */
buf_T *buf, /* buffer for <abuf> */
exarg_T *eap) /* command arguments */
{
char_u *sfname = NULL; /* short file name */
char_u *tail;
int save_changed;
buf_T *old_curbuf;
int retval = FALSE;
char_u *save_sourcing_name;
linenr_T save_sourcing_lnum;
char_u *save_autocmd_fname;
int save_autocmd_fname_full;
int save_autocmd_bufnr;
char_u *save_autocmd_match;
int save_autocmd_busy;
int save_autocmd_nested;
static int nesting = 0;
AutoPatCmd patcmd;
AutoPat *ap;
#ifdef FEAT_EVAL
scid_T save_current_SID;
void *save_funccalp;
char_u *save_cmdarg;
long save_cmdbang;
#endif
static int filechangeshell_busy = FALSE;
#ifdef FEAT_PROFILE
proftime_T wait_time;
#endif
int did_save_redobuff = FALSE;
save_redo_T save_redo;
/*
* Quickly return if there are no autocommands for this event or
* autocommands are blocked.
*/
if (event == NUM_EVENTS || first_autopat[(int)event] == NULL
|| autocmd_blocked > 0)
goto BYPASS_AU;
/*
* When autocommands are busy, new autocommands are only executed when
* explicitly enabled with the "nested" flag.
*/
if (autocmd_busy && !(force || autocmd_nested))
goto BYPASS_AU;
#ifdef FEAT_EVAL
/*
* Quickly return when immediately aborting on error, or when an interrupt
* occurred or an exception was thrown but not caught.
*/
if (aborting())
goto BYPASS_AU;
#endif
/*
* FileChangedShell never nests, because it can create an endless loop.
*/
if (filechangeshell_busy && (event == EVENT_FILECHANGEDSHELL
|| event == EVENT_FILECHANGEDSHELLPOST))
goto BYPASS_AU;
/*
* Ignore events in 'eventignore'.
*/
if (event_ignored(event))
goto BYPASS_AU;
/*
* Allow nesting of autocommands, but restrict the depth, because it's
* possible to create an endless loop.
*/
if (nesting == 10)
{
EMSG(_("E218: autocommand nesting too deep"));
goto BYPASS_AU;
}
/*
* Check if these autocommands are disabled. Used when doing ":all" or
* ":ball".
*/
if ( (autocmd_no_enter
&& (event == EVENT_WINENTER || event == EVENT_BUFENTER))
|| (autocmd_no_leave
&& (event == EVENT_WINLEAVE || event == EVENT_BUFLEAVE)))
goto BYPASS_AU;
/*
* Save the autocmd_* variables and info about the current buffer.
*/
save_autocmd_fname = autocmd_fname;
save_autocmd_fname_full = autocmd_fname_full;
save_autocmd_bufnr = autocmd_bufnr;
save_autocmd_match = autocmd_match;
save_autocmd_busy = autocmd_busy;
save_autocmd_nested = autocmd_nested;
save_changed = curbuf->b_changed;
old_curbuf = curbuf;
/*
* Set the file name to be used for <afile>.
* Make a copy to avoid that changing a buffer name or directory makes it
* invalid.
*/
if (fname_io == NULL)
{
if (event == EVENT_COLORSCHEME || event == EVENT_OPTIONSET)
autocmd_fname = NULL;
else if (fname != NULL && !ends_excmd(*fname))
autocmd_fname = fname;
else if (buf != NULL)
autocmd_fname = buf->b_ffname;
else
autocmd_fname = NULL;
}
else
autocmd_fname = fname_io;
if (autocmd_fname != NULL)
autocmd_fname = vim_strsave(autocmd_fname);
autocmd_fname_full = FALSE; /* call FullName_save() later */
/*
* Set the buffer number to be used for <abuf>.
*/
if (buf == NULL)
autocmd_bufnr = 0;
else
autocmd_bufnr = buf->b_fnum;
/*
* When the file name is NULL or empty, use the file name of buffer "buf".
* Always use the full path of the file name to match with, in case
* "allow_dirs" is set.
*/
if (fname == NULL || *fname == NUL)
{
if (buf == NULL)
fname = NULL;
else
{
#ifdef FEAT_SYN_HL
if (event == EVENT_SYNTAX)
fname = buf->b_p_syn;
else
#endif
if (event == EVENT_FILETYPE)
fname = buf->b_p_ft;
else
{
if (buf->b_sfname != NULL)
sfname = vim_strsave(buf->b_sfname);
fname = buf->b_ffname;
}
}
if (fname == NULL)
fname = (char_u *)"";
fname = vim_strsave(fname); /* make a copy, so we can change it */
}
else
{
sfname = vim_strsave(fname);
/* Don't try expanding FileType, Syntax, FuncUndefined, WindowID,
* ColorScheme or QuickFixCmd* */
if (event == EVENT_FILETYPE
|| event == EVENT_SYNTAX
|| event == EVENT_FUNCUNDEFINED
|| event == EVENT_REMOTEREPLY
|| event == EVENT_SPELLFILEMISSING
|| event == EVENT_QUICKFIXCMDPRE
|| event == EVENT_COLORSCHEME
|| event == EVENT_OPTIONSET
|| event == EVENT_QUICKFIXCMDPOST)
fname = vim_strsave(fname);
else
fname = FullName_save(fname, FALSE);
}
if (fname == NULL) /* out of memory */
{
vim_free(sfname);
retval = FALSE;
goto BYPASS_AU;
}
#ifdef BACKSLASH_IN_FILENAME
/*
* Replace all backslashes with forward slashes. This makes the
* autocommand patterns portable between Unix and MS-DOS.
*/
if (sfname != NULL)
forward_slash(sfname);
forward_slash(fname);
#endif
#ifdef VMS
/* remove version for correct match */
if (sfname != NULL)
vms_remove_version(sfname);
vms_remove_version(fname);
#endif
/*
* Set the name to be used for <amatch>.
*/
autocmd_match = fname;
/* Don't redraw while doing auto commands. */
++RedrawingDisabled;
save_sourcing_name = sourcing_name;
sourcing_name = NULL; /* don't free this one */
save_sourcing_lnum = sourcing_lnum;
sourcing_lnum = 0; /* no line number here */
#ifdef FEAT_EVAL
save_current_SID = current_SID;
# ifdef FEAT_PROFILE
if (do_profiling == PROF_YES)
prof_child_enter(&wait_time); /* doesn't count for the caller itself */
# endif
/* Don't use local function variables, if called from a function */
save_funccalp = save_funccal();
#endif
/*
* When starting to execute autocommands, save the search patterns.
*/
if (!autocmd_busy)
{
save_search_patterns();
#ifdef FEAT_INS_EXPAND
if (!ins_compl_active())
#endif
{
saveRedobuff(&save_redo);
did_save_redobuff = TRUE;
}
did_filetype = keep_filetype;
}
/*
* Note that we are applying autocmds. Some commands need to know.
*/
autocmd_busy = TRUE;
filechangeshell_busy = (event == EVENT_FILECHANGEDSHELL);
++nesting; /* see matching decrement below */
/* Remember that FileType was triggered. Used for did_filetype(). */
if (event == EVENT_FILETYPE)
did_filetype = TRUE;
tail = gettail(fname);
/* Find first autocommand that matches */
patcmd.curpat = first_autopat[(int)event];
patcmd.nextcmd = NULL;
patcmd.group = group;
patcmd.fname = fname;
patcmd.sfname = sfname;
patcmd.tail = tail;
patcmd.event = event;
patcmd.arg_bufnr = autocmd_bufnr;
patcmd.next = NULL;
auto_next_pat(&patcmd, FALSE);
/* found one, start executing the autocommands */
if (patcmd.curpat != NULL)
{
/* add to active_apc_list */
patcmd.next = active_apc_list;
active_apc_list = &patcmd;
#ifdef FEAT_EVAL
/* set v:cmdarg (only when there is a matching pattern) */
save_cmdbang = (long)get_vim_var_nr(VV_CMDBANG);
if (eap != NULL)
{
save_cmdarg = set_cmdarg(eap, NULL);
set_vim_var_nr(VV_CMDBANG, (long)eap->forceit);
}
else
save_cmdarg = NULL; /* avoid gcc warning */
#endif
retval = TRUE;
/* mark the last pattern, to avoid an endless loop when more patterns
* are added when executing autocommands */
for (ap = patcmd.curpat; ap->next != NULL; ap = ap->next)
ap->last = FALSE;
ap->last = TRUE;
check_lnums(TRUE); /* make sure cursor and topline are valid */
do_cmdline(NULL, getnextac, (void *)&patcmd,
DOCMD_NOWAIT|DOCMD_VERBOSE|DOCMD_REPEAT);
#ifdef FEAT_EVAL
if (eap != NULL)
{
(void)set_cmdarg(NULL, save_cmdarg);
set_vim_var_nr(VV_CMDBANG, save_cmdbang);
}
#endif
/* delete from active_apc_list */
if (active_apc_list == &patcmd) /* just in case */
active_apc_list = patcmd.next;
}
--RedrawingDisabled;
autocmd_busy = save_autocmd_busy;
filechangeshell_busy = FALSE;
autocmd_nested = save_autocmd_nested;
vim_free(sourcing_name);
sourcing_name = save_sourcing_name;
sourcing_lnum = save_sourcing_lnum;
vim_free(autocmd_fname);
autocmd_fname = save_autocmd_fname;
autocmd_fname_full = save_autocmd_fname_full;
autocmd_bufnr = save_autocmd_bufnr;
autocmd_match = save_autocmd_match;
#ifdef FEAT_EVAL
current_SID = save_current_SID;
restore_funccal(save_funccalp);
# ifdef FEAT_PROFILE
if (do_profiling == PROF_YES)
prof_child_exit(&wait_time);
# endif
#endif
vim_free(fname);
vim_free(sfname);
--nesting; /* see matching increment above */
/*
* When stopping to execute autocommands, restore the search patterns and
* the redo buffer. Free any buffers in the au_pending_free_buf list and
* free any windows in the au_pending_free_win list.
*/
if (!autocmd_busy)
{
restore_search_patterns();
if (did_save_redobuff)
restoreRedobuff(&save_redo);
did_filetype = FALSE;
while (au_pending_free_buf != NULL)
{
buf_T *b = au_pending_free_buf->b_next;
vim_free(au_pending_free_buf);
au_pending_free_buf = b;
}
while (au_pending_free_win != NULL)
{
win_T *w = au_pending_free_win->w_next;
vim_free(au_pending_free_win);
au_pending_free_win = w;
}
}
/*
* Some events don't set or reset the Changed flag.
* Check if still in the same buffer!
*/
if (curbuf == old_curbuf
&& (event == EVENT_BUFREADPOST
|| event == EVENT_BUFWRITEPOST
|| event == EVENT_FILEAPPENDPOST
|| event == EVENT_VIMLEAVE
|| event == EVENT_VIMLEAVEPRE))
{
#ifdef FEAT_TITLE
if (curbuf->b_changed != save_changed)
need_maketitle = TRUE;
#endif
curbuf->b_changed = save_changed;
}
au_cleanup(); /* may really delete removed patterns/commands now */
BYPASS_AU:
/* When wiping out a buffer make sure all its buffer-local autocommands
* are deleted. */
if (event == EVENT_BUFWIPEOUT && buf != NULL)
aubuflocal_remove(buf);
if (retval == OK && event == EVENT_FILETYPE)
au_did_filetype = TRUE;
return retval;
}
# ifdef FEAT_EVAL
static char_u *old_termresponse = NULL;
# endif
/*
 * Block triggering autocommands until unblock_autocmds() is called.
* Can be used recursively, so long as it's symmetric.
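 * Typical usage (illustrative sketch):
 *	block_autocmds();
 *	... make changes that must not trigger autocommands ...
 *	unblock_autocmds();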
*/
void
block_autocmds(void)
{
# ifdef FEAT_EVAL
/* Remember the value of v:termresponse. */
if (autocmd_blocked == 0)
old_termresponse = get_vim_var_str(VV_TERMRESPONSE);
# endif
++autocmd_blocked;
}
void
unblock_autocmds(void)
{
--autocmd_blocked;
# ifdef FEAT_EVAL
/* When v:termresponse was set while autocommands were blocked, trigger
* the autocommands now. Esp. useful when executing a shell command
* during startup (vimdiff). */
if (autocmd_blocked == 0
&& get_vim_var_str(VV_TERMRESPONSE) != old_termresponse)
apply_autocmds(EVENT_TERMRESPONSE, NULL, NULL, FALSE, curbuf);
# endif
}
int
is_autocmd_blocked(void)
{
return autocmd_blocked != 0;
}
/*
* Find next autocommand pattern that matches.
*/
static void
auto_next_pat(
AutoPatCmd *apc,
int stop_at_last) /* stop when 'last' flag is set */
{
AutoPat *ap;
AutoCmd *cp;
char_u *name;
char *s;
vim_free(sourcing_name);
sourcing_name = NULL;
for (ap = apc->curpat; ap != NULL && !got_int; ap = ap->next)
{
apc->curpat = NULL;
/* Only use a pattern when it has not been removed, has commands and
* the group matches. For buffer-local autocommands only check the
* buffer number. */
if (ap->pat != NULL && ap->cmds != NULL
&& (apc->group == AUGROUP_ALL || apc->group == ap->group))
{
/* execution-condition */
if (ap->buflocal_nr == 0
? (match_file_pat(NULL, &ap->reg_prog, apc->fname,
apc->sfname, apc->tail, ap->allow_dirs))
: ap->buflocal_nr == apc->arg_bufnr)
{
name = event_nr2name(apc->event);
s = _("%s Auto commands for \"%s\"");
sourcing_name = alloc((unsigned)(STRLEN(s)
+ STRLEN(name) + ap->patlen + 1));
if (sourcing_name != NULL)
{
sprintf((char *)sourcing_name, s,
(char *)name, (char *)ap->pat);
if (p_verbose >= 8)
{
verbose_enter();
smsg((char_u *)_("Executing %s"), sourcing_name);
verbose_leave();
}
}
apc->curpat = ap;
apc->nextcmd = ap->cmds;
/* mark last command */
for (cp = ap->cmds; cp->next != NULL; cp = cp->next)
cp->last = FALSE;
cp->last = TRUE;
}
line_breakcheck();
if (apc->curpat != NULL) /* found a match */
break;
}
if (stop_at_last && ap->last)
break;
}
}
/*
* Get next autocommand command.
* Called by do_cmdline() to get the next line for ":if".
* Returns allocated string, or NULL for end of autocommands.
*/
char_u *
getnextac(int c UNUSED, void *cookie, int indent UNUSED)
{
AutoPatCmd *acp = (AutoPatCmd *)cookie;
char_u *retval;
AutoCmd *ac;
/* Can be called again after returning the last line. */
if (acp->curpat == NULL)
return NULL;
/* repeat until we find an autocommand to execute */
for (;;)
{
/* skip removed commands */
while (acp->nextcmd != NULL && acp->nextcmd->cmd == NULL)
if (acp->nextcmd->last)
acp->nextcmd = NULL;
else
acp->nextcmd = acp->nextcmd->next;
if (acp->nextcmd != NULL)
break;
/* at end of commands, find next pattern that matches */
if (acp->curpat->last)
acp->curpat = NULL;
else
acp->curpat = acp->curpat->next;
if (acp->curpat != NULL)
auto_next_pat(acp, TRUE);
if (acp->curpat == NULL)
return NULL;
}
ac = acp->nextcmd;
if (p_verbose >= 9)
{
verbose_enter_scroll();
smsg((char_u *)_("autocommand %s"), ac->cmd);
msg_puts((char_u *)"\n"); /* don't overwrite this either */
verbose_leave_scroll();
}
retval = vim_strsave(ac->cmd);
autocmd_nested = ac->nested;
#ifdef FEAT_EVAL
current_SID = ac->scriptID;
#endif
if (ac->last)
acp->nextcmd = NULL;
else
acp->nextcmd = ac->next;
return retval;
}
/*
* Return TRUE if there is a matching autocommand for "fname".
 * To account for buffer-local autocommands, the function needs to know
* in which buffer the file will be opened.
*/
int
has_autocmd(event_T event, char_u *sfname, buf_T *buf)
{
AutoPat *ap;
char_u *fname;
char_u *tail = gettail(sfname);
int retval = FALSE;
fname = FullName_save(sfname, FALSE);
if (fname == NULL)
return FALSE;
#ifdef BACKSLASH_IN_FILENAME
/*
* Replace all backslashes with forward slashes. This makes the
* autocommand patterns portable between Unix and MS-DOS.
*/
sfname = vim_strsave(sfname);
if (sfname != NULL)
forward_slash(sfname);
forward_slash(fname);
#endif
for (ap = first_autopat[(int)event]; ap != NULL; ap = ap->next)
if (ap->pat != NULL && ap->cmds != NULL
&& (ap->buflocal_nr == 0
? match_file_pat(NULL, &ap->reg_prog,
fname, sfname, tail, ap->allow_dirs)
: buf != NULL && ap->buflocal_nr == buf->b_fnum
))
{
retval = TRUE;
break;
}
vim_free(fname);
#ifdef BACKSLASH_IN_FILENAME
vim_free(sfname);
#endif
return retval;
}
#if defined(FEAT_CMDL_COMPL) || defined(PROTO)
/*
* Function given to ExpandGeneric() to obtain the list of autocommand group
* names.
*/
char_u *
get_augroup_name(expand_T *xp UNUSED, int idx)
{
    if (idx == augroups.ga_len)		/* add "END" at the end */
return (char_u *)"END";
if (idx >= augroups.ga_len) /* end of list */
return NULL;
if (AUGROUP_NAME(idx) == NULL || AUGROUP_NAME(idx) == get_deleted_augroup())
/* skip deleted entries */
return (char_u *)"";
return AUGROUP_NAME(idx); /* return a name */
}
static int include_groups = FALSE;
char_u *
set_context_in_autocmd(
expand_T *xp,
char_u *arg,
int doautocmd) /* TRUE for :doauto*, FALSE for :autocmd */
{
char_u *p;
int group;
/* check for a group name, skip it if present */
include_groups = FALSE;
p = arg;
group = au_get_grouparg(&arg);
if (group == AUGROUP_ERROR)
return NULL;
/* If there only is a group name that's what we expand. */
if (*arg == NUL && group != AUGROUP_ALL && !VIM_ISWHITE(arg[-1]))
{
arg = p;
group = AUGROUP_ALL;
}
/* skip over event name */
for (p = arg; *p != NUL && !VIM_ISWHITE(*p); ++p)
if (*p == ',')
arg = p + 1;
if (*p == NUL)
{
if (group == AUGROUP_ALL)
include_groups = TRUE;
xp->xp_context = EXPAND_EVENTS; /* expand event name */
xp->xp_pattern = arg;
return NULL;
}
/* skip over pattern */
arg = skipwhite(p);
while (*arg && (!VIM_ISWHITE(*arg) || arg[-1] == '\\'))
arg++;
if (*arg)
return arg; /* expand (next) command */
if (doautocmd)
xp->xp_context = EXPAND_FILES; /* expand file names */
else
xp->xp_context = EXPAND_NOTHING; /* pattern is not expanded */
return NULL;
}
/*
* Function given to ExpandGeneric() to obtain the list of event names.
*/
char_u *
get_event_name(expand_T *xp UNUSED, int idx)
{
if (idx < augroups.ga_len) /* First list group names, if wanted */
{
if (!include_groups || AUGROUP_NAME(idx) == NULL
|| AUGROUP_NAME(idx) == get_deleted_augroup())
return (char_u *)""; /* skip deleted entries */
return AUGROUP_NAME(idx); /* return a name */
}
return (char_u *)event_names[idx - augroups.ga_len].name;
}
#endif /* FEAT_CMDL_COMPL */
/*
* Return TRUE if autocmd is supported.
*/
int
autocmd_supported(char_u *name)
{
char_u *p;
return (event_name2nr(name, &p) != NUM_EVENTS);
}
/*
 * Return TRUE if an autocommand is defined for a group, event and
 * pattern: The group can be omitted to accept any group. "event" and
 * "pattern" can be NULL to accept any event and pattern.  Buffer-local
 * patterns <buffer> or <buffer=N> are accepted.
* Used for:
* exists("#Group") or
* exists("#Group#Event") or
* exists("#Group#Event#pat") or
* exists("#Event") or
* exists("#Event#pat")
*/
int
au_exists(char_u *arg)
{
char_u *arg_save;
char_u *pattern = NULL;
char_u *event_name;
char_u *p;
event_T event;
AutoPat *ap;
buf_T *buflocal_buf = NULL;
int group;
int retval = FALSE;
/* Make a copy so that we can change the '#' chars to a NUL. */
arg_save = vim_strsave(arg);
if (arg_save == NULL)
return FALSE;
p = vim_strchr(arg_save, '#');
if (p != NULL)
*p++ = NUL;
/* First, look for an autocmd group name */
group = au_find_group(arg_save);
if (group == AUGROUP_ERROR)
{
/* Didn't match a group name, assume the first argument is an event. */
group = AUGROUP_ALL;
event_name = arg_save;
}
else
{
if (p == NULL)
{
/* "Group": group name is present and it's recognized */
retval = TRUE;
goto theend;
}
/* Must be "Group#Event" or "Group#Event#pat". */
event_name = p;
p = vim_strchr(event_name, '#');
if (p != NULL)
*p++ = NUL; /* "Group#Event#pat" */
}
pattern = p; /* "pattern" is NULL when there is no pattern */
/* find the index (enum) for the event name */
event = event_name2nr(event_name, &p);
/* return FALSE if the event name is not recognized */
if (event == NUM_EVENTS)
goto theend;
/* Find the first autocommand for this event.
* If there isn't any, return FALSE;
* If there is one and no pattern given, return TRUE; */
ap = first_autopat[(int)event];
if (ap == NULL)
goto theend;
/* if pattern is "<buffer>", special handling is needed which uses curbuf */
/* for pattern "<buffer=N>, fnamecmp() will work fine */
if (pattern != NULL && STRICMP(pattern, "<buffer>") == 0)
buflocal_buf = curbuf;
/* Check if there is an autocommand with the given pattern. */
for ( ; ap != NULL; ap = ap->next)
/* only use a pattern when it has not been removed and has commands. */
/* For buffer-local autocommands, fnamecmp() works fine. */
if (ap->pat != NULL && ap->cmds != NULL
&& (group == AUGROUP_ALL || ap->group == group)
&& (pattern == NULL
|| (buflocal_buf == NULL
? fnamecmp(ap->pat, pattern) == 0
: ap->buflocal_nr == buflocal_buf->b_fnum)))
{
retval = TRUE;
break;
}
theend:
vim_free(arg_save);
return retval;
}
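/*
 * Example (illustrative only): calls matching the exists() forms listed
 * above; "MyGroup" is a hypothetical group name:
 *	au_exists((char_u *)"MyGroup")		for exists("#MyGroup")
 *	au_exists((char_u *)"BufRead#*.c")	for exists("#BufRead#*.c")
 *	au_exists((char_u *)"MyGroup#BufRead#<buffer>")
 */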
#else /* FEAT_AUTOCMD */
/*
* Prepare for executing commands for (hidden) buffer "buf".
* This is the non-autocommand version, it simply saves "curbuf" and sets
* "curbuf" and "curwin" to match "buf".
*/
void
aucmd_prepbuf(
aco_save_T *aco, /* structure to save values in */
buf_T *buf) /* new curbuf */
{
aco->save_curbuf = curbuf;
--curbuf->b_nwindows;
curbuf = buf;
curwin->w_buffer = buf;
++curbuf->b_nwindows;
}
/*
* Restore after executing commands for a (hidden) buffer.
* This is the non-autocommand version.
*/
void
aucmd_restbuf(
aco_save_T *aco) /* structure holding saved values */
{
--curbuf->b_nwindows;
curbuf = aco->save_curbuf;
curwin->w_buffer = curbuf;
++curbuf->b_nwindows;
}
#endif /* FEAT_AUTOCMD */
#if defined(FEAT_AUTOCMD) || defined(FEAT_WILDIGN) || defined(PROTO)
/*
* Try matching a filename with a "pattern" ("prog" is NULL), or use the
* precompiled regprog "prog" ("pattern" is NULL). That avoids calling
* vim_regcomp() often.
* Used for autocommands and 'wildignore'.
* Returns TRUE if there is a match, FALSE otherwise.
*/
static int
match_file_pat(
char_u *pattern, /* pattern to match with */
regprog_T **prog, /* pre-compiled regprog or NULL */
char_u *fname, /* full path of file name */
char_u *sfname, /* short file name or NULL */
char_u *tail, /* tail of path */
int allow_dirs) /* allow matching with dir */
{
regmatch_T regmatch;
int result = FALSE;
regmatch.rm_ic = p_fic; /* ignore case if 'fileignorecase' is set */
if (prog != NULL)
regmatch.regprog = *prog;
else
regmatch.regprog = vim_regcomp(pattern, RE_MAGIC);
/*
* Try for a match with the pattern with:
* 1. the full file name, when the pattern has a '/'.
* 2. the short file name, when the pattern has a '/'.
* 3. the tail of the file name, when the pattern has no '/'.
*/
if (regmatch.regprog != NULL
&& ((allow_dirs
&& (vim_regexec(&regmatch, fname, (colnr_T)0)
|| (sfname != NULL
&& vim_regexec(&regmatch, sfname, (colnr_T)0))))
|| (!allow_dirs && vim_regexec(&regmatch, tail, (colnr_T)0))))
result = TRUE;
if (prog != NULL)
*prog = regmatch.regprog;
else
vim_regfree(regmatch.regprog);
return result;
}
#endif
#if defined(FEAT_WILDIGN) || defined(PROTO)
/*
* Return TRUE if a file matches with a pattern in "list".
* "list" is a comma-separated list of patterns, like 'wildignore'.
* "sfname" is the short file name or NULL, "ffname" the long file name.
*/
int
match_file_list(char_u *list, char_u *sfname, char_u *ffname)
{
char_u buf[100];
char_u *tail;
char_u *regpat;
char allow_dirs;
int match;
char_u *p;
tail = gettail(sfname);
/* try all patterns in 'wildignore' */
p = list;
while (*p)
{
copy_option_part(&p, buf, 100, ",");
regpat = file_pat_to_reg_pat(buf, NULL, &allow_dirs, FALSE);
if (regpat == NULL)
break;
match = match_file_pat(regpat, NULL, ffname, sfname,
tail, (int)allow_dirs);
vim_free(regpat);
if (match)
return TRUE;
}
return FALSE;
}
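/*
 * Example (illustrative only): with a 'wildignore' style list of
 * "*.o,*.obj" the call below returns TRUE, because the tail "main.o"
 * matches the first pattern in the list:
 *	match_file_list((char_u *)"*.o,*.obj", (char_u *)"src/main.o",
 *					       (char_u *)"/tmp/src/main.o");
 */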
#endif
/*
* Convert the given pattern "pat" which has shell style wildcards in it, into
* a regular expression, and return the result in allocated memory. If there
* is a directory path separator to be matched, then TRUE is put in
* allow_dirs, otherwise FALSE is put there -- webb.
* Handle backslashes before special characters, like "\*" and "\ ".
*
* Returns NULL when out of memory.
*/
char_u *
file_pat_to_reg_pat(
char_u *pat,
char_u *pat_end, /* first char after pattern or NULL */
char *allow_dirs, /* Result passed back out in here */
int no_bslash UNUSED) /* Don't use a backward slash as pathsep */
{
int size = 2; /* '^' at start, '$' at end */
char_u *endp;
char_u *reg_pat;
char_u *p;
int i;
int nested = 0;
int add_dollar = TRUE;
if (allow_dirs != NULL)
*allow_dirs = FALSE;
if (pat_end == NULL)
pat_end = pat + STRLEN(pat);
for (p = pat; p < pat_end; p++)
{
switch (*p)
{
case '*':
case '.':
case ',':
case '{':
case '}':
case '~':
size += 2; /* extra backslash */
break;
#ifdef BACKSLASH_IN_FILENAME
case '\\':
case '/':
size += 4; /* could become "[\/]" */
break;
#endif
default:
size++;
# ifdef FEAT_MBYTE
if (enc_dbcs != 0 && (*mb_ptr2len)(p) > 1)
{
++p;
++size;
}
# endif
break;
}
}
reg_pat = alloc(size + 1);
if (reg_pat == NULL)
return NULL;
i = 0;
if (pat[0] == '*')
while (pat[0] == '*' && pat < pat_end - 1)
pat++;
else
reg_pat[i++] = '^';
endp = pat_end - 1;
if (endp >= pat && *endp == '*')
{
while (endp - pat > 0 && *endp == '*')
endp--;
add_dollar = FALSE;
}
for (p = pat; *p && nested >= 0 && p <= endp; p++)
{
switch (*p)
{
case '*':
reg_pat[i++] = '.';
reg_pat[i++] = '*';
while (p[1] == '*') /* "**" matches like "*" */
++p;
break;
case '.':
case '~':
reg_pat[i++] = '\\';
reg_pat[i++] = *p;
break;
case '?':
reg_pat[i++] = '.';
break;
case '\\':
if (p[1] == NUL)
break;
#ifdef BACKSLASH_IN_FILENAME
if (!no_bslash)
{
/* translate:
* "\x" to "\\x" e.g., "dir\file"
* "\*" to "\\.*" e.g., "dir\*.c"
* "\?" to "\\." e.g., "dir\??.c"
* "\+" to "\+" e.g., "fileX\+.c"
*/
if ((vim_isfilec(p[1]) || p[1] == '*' || p[1] == '?')
&& p[1] != '+')
{
reg_pat[i++] = '[';
reg_pat[i++] = '\\';
reg_pat[i++] = '/';
reg_pat[i++] = ']';
if (allow_dirs != NULL)
*allow_dirs = TRUE;
break;
}
}
#endif
/* Undo escaping from ExpandEscape():
* foo\?bar -> foo?bar
* foo\%bar -> foo%bar
* foo\,bar -> foo,bar
* foo\ bar -> foo bar
* Don't unescape \, * and others that are also special in a
* regexp.
* An escaped { must be unescaped since we use magic not
* verymagic. Use "\\\{n,m\}"" to get "\{n,m}".
*/
if (*++p == '?'
#ifdef BACKSLASH_IN_FILENAME
&& no_bslash
#endif
)
reg_pat[i++] = '?';
else
if (*p == ',' || *p == '%' || *p == '#'
|| vim_isspace(*p) || *p == '{' || *p == '}')
reg_pat[i++] = *p;
else if (*p == '\\' && p[1] == '\\' && p[2] == '{')
{
reg_pat[i++] = '\\';
reg_pat[i++] = '{';
p += 2;
}
else
{
if (allow_dirs != NULL && vim_ispathsep(*p)
#ifdef BACKSLASH_IN_FILENAME
&& (!no_bslash || *p != '\\')
#endif
)
*allow_dirs = TRUE;
reg_pat[i++] = '\\';
reg_pat[i++] = *p;
}
break;
#ifdef BACKSLASH_IN_FILENAME
case '/':
reg_pat[i++] = '[';
reg_pat[i++] = '\\';
reg_pat[i++] = '/';
reg_pat[i++] = ']';
if (allow_dirs != NULL)
*allow_dirs = TRUE;
break;
#endif
case '{':
reg_pat[i++] = '\\';
reg_pat[i++] = '(';
nested++;
break;
case '}':
reg_pat[i++] = '\\';
reg_pat[i++] = ')';
--nested;
break;
case ',':
if (nested)
{
reg_pat[i++] = '\\';
reg_pat[i++] = '|';
}
else
reg_pat[i++] = ',';
break;
default:
# ifdef FEAT_MBYTE
if (enc_dbcs != 0 && (*mb_ptr2len)(p) > 1)
reg_pat[i++] = *p++;
else
# endif
if (allow_dirs != NULL && vim_ispathsep(*p))
*allow_dirs = TRUE;
reg_pat[i++] = *p;
break;
}
}
if (add_dollar)
reg_pat[i++] = '$';
reg_pat[i] = NUL;
if (nested != 0)
{
if (nested < 0)
EMSG(_("E219: Missing {."));
else
EMSG(_("E220: Missing }."));
vim_free(reg_pat);
reg_pat = NULL;
}
return reg_pat;
}
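/*
 * Examples (illustrative only) of the conversion above:
 *	"*.c"	  becomes "\.c$"	  (a leading "*" drops the "^" anchor)
 *	"a{b,c}"  becomes "^a\(b\|c\)$"	  ("{}" turns into a branch group)
 *	"foo?"	  becomes "^foo.$"	  ("?" matches any single character)
 */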
#if defined(EINTR) || defined(PROTO)
/*
* Version of read() that retries when interrupted by EINTR (possibly
* by a SIGWINCH).
*/
long
read_eintr(int fd, void *buf, size_t bufsize)
{
long ret;
for (;;)
{
ret = vim_read(fd, buf, bufsize);
if (ret >= 0 || errno != EINTR)
break;
}
return ret;
}
/*
* Version of write() that retries when interrupted by EINTR (possibly
* by a SIGWINCH).
*/
long
write_eintr(int fd, void *buf, size_t bufsize)
{
long ret = 0;
long wlen;
/* Repeat the write() as long as it doesn't fail, except when it is
* interrupted by a signal. */
while (ret < (long)bufsize)
{
wlen = vim_write(fd, (char *)buf + ret, bufsize - ret);
if (wlen < 0)
{
if (errno != EINTR)
break;
}
else
ret += wlen;
}
return ret;
}
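/*
 * Example (illustrative only): a read loop built on read_eintr(), where
 * "fd" and process_bytes() are hypothetical:
 *
 *	char	buf[1024];
 *	long	n;
 *
 *	while ((n = read_eintr(fd, buf, sizeof(buf))) > 0)
 *	    process_bytes(buf, n);
 *	if (n < 0)
 *	    handle_error();	errno is a real error here, not EINTR
 */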
#endif
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_2958_1 |
crossvul-cpp_data_good_295_0 | /* $OpenBSD: auth2-gss.c,v 1.29 2018/07/31 03:10:27 djm Exp $ */
/*
* Copyright (c) 2001-2003 Simon Wilkinson. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef GSSAPI
#include <sys/types.h>
#include "xmalloc.h"
#include "sshkey.h"
#include "hostfile.h"
#include "auth.h"
#include "ssh2.h"
#include "log.h"
#include "dispatch.h"
#include "sshbuf.h"
#include "ssherr.h"
#include "servconf.h"
#include "packet.h"
#include "ssh-gss.h"
#include "monitor_wrap.h"
extern ServerOptions options;
static int input_gssapi_token(int type, u_int32_t plen, struct ssh *ssh);
static int input_gssapi_mic(int type, u_int32_t plen, struct ssh *ssh);
static int input_gssapi_exchange_complete(int type, u_int32_t plen, struct ssh *ssh);
static int input_gssapi_errtok(int, u_int32_t, struct ssh *);
/*
 * We only support those mechanisms that we know about (i.e. ones where we
 * know how to check authorization, such as the local user kuserok check)
*/
static int
userauth_gssapi(struct ssh *ssh)
{
Authctxt *authctxt = ssh->authctxt;
gss_OID_desc goid = {0, NULL};
Gssctxt *ctxt = NULL;
int r, present;
u_int mechs;
OM_uint32 ms;
size_t len;
u_char *doid = NULL;
if ((r = sshpkt_get_u32(ssh, &mechs)) != 0)
fatal("%s: %s", __func__, ssh_err(r));
if (mechs == 0) {
debug("Mechanism negotiation is not supported");
return (0);
}
do {
mechs--;
free(doid);
present = 0;
if ((r = sshpkt_get_string(ssh, &doid, &len)) != 0)
fatal("%s: %s", __func__, ssh_err(r));
if (len > 2 && doid[0] == SSH_GSS_OIDTYPE &&
doid[1] == len - 2) {
goid.elements = doid + 2;
goid.length = len - 2;
ssh_gssapi_test_oid_supported(&ms, &goid, &present);
} else {
logit("Badly formed OID received");
}
} while (mechs > 0 && !present);
if (!present) {
free(doid);
authctxt->server_caused_failure = 1;
return (0);
}
if (!authctxt->valid || authctxt->user == NULL) {
debug2("%s: disabled because of invalid user", __func__);
free(doid);
return (0);
}
if (GSS_ERROR(PRIVSEP(ssh_gssapi_server_ctx(&ctxt, &goid)))) {
if (ctxt != NULL)
ssh_gssapi_delete_ctx(&ctxt);
free(doid);
authctxt->server_caused_failure = 1;
return (0);
}
authctxt->methoddata = (void *)ctxt;
/* Return the OID that we received */
if ((r = sshpkt_start(ssh, SSH2_MSG_USERAUTH_GSSAPI_RESPONSE)) != 0 ||
(r = sshpkt_put_string(ssh, doid, len)) != 0 ||
(r = sshpkt_send(ssh)) != 0)
fatal("%s: %s", __func__, ssh_err(r));
free(doid);
ssh_dispatch_set(ssh, SSH2_MSG_USERAUTH_GSSAPI_TOKEN, &input_gssapi_token);
ssh_dispatch_set(ssh, SSH2_MSG_USERAUTH_GSSAPI_ERRTOK, &input_gssapi_errtok);
authctxt->postponed = 1;
return (0);
}
static int
input_gssapi_token(int type, u_int32_t plen, struct ssh *ssh)
{
Authctxt *authctxt = ssh->authctxt;
Gssctxt *gssctxt;
gss_buffer_desc send_tok = GSS_C_EMPTY_BUFFER;
gss_buffer_desc recv_tok;
OM_uint32 maj_status, min_status, flags;
u_char *p;
size_t len;
int r;
if (authctxt == NULL || (authctxt->methoddata == NULL && !use_privsep))
fatal("No authentication or GSSAPI context");
gssctxt = authctxt->methoddata;
if ((r = sshpkt_get_string(ssh, &p, &len)) != 0 ||
(r = sshpkt_get_end(ssh)) != 0)
fatal("%s: %s", __func__, ssh_err(r));
recv_tok.value = p;
recv_tok.length = len;
maj_status = PRIVSEP(ssh_gssapi_accept_ctx(gssctxt, &recv_tok,
&send_tok, &flags));
free(p);
if (GSS_ERROR(maj_status)) {
if (send_tok.length != 0) {
if ((r = sshpkt_start(ssh,
SSH2_MSG_USERAUTH_GSSAPI_ERRTOK)) != 0 ||
(r = sshpkt_put_string(ssh, send_tok.value,
send_tok.length)) != 0 ||
(r = sshpkt_send(ssh)) != 0)
fatal("%s: %s", __func__, ssh_err(r));
}
authctxt->postponed = 0;
ssh_dispatch_set(ssh, SSH2_MSG_USERAUTH_GSSAPI_TOKEN, NULL);
userauth_finish(ssh, 0, "gssapi-with-mic", NULL);
} else {
if (send_tok.length != 0) {
if ((r = sshpkt_start(ssh,
SSH2_MSG_USERAUTH_GSSAPI_TOKEN)) != 0 ||
(r = sshpkt_put_string(ssh, send_tok.value,
send_tok.length)) != 0 ||
(r = sshpkt_send(ssh)) != 0)
fatal("%s: %s", __func__, ssh_err(r));
}
if (maj_status == GSS_S_COMPLETE) {
ssh_dispatch_set(ssh, SSH2_MSG_USERAUTH_GSSAPI_TOKEN, NULL);
if (flags & GSS_C_INTEG_FLAG)
ssh_dispatch_set(ssh, SSH2_MSG_USERAUTH_GSSAPI_MIC,
&input_gssapi_mic);
else
ssh_dispatch_set(ssh,
SSH2_MSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE,
&input_gssapi_exchange_complete);
}
}
gss_release_buffer(&min_status, &send_tok);
return 0;
}
static int
input_gssapi_errtok(int type, u_int32_t plen, struct ssh *ssh)
{
Authctxt *authctxt = ssh->authctxt;
Gssctxt *gssctxt;
gss_buffer_desc send_tok = GSS_C_EMPTY_BUFFER;
gss_buffer_desc recv_tok;
OM_uint32 maj_status;
int r;
u_char *p;
size_t len;
if (authctxt == NULL || (authctxt->methoddata == NULL && !use_privsep))
fatal("No authentication or GSSAPI context");
gssctxt = authctxt->methoddata;
if ((r = sshpkt_get_string(ssh, &p, &len)) != 0 ||
(r = sshpkt_get_end(ssh)) != 0)
fatal("%s: %s", __func__, ssh_err(r));
recv_tok.value = p;
recv_tok.length = len;
/* Push the error token into GSSAPI to see what it says */
maj_status = PRIVSEP(ssh_gssapi_accept_ctx(gssctxt, &recv_tok,
&send_tok, NULL));
free(recv_tok.value);
/* We can't return anything to the client, even if we wanted to */
ssh_dispatch_set(ssh, SSH2_MSG_USERAUTH_GSSAPI_TOKEN, NULL);
ssh_dispatch_set(ssh, SSH2_MSG_USERAUTH_GSSAPI_ERRTOK, NULL);
/* The client will have already moved on to the next auth */
gss_release_buffer(&maj_status, &send_tok);
return 0;
}
/*
* This is called when the client thinks we've completed authentication.
* It should only be enabled in the dispatch handler by the function above,
* which only enables it once the GSSAPI exchange is complete.
*/
static int
input_gssapi_exchange_complete(int type, u_int32_t plen, struct ssh *ssh)
{
Authctxt *authctxt = ssh->authctxt;
int r, authenticated;
const char *displayname;
if (authctxt == NULL || (authctxt->methoddata == NULL && !use_privsep))
fatal("No authentication or GSSAPI context");
/*
* We don't need to check the status, because we're only enabled in
* the dispatcher once the exchange is complete
*/
if ((r = sshpkt_get_end(ssh)) != 0)
fatal("%s: %s", __func__, ssh_err(r));
authenticated = PRIVSEP(ssh_gssapi_userok(authctxt->user));
if ((!use_privsep || mm_is_monitor()) &&
(displayname = ssh_gssapi_displayname()) != NULL)
auth2_record_info(authctxt, "%s", displayname);
authctxt->postponed = 0;
ssh_dispatch_set(ssh, SSH2_MSG_USERAUTH_GSSAPI_TOKEN, NULL);
ssh_dispatch_set(ssh, SSH2_MSG_USERAUTH_GSSAPI_ERRTOK, NULL);
ssh_dispatch_set(ssh, SSH2_MSG_USERAUTH_GSSAPI_MIC, NULL);
ssh_dispatch_set(ssh, SSH2_MSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE, NULL);
userauth_finish(ssh, authenticated, "gssapi-with-mic", NULL);
return 0;
}
static int
input_gssapi_mic(int type, u_int32_t plen, struct ssh *ssh)
{
Authctxt *authctxt = ssh->authctxt;
Gssctxt *gssctxt;
int r, authenticated = 0;
struct sshbuf *b;
gss_buffer_desc mic, gssbuf;
const char *displayname;
u_char *p;
size_t len;
if (authctxt == NULL || (authctxt->methoddata == NULL && !use_privsep))
fatal("No authentication or GSSAPI context");
gssctxt = authctxt->methoddata;
if ((r = sshpkt_get_string(ssh, &p, &len)) != 0)
fatal("%s: %s", __func__, ssh_err(r));
if ((b = sshbuf_new()) == NULL)
fatal("%s: sshbuf_new failed", __func__);
mic.value = p;
mic.length = len;
ssh_gssapi_buildmic(b, authctxt->user, authctxt->service,
"gssapi-with-mic");
if ((gssbuf.value = sshbuf_mutable_ptr(b)) == NULL)
fatal("%s: sshbuf_mutable_ptr failed", __func__);
gssbuf.length = sshbuf_len(b);
if (!GSS_ERROR(PRIVSEP(ssh_gssapi_checkmic(gssctxt, &gssbuf, &mic))))
authenticated = PRIVSEP(ssh_gssapi_userok(authctxt->user));
else
logit("GSSAPI MIC check failed");
sshbuf_free(b);
free(mic.value);
if ((!use_privsep || mm_is_monitor()) &&
(displayname = ssh_gssapi_displayname()) != NULL)
auth2_record_info(authctxt, "%s", displayname);
authctxt->postponed = 0;
ssh_dispatch_set(ssh, SSH2_MSG_USERAUTH_GSSAPI_TOKEN, NULL);
ssh_dispatch_set(ssh, SSH2_MSG_USERAUTH_GSSAPI_ERRTOK, NULL);
ssh_dispatch_set(ssh, SSH2_MSG_USERAUTH_GSSAPI_MIC, NULL);
ssh_dispatch_set(ssh, SSH2_MSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE, NULL);
userauth_finish(ssh, authenticated, "gssapi-with-mic", NULL);
return 0;
}
Authmethod method_gssapi = {
"gssapi-with-mic",
userauth_gssapi,
&options.gss_authentication
};
#endif
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_295_0 |
crossvul-cpp_data_good_3568_4 | /**
* Copyright 2008-2011 Digital Bazaar, Inc.
*
* This file is part of librdfa.
*
* librdfa is Free Software, and can be licensed under any of the
* following three licenses:
*
* 1. GNU Lesser General Public License (LGPL) V2.1 or any
* newer version
* 2. GNU General Public License (GPL) V2 or any newer version
* 3. Apache License, V2.0 or any newer version
*
* You may not use this file except in compliance with at least one of
* the above three licenses.
*
* See LICENSE-* at the top of this software distribution for more
* information regarding the details of each license.
*
* The librdfa library is the Fastest RDFa Parser in the Universe. It is
 * a stream parser, meaning that it takes XML data as input and spits
* out RDF triples as it comes across them in the stream. Due to this
* processing approach, librdfa has a very, very small memory footprint.
* It is also very fast and can operate on hundreds of gigabytes of XML
* data without breaking a sweat.
*
* Usage:
*
* rdfacontext* context = rdfa_create_context(BASE_URI);
* context->callback_data = your_user_data;
* rdfa_set_default_graph_triple_handler(context, &default_graph_triple);
* rdfa_set_processor_graph_triple_handler(context, &processor_graph_triple);
* rdfa_set_buffer_filler(context, &fill_buffer);
* rdfa_parse(context);
* rdfa_free_context(context);
*
* @author Manu Sporny
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "rdfa_utils.h"
#include "rdfa.h"
#define READ_BUFFER_SIZE 4096
#define RDFA_DOCTYPE_STRING_LENGTH 103
void rdfa_init_context(rdfacontext* context)
{
// the [parent subject] is set to the [base] value;
context->parent_subject = NULL;
if(context->base != NULL)
{
char* cleaned_base = rdfa_iri_get_base(context->base);
context->parent_subject =
rdfa_replace_string(context->parent_subject, cleaned_base);
free(cleaned_base);
}
// the [parent object] is set to null;
context->parent_object = NULL;
#ifndef LIBRDFA_IN_RAPTOR
// the [list of URI mappings] is cleared;
context->uri_mappings = (char**)rdfa_create_mapping(MAX_URI_MAPPINGS);
#endif
// the [list of incomplete triples] is cleared;
context->incomplete_triples = rdfa_create_list(3);
// the [language] is set to null.
context->language = NULL;
// set the [current object resource] to null;
context->current_object_resource = NULL;
// 1. First, the local values are initialized, as follows:
//
// * the [recurse] flag is set to 'true';
context->recurse = 1;
// * the [skip element] flag is set to 'false';
context->skip_element = 0;
// * [new subject] is set to null;
context->new_subject = NULL;
// * [current object resource] is set to null;
context->current_object_resource = NULL;
// * the [local list of URI mappings] is set to the list of URI
// mappings from the [evaluation context];
// NOTE: This step is done in rdfa_create_new_element_context()
// * the [local list of incomplete triples] is set to null;
context->local_incomplete_triples = rdfa_create_list(3);
// * the [current language] value is set to the [language] value
// from the [evaluation context].
// NOTE: This step is done in rdfa_create_new_element_context()
// The next set of variables are initialized to make the C compiler
// and valgrind happy - they are not a part of the RDFa spec.
context->bnode_count = 0;
context->underscore_colon_bnode_name = NULL;
context->xml_literal_namespaces_defined = 0;
context->xml_literal_xml_lang_defined = 0;
context->content = NULL;
context->datatype = NULL;
context->property = NULL;
context->plain_literal = NULL;
context->plain_literal_size = 0;
context->xml_literal = NULL;
context->xml_literal_size = 0;
// FIXME: completing incomplete triples always happens now, change
// all of the code to reflect that.
//context->callback_data = NULL;
}
/**
 * Reads the head of the XHTML document and determines the base IRI for
 * the document.
 *
 * @param context the current working context.
 * @param working_buffer the current working buffer.
 * @param working_buffer_size the number of bytes that have been allocated
 *    to the working buffer.
 * @param temp_buffer the temporary buffer holding the newly read data.
 * @param bytes_read the number of bytes available in temp_buffer.
 *
 * @return the number of bytes that were processed, which is always
 *    bytes_read.
*/
static size_t rdfa_init_base(
rdfacontext* context, char** working_buffer, size_t* working_buffer_size,
char* temp_buffer, size_t bytes_read)
{
char* head_end = NULL;
size_t offset = context->wb_position;
// compute the extra space needed, guarding against unsigned wrap-around
size_t needed_size = (offset + bytes_read > *working_buffer_size)
? (offset + bytes_read) - *working_buffer_size : 0;
// extend the working buffer if the incoming data will not fit
if(needed_size > 0)
{
size_t temp_buffer_size = sizeof(char) * READ_BUFFER_SIZE;
if((size_t)needed_size > temp_buffer_size)
temp_buffer_size += needed_size;
*working_buffer_size += temp_buffer_size;
// +1 for NUL at end, to allow strstr() etc. to work
*working_buffer = (char*)realloc(*working_buffer, *working_buffer_size + 1);
}
// append to the working buffer
memmove(*working_buffer + offset, temp_buffer, bytes_read);
// ensure the buffer is a NUL-terminated string
*(*working_buffer + offset + bytes_read) = '\0';
// search for the end of </head> in the working buffer
head_end = strstr(*working_buffer, "</head>");
if(head_end == NULL)
head_end = strstr(*working_buffer, "</HEAD>");
context->wb_position += bytes_read;
if(head_end == NULL)
return bytes_read;
// if </head> was found, search for <base and extract the base URI
if(head_end != NULL)
{
char* base_start = strstr(*working_buffer, "<base ");
if(base_start == NULL)
base_start = strstr(*working_buffer, "<BASE ");
if(base_start != NULL)
{
char* href_start = strstr(base_start, "href=");
char sep = href_start[5];
char* uri_start = href_start + 6;
char* uri_end = strchr(uri_start, sep);
if((uri_start != NULL) && (uri_end != NULL))
{
if(*uri_start != sep)
{
size_t uri_size = uri_end - uri_start;
char* temp_uri = (char*)malloc(sizeof(char) * uri_size + 1);
char* cleaned_base;
strncpy(temp_uri, uri_start, uri_size);
temp_uri[uri_size] = '\0';
// TODO: This isn't in the processing rules, should it
// be? Setting current_object_resource will make
// sure that the BASE element is inherited by all
// subcontexts.
cleaned_base = rdfa_iri_get_base(temp_uri);
context->current_object_resource =
rdfa_replace_string(
context->current_object_resource, cleaned_base);
// clean up the base context
context->base =
rdfa_replace_string(context->base, cleaned_base);
free(cleaned_base);
free(temp_uri);
}
}
}
}
return bytes_read;
}
/**
* Creates a new context for the current element by cloning certain
* parts of the old context on the top of the given stack.
*
* @param context_stack the context stack that is associated with this
* processing run.
*/
static rdfacontext* rdfa_create_new_element_context(rdfalist* context_stack)
{
rdfacontext* parent_context = (rdfacontext*)
context_stack->items[context_stack->num_items - 1]->data;
rdfacontext* rval = rdfa_create_context(parent_context->base);
// * Otherwise, the values are:
// * the [ base ] is set to the [ base ] value of the current
// [ evaluation context ];
rval->base = rdfa_replace_string(rval->base, parent_context->base);
rdfa_init_context(rval);
// copy the URI mappings
#ifndef LIBRDFA_IN_RAPTOR
rdfa_free_mapping(rval->uri_mappings);
rval->uri_mappings = rdfa_copy_mapping(parent_context->uri_mappings);
#endif
// inherit the parent context's language
if(parent_context->language != NULL)
{
rval->language =
rdfa_replace_string(rval->language, parent_context->language);
}
// set the callbacks callback
rval->default_graph_triple_callback =
parent_context->default_graph_triple_callback;
rval->processor_graph_triple_callback =
parent_context->processor_graph_triple_callback;
rval->buffer_filler_callback = parent_context->buffer_filler_callback;
// inherit the bnode count, _: bnode name, recurse flag, and state
// of the xml_literal_namespace_insertion
rval->bnode_count = parent_context->bnode_count;
rval->underscore_colon_bnode_name =
rdfa_replace_string(rval->underscore_colon_bnode_name,
parent_context->underscore_colon_bnode_name);
rval->recurse = parent_context->recurse;
rval->skip_element = 0;
rval->callback_data = parent_context->callback_data;
rval->xml_literal_namespaces_defined =
parent_context->xml_literal_namespaces_defined;
rval->xml_literal_xml_lang_defined =
parent_context->xml_literal_xml_lang_defined;
// inherit the parent context's new_subject
// TODO: This is not anywhere in the syntax processing document
//if(parent_context->new_subject != NULL)
//{
// rval->new_subject = rdfa_replace_string(
// rval->new_subject, parent_context->new_subject);
//}
if(parent_context->skip_element == 0)
{
// o the [ parent subject ] is set to the value of [ new subject ],
// if non-null, or the value of the [ parent subject ] of the
// current [ evaluation context ];
if(parent_context->new_subject != NULL)
{
rval->parent_subject = rdfa_replace_string(
rval->parent_subject, parent_context->new_subject);
}
else
{
rval->parent_subject = rdfa_replace_string(
rval->parent_subject, parent_context->parent_subject);
}
// o the [ parent object ] is set to value of [ current object
// resource ], if non-null, or the value of [ new subject ], if
// non-null, or the value of the [ parent subject ] of the
// current [ evaluation context ];
if(parent_context->current_object_resource != NULL)
{
rval->parent_object =
rdfa_replace_string(
rval->parent_object, parent_context->current_object_resource);
}
else if(parent_context->new_subject != NULL)
{
rval->parent_object =
rdfa_replace_string(
rval->parent_object, parent_context->new_subject);
}
else
{
rval->parent_object =
rdfa_replace_string(
rval->parent_object, parent_context->parent_subject);
}
// copy the incomplete triples
if(rval->incomplete_triples != NULL)
{
rdfa_free_list(rval->incomplete_triples);
}
// o the [ list of incomplete triples ] is set to the [ local list
// of incomplete triples ];
rval->incomplete_triples =
rdfa_copy_list(parent_context->local_incomplete_triples);
}
else
{
rval->parent_subject = rdfa_replace_string(
rval->parent_subject, parent_context->parent_subject);
rval->parent_object = rdfa_replace_string(
rval->parent_object, parent_context->parent_object);
// copy the incomplete triples
rdfa_free_list(rval->incomplete_triples);
rval->incomplete_triples =
rdfa_copy_list(parent_context->incomplete_triples);
// copy the local list of incomplete triples
rdfa_free_list(rval->local_incomplete_triples);
rval->local_incomplete_triples =
rdfa_copy_list(parent_context->local_incomplete_triples);
}
#ifdef LIBRDFA_IN_RAPTOR
rval->base_uri = parent_context->base_uri;
rval->sax2 = parent_context->sax2;
rval->namespace_handler = parent_context->namespace_handler;
rval->namespace_handler_user_data = parent_context->namespace_handler_user_data;
#endif
return rval;
}
#ifdef LIBRDFA_IN_RAPTOR
static int
raptor_nspace_compare(const void *a, const void *b)
{
raptor_namespace* ns_a=*(raptor_namespace**)a;
raptor_namespace* ns_b=*(raptor_namespace**)b;
if(!ns_a->prefix)
return 1;
else if(!ns_b->prefix)
return -1;
else
return strcmp((const char*)ns_b->prefix, (const char*)ns_a->prefix);
}
#endif
/**
* Handles the start_element call
*/
static void XMLCALL
start_element(void* user_data, const char* name, const char** attributes)
{
rdfalist* context_stack = (rdfalist*) user_data;
rdfacontext* context = rdfa_create_new_element_context(context_stack);
const char** aptr = attributes;
const char* xml_lang = NULL;
const char* about_curie = NULL;
char* about = NULL;
const char* src_curie = NULL;
char* src = NULL;
const char* type_of_curie = NULL;
rdfalist* type_of = NULL;
const char* rel_curie = NULL;
rdfalist* rel = NULL;
const char* rev_curie = NULL;
rdfalist* rev = NULL;
const char* property_curie = NULL;
rdfalist* property = NULL;
const char* resource_curie = NULL;
char* resource = NULL;
const char* href_curie = NULL;
char* href = NULL;
const char* content = NULL;
const char* datatype_curie = NULL;
char* datatype = NULL;
rdfa_push_item(context_stack, context, RDFALIST_FLAG_CONTEXT);
if(DEBUG)
{
printf("DEBUG: ------- START - %s -------\n", name);
}
// start the XML Literal text
if(context->xml_literal == NULL)
{
context->xml_literal = rdfa_replace_string(context->xml_literal, "<");
context->xml_literal_size = 1;
}
else
{
context->xml_literal = rdfa_n_append_string(
context->xml_literal, &context->xml_literal_size, "<", 1);
}
context->xml_literal = rdfa_n_append_string(
context->xml_literal, &context->xml_literal_size,
name, strlen(name));
if(!context->xml_literal_namespaces_defined)
{
// append namespaces to XML Literal
#ifdef LIBRDFA_IN_RAPTOR
raptor_namespace_stack* nstack = &context->sax2->namespaces;
raptor_namespace* ns;
raptor_namespace** ns_list = NULL;
size_t ns_size;
#else
char** umap = context->uri_mappings;
#endif
char* umap_key = NULL;
char* umap_value = NULL;
// if the namespaces are not defined, then neither is the xml:lang
context->xml_literal_xml_lang_defined = 0;
#ifdef LIBRDFA_IN_RAPTOR
ns_size = 0;
ns_list = raptor_namespace_stack_to_array(nstack, &ns_size);
qsort((void*)ns_list, ns_size, sizeof(raptor_namespace*),
raptor_nspace_compare);
while(ns_size > 0)
#else
while(*umap != NULL)
#endif
{
unsigned char insert_xmlns_definition = 1;
const char* attr = NULL;
// get the next mapping to process
#ifdef LIBRDFA_IN_RAPTOR
ns=ns_list[--ns_size];
umap_key = (char*)raptor_namespace_get_prefix(ns);
if(!umap_key)
umap_key=(char*)XMLNS_DEFAULT_MAPPING;
umap_value = (char*)raptor_uri_as_string(raptor_namespace_get_uri(ns));
#else
rdfa_next_mapping(umap++, &umap_key, &umap_value);
umap++;
#endif
// check to make sure that the namespace isn't already
// defined in the current element.
if(attributes != NULL)
{
const char** attrs = attributes;
while((*attrs != NULL) && insert_xmlns_definition)
{
attr = *attrs++;
// if the attribute is a umap_key, skip the definition
// of the attribute.
if((strcmp(attr, umap_key) == 0) ||
(strcmp(umap_key, XMLNS_DEFAULT_MAPPING) == 0))
{
insert_xmlns_definition = 0;
}
}
}
// if the namespace isn't already defined on the element,
// copy it to the XML Literal string.
if(insert_xmlns_definition)
{
// append the namespace attribute to the XML Literal
context->xml_literal = rdfa_n_append_string(
context->xml_literal, &context->xml_literal_size,
" xmlns", strlen(" xmlns"));
// check to see if we're dumping the standard XHTML namespace or
// a user-defined XML namespace
if(strcmp(umap_key, XMLNS_DEFAULT_MAPPING) != 0)
{
context->xml_literal = rdfa_n_append_string(
context->xml_literal, &context->xml_literal_size, ":", 1);
context->xml_literal = rdfa_n_append_string(
context->xml_literal, &context->xml_literal_size,
umap_key, strlen(umap_key));
}
// append the namespace value
context->xml_literal = rdfa_n_append_string(
context->xml_literal, &context->xml_literal_size, "=\"", 2);
context->xml_literal = rdfa_n_append_string(
context->xml_literal, &context->xml_literal_size,
umap_value, strlen(umap_value));
context->xml_literal = rdfa_n_append_string(
context->xml_literal, &context->xml_literal_size, "\"", 1);
}
} /* end while umap not NULL */
context->xml_literal_namespaces_defined = 1;
#ifdef LIBRDFA_IN_RAPTOR
if(ns_list)
raptor_free_memory(ns_list);
#endif
} /* end if namespaces inserted */
// prepare all of the RDFa-specific attributes we are looking for.
// scan all of the attributes for the RDFa-specific attributes
if(aptr != NULL)
{
while(*aptr != NULL)
{
const char* attr;
const char* value;
char* literal_text;
attr = *aptr++;
value = *aptr++;
// append the attribute-value pair to the XML literal
literal_text = (char*)malloc(strlen(attr) + strlen(value) + 5);
sprintf(literal_text, " %s=\"%s\"", attr, value);
context->xml_literal = rdfa_n_append_string(
context->xml_literal, &context->xml_literal_size,
literal_text, strlen(literal_text));
free(literal_text);
// if xml:lang is defined, ensure that it is not overwritten
if(strcmp(attr, "xml:lang") == 0)
{
context->xml_literal_xml_lang_defined = 1;
}
// process all of the RDFa attributes
if(strcmp(attr, "about") == 0)
{
about_curie = value;
about = rdfa_resolve_curie(
context, about_curie, CURIE_PARSE_ABOUT_RESOURCE);
}
else if(strcmp(attr, "src") == 0)
{
src_curie = value;
src = rdfa_resolve_curie(context, src_curie, CURIE_PARSE_HREF_SRC);
}
else if(strcmp(attr, "typeof") == 0)
{
type_of_curie = value;
type_of = rdfa_resolve_curie_list(
context, type_of_curie,
CURIE_PARSE_INSTANCEOF_DATATYPE);
}
else if(strcmp(attr, "rel") == 0)
{
rel_curie = value;
rel = rdfa_resolve_curie_list(
context, rel_curie, CURIE_PARSE_RELREV);
}
else if(strcmp(attr, "rev") == 0)
{
rev_curie = value;
rev = rdfa_resolve_curie_list(
context, rev_curie, CURIE_PARSE_RELREV);
}
else if(strcmp(attr, "property") == 0)
{
property_curie = value;
property =
rdfa_resolve_curie_list(
context, property_curie, CURIE_PARSE_PROPERTY);
}
else if(strcmp(attr, "resource") == 0)
{
resource_curie = value;
resource = rdfa_resolve_curie(
context, resource_curie, CURIE_PARSE_ABOUT_RESOURCE);
}
else if(strcmp(attr, "href") == 0)
{
href_curie = value;
href =
rdfa_resolve_curie(context, href_curie, CURIE_PARSE_HREF_SRC);
}
else if(strcmp(attr, "content") == 0)
{
content = value;
}
else if(strcmp(attr, "datatype") == 0)
{
datatype_curie = value;
if(strlen(datatype_curie) == 0)
{
datatype = rdfa_replace_string(datatype, "");
}
else
{
datatype = rdfa_resolve_curie(context, datatype_curie,
CURIE_PARSE_INSTANCEOF_DATATYPE);
}
}
#ifndef LIBRDFA_IN_RAPTOR
else if(strcmp(attr, "xml:lang") == 0)
{
xml_lang = value;
}
else if(strstr(attr, "xmlns") != NULL)
{
// 2. Next the [current element] is parsed for
// [URI mapping]s and these are added to the
// [local list of URI mappings]. Note that a
// [URI mapping] will simply overwrite any current
// mapping in the list that has the same name;
rdfa_update_uri_mappings(context, attr, value);
}
#endif
}
}
#ifdef LIBRDFA_IN_RAPTOR
if(context->sax2) {
xml_lang = (const char*)raptor_sax2_inscope_xml_language(context->sax2);
if(!xml_lang)
xml_lang = "";
}
#endif
// check to see if we should append an xml:lang to the XML Literal
// if one is defined in the context and does not exist on the
// element.
if((xml_lang == NULL) && (context->language != NULL) &&
!context->xml_literal_xml_lang_defined)
{
context->xml_literal = rdfa_n_append_string(
context->xml_literal, &context->xml_literal_size,
" xml:lang=\"", strlen(" xml:lang=\""));
context->xml_literal = rdfa_n_append_string(
context->xml_literal, &context->xml_literal_size,
context->language, strlen(context->language));
context->xml_literal = rdfa_n_append_string(
context->xml_literal, &context->xml_literal_size, "\"", 1);
// ensure that the lang isn't set in a subtree (unless it's overwritten)
context->xml_literal_xml_lang_defined = 1;
}
// close the XML Literal value
context->xml_literal = rdfa_n_append_string(
context->xml_literal, &context->xml_literal_size, ">", 1);
// 3. The [current element] is also parsed for any language
// information, and [language] is set in the [current
// evaluation context];
rdfa_update_language(context, xml_lang);
/***************** FOR DEBUGGING PURPOSES ONLY ******************/
if(DEBUG)
{
if(about != NULL)
{
printf("DEBUG: @about = %s\n", about);
}
if(src != NULL)
{
printf("DEBUG: @src = %s\n", src);
}
if(type_of != NULL)
{
printf("DEBUG: @type_of = ");
rdfa_print_list(type_of);
}
if(rel != NULL)
{
printf("DEBUG: @rel = ");
rdfa_print_list(rel);
}
if(rev != NULL)
{
printf("DEBUG: @rev = ");
rdfa_print_list(rev);
}
if(property != NULL)
{
printf("DEBUG: @property = ");
rdfa_print_list(property);
}
if(resource != NULL)
{
printf("DEBUG: @resource = %s\n", resource);
}
if(href != NULL)
{
printf("DEBUG: @href = %s\n", href);
}
if(content != NULL)
{
printf("DEBUG: @content = %s\n", content);
}
if(datatype != NULL)
{
printf("DEBUG: @datatype = %s\n", datatype);
}
}
// TODO: This isn't part of the processing model, it needs to be
// included and is a correction for the last item in step #4.
if((about == NULL) && (src == NULL) && (type_of == NULL) &&
(rel == NULL) && (rev == NULL) && (property == NULL) &&
(resource == NULL) && (href == NULL))
{
context->skip_element = 1;
}
if((rel == NULL) && (rev == NULL))
{
// 4. If the [current element] contains no valid @rel or @rev
// URI, obtained according to the section on CURIE and URI
// Processing, then the next step is to establish a value for
// [new subject]. Any of the attributes that can carry a
// resource can set [new subject];
rdfa_establish_new_subject(
context, name, about, src, resource, href, type_of);
}
else
{
// 5. If the [current element] does contain a valid @rel or @rev
// URI, obtained according to the section on CURIE and URI
// Processing, then the next step is to establish both a value
// for [new subject] and a value for [current object resource]:
rdfa_establish_new_subject_with_relrev(
context, name, about, src, resource, href, type_of);
}
if(context->new_subject != NULL)
{
if(DEBUG)
{
printf("DEBUG: new_subject = %s\n", context->new_subject);
}
// 6. If in any of the previous steps a [new subject] was set to
// a non-null value,
// it is now used to provide a subject for type values;
if(type_of != NULL)
{
rdfa_complete_type_triples(context, type_of);
}
// Note that none of this block is executed if there is no
// [new subject] value, i.e., [new subject] remains null.
}
if(context->current_object_resource != NULL)
{
// 7. If in any of the previous steps a [current object resource]
// was set to a non-null value, it is now used to generate triples
rdfa_complete_relrev_triples(context, rel, rev);
}
else if((rel != NULL) || (rev != NULL))
{
// 8. If however [current object resource] was set to null, but
// there are predicates present, then they must be stored as
// [incomplete triple]s, pending the discovery of a subject that
// can be used as the object. Also, [current object resource]
// should be set to a newly created [bnode]
rdfa_save_incomplete_triples(context, rel, rev);
}
// Ensure to re-insert XML Literal namespace information from this
// point on...
if(property != NULL)
{
context->xml_literal_namespaces_defined = 0;
}
// save these for processing steps #9 and #10
context->property = property;
context->content = rdfa_replace_string(context->content, content);
context->datatype = rdfa_replace_string(context->datatype, datatype);
// free the resolved CURIEs
free(about);
free(src);
rdfa_free_list(type_of);
rdfa_free_list(rel);
rdfa_free_list(rev);
free(resource);
free(href);
free(datatype);
}
static void XMLCALL character_data(void *user_data, const char *s, int len)
{
rdfalist* context_stack = (rdfalist*)user_data;
rdfacontext* context = (rdfacontext*)
context_stack->items[context_stack->num_items - 1]->data;
char *buffer = (char*)malloc(len + 1);
memset(buffer, 0, len + 1);
memcpy(buffer, s, len);
// append the text to the current context's plain literal
if(context->plain_literal == NULL)
{
context->plain_literal =
rdfa_replace_string(context->plain_literal, buffer);
context->plain_literal_size = len;
}
else
{
context->plain_literal = rdfa_n_append_string(
context->plain_literal, &context->plain_literal_size, buffer, len);
}
// append the text to the current context's XML literal
if(context->xml_literal == NULL)
{
context->xml_literal =
rdfa_replace_string(context->xml_literal, buffer);
context->xml_literal_size = len;
}
else
{
context->xml_literal = rdfa_n_append_string(
context->xml_literal, &context->xml_literal_size, buffer, len);
}
//printf("plain_literal: %s\n", context->plain_literal);
//printf("xml_literal: %s\n", context->xml_literal);
free(buffer);
}
static void XMLCALL
end_element(void *user_data, const char *name)
{
rdfalist* context_stack = (rdfalist*)user_data;
rdfacontext* context = (rdfacontext*)rdfa_pop_item(context_stack);
rdfacontext* parent_context = (rdfacontext*)
context_stack->items[context_stack->num_items - 1]->data;
// append the text to the current context's XML literal
char* buffer = (char*)malloc(strlen(name) + 4);
if(DEBUG)
{
printf("DEBUG: </%s>\n", name);
}
sprintf(buffer, "</%s>", name);
if(context->xml_literal == NULL)
{
context->xml_literal =
rdfa_replace_string(context->xml_literal, buffer);
context->xml_literal_size = strlen(buffer);
}
else
{
context->xml_literal = rdfa_n_append_string(
context->xml_literal, &context->xml_literal_size,
buffer, strlen(buffer));
}
free(buffer);
// 9. The next step of the iteration is to establish any
// [current object literal];
// generate the complete object literal triples
if(context->property != NULL)
{
// save the current xml literal
char* saved_xml_literal = context->xml_literal;
char* content_start = NULL;
char* content_end = NULL;
// ensure to mark only the inner-content of the XML node for
// processing the object literal.
buffer = NULL;
if(context->xml_literal != NULL)
{
// get the data between the first tag and the last tag
content_start = strchr(context->xml_literal, '>');
content_end = strrchr(context->xml_literal, '<');
if((content_start != NULL) && (content_end != NULL))
{
// set content end to null terminator
context->xml_literal = ++content_start;
*content_end = '\0';
}
}
// update the plain literal if the XML Literal is missing or empty
if((context->xml_literal == NULL) || (strlen(context->xml_literal) == 0))
{
context->plain_literal =
rdfa_replace_string(context->plain_literal, "");
}
// process data between first tag and last tag
// this needs the xml literal to be null terminated
rdfa_complete_object_literal_triples(context);
if(content_end != NULL)
{
// set content end back
*content_end = '<';
}
if(saved_xml_literal != NULL)
{
// restore xml literal
context->xml_literal = saved_xml_literal;
}
}
//printf(context->plain_literal);
// append the XML literal and plain text literals to the parent
// literals
if(context->xml_literal != NULL)
{
if(parent_context->xml_literal == NULL)
{
parent_context->xml_literal =
rdfa_replace_string(
parent_context->xml_literal, context->xml_literal);
parent_context->xml_literal_size = context->xml_literal_size;
}
else
{
parent_context->xml_literal =
rdfa_n_append_string(
parent_context->xml_literal, &parent_context->xml_literal_size,
context->xml_literal, context->xml_literal_size);
}
// if there is an XML literal, there is probably a plain literal
if(context->plain_literal != NULL)
{
if(parent_context->plain_literal == NULL)
{
parent_context->plain_literal =
rdfa_replace_string(
parent_context->plain_literal, context->plain_literal);
parent_context->plain_literal_size = context->plain_literal_size;
}
else
{
parent_context->plain_literal =
rdfa_n_append_string(
parent_context->plain_literal,
&parent_context->plain_literal_size,
context->plain_literal,
context->plain_literal_size);
}
}
}
// preserve the bnode count by copying it to the parent_context
parent_context->bnode_count = context->bnode_count;
parent_context->underscore_colon_bnode_name =
rdfa_replace_string(parent_context->underscore_colon_bnode_name,
context->underscore_colon_bnode_name);
// 10. If the [ skip element ] flag is 'false', and [ new subject ]
// was set to a non-null value, then any [ incomplete triple ]s
// within the current context should be completed:
if((context->skip_element == 0) && (context->new_subject != NULL))
{
rdfa_complete_incomplete_triples(context);
}
// free the context
rdfa_free_context(context);
}
#ifdef LIBRDFA_IN_RAPTOR
static void raptor_rdfa_start_element(void *user_data,
raptor_xml_element *xml_element)
{
raptor_qname* qname=raptor_xml_element_get_name(xml_element);
int attr_count=raptor_xml_element_get_attributes_count(xml_element);
raptor_qname** attrs=raptor_xml_element_get_attributes(xml_element);
unsigned char* qname_string=raptor_qname_to_counted_name(qname, NULL);
char** attr=NULL;
int i;
if(attr_count > 0) {
attr=(char**)malloc(sizeof(char*) * (1+(attr_count*2)));
for(i=0; i<attr_count; i++) {
attr[2*i]=(char*)raptor_qname_to_counted_name(attrs[i], NULL);
attr[1+(2*i)]=(char*)raptor_qname_get_value(attrs[i]);
}
attr[2*i]=NULL;
}
start_element(user_data, (char*)qname_string, (const char**)attr);
raptor_free_memory(qname_string);
if(attr) {
for(i=0; i<attr_count; i++)
raptor_free_memory(attr[2*i]);
free(attr);
}
}
static void raptor_rdfa_end_element(void *user_data,
raptor_xml_element* xml_element)
{
raptor_qname* qname=raptor_xml_element_get_name(xml_element);
unsigned char* qname_string=raptor_qname_to_counted_name(qname, NULL);
end_element(user_data, (const char*)qname_string);
raptor_free_memory(qname_string);
}
static void raptor_rdfa_character_data(void *user_data,
raptor_xml_element* xml_element,
const unsigned char *s, int len)
{
character_data(user_data, (const char *)s, len);
}
static void raptor_rdfa_namespace_handler(void *user_data,
raptor_namespace* nspace)
{
rdfalist* context_stack = (rdfalist*)user_data;
rdfacontext* context = (rdfacontext*)
context_stack->items[context_stack->num_items - 1]->data;
if(context->namespace_handler)
(*context->namespace_handler)(context->namespace_handler_user_data,
nspace);
}
#endif
rdfacontext* rdfa_create_context(const char* base)
{
rdfacontext* rval = NULL;
size_t base_length = strlen(base);
// if the base isn't specified, don't create a context
if(base_length > 0)
{
char* cleaned_base;
rval = (rdfacontext*)malloc(sizeof(rdfacontext));
rval->base = NULL;
cleaned_base = rdfa_iri_get_base(base);
rval->base = rdfa_replace_string(rval->base, cleaned_base);
free(cleaned_base);
// no callbacks set yet
rval->default_graph_triple_callback = NULL;
rval->buffer_filler_callback = NULL;
rval->processor_graph_triple_callback = NULL;
rval->callback_data = NULL;
/* parse state */
rval->wb_allocated = 0;
rval->working_buffer = NULL;
rval->wb_position = 0;
#ifdef LIBRDFA_IN_RAPTOR
rval->base_uri = NULL;
rval->sax2 = NULL;
rval->namespace_handler = NULL;
rval->namespace_handler_user_data = NULL;
#else
rval->uri_mappings = NULL;
rval->parser = NULL;
#endif
rval->done = 0;
rval->context_stack = NULL;
rval->wb_preread = 0;
rval->preread = 0;
}
else
{
printf("librdfa error: Failed to create a parsing context, "
"base IRI was not specified!\n");
}
return rval;
}
static void rdfa_free_context_stack(rdfacontext* context)
{
// this field is not NULL only on the rdfacontext* at the top of the stack
if(context->context_stack != NULL)
{
void* rval;
// free the stack ensuring that we do not delete this context if
// it is in the list (which it may be, if parsing ended on error)
do
{
rval = rdfa_pop_item(context->context_stack);
if(rval && rval != context)
{
rdfa_free_context((rdfacontext*)rval);
}
}
while(rval);
free(context->context_stack->items);
free(context->context_stack);
context->context_stack = NULL;
}
}
void rdfa_free_context(rdfacontext* context)
{
free(context->base);
free(context->parent_subject);
free(context->parent_object);
#ifndef LIBRDFA_IN_RAPTOR
rdfa_free_mapping(context->uri_mappings);
#endif
rdfa_free_list(context->incomplete_triples);
free(context->language);
free(context->underscore_colon_bnode_name);
free(context->new_subject);
free(context->current_object_resource);
free(context->content);
free(context->datatype);
rdfa_free_list(context->property);
free(context->plain_literal);
free(context->xml_literal);
// TODO: These should be moved into their own data structure
rdfa_free_list(context->local_incomplete_triples);
rdfa_free_context_stack(context);
free(context->working_buffer);
free(context);
}
void rdfa_set_default_graph_triple_handler(
rdfacontext* context, triple_handler_fp th)
{
context->default_graph_triple_callback = th;
}
void rdfa_set_processor_graph_triple_handler(
rdfacontext* context, triple_handler_fp th)
{
context->processor_graph_triple_callback = th;
}
void rdfa_set_buffer_filler(rdfacontext* context, buffer_filler_fp bf)
{
context->buffer_filler_callback = bf;
}
int rdfa_parse_start(rdfacontext* context)
{
// create the buffers and expat parser
int rval = RDFA_PARSE_SUCCESS;
context->wb_allocated = sizeof(char) * READ_BUFFER_SIZE;
// +1 for NUL at end, to allow strstr() etc. to work
// malloc - only the first char needs to be NUL
context->working_buffer = (char*)malloc(context->wb_allocated + 1);
*context->working_buffer = '\0';
#ifndef LIBRDFA_IN_RAPTOR
context->parser = XML_ParserCreate(NULL);
#endif
context->done = 0;
context->context_stack = rdfa_create_list(32);
// initialize the context stack
rdfa_push_item(context->context_stack, context, RDFALIST_FLAG_CONTEXT);
#ifdef LIBRDFA_IN_RAPTOR
context->sax2 = raptor_new_sax2(context->world, context->locator,
context->context_stack);
#else
#endif
// set up the context stack
#ifdef LIBRDFA_IN_RAPTOR
raptor_sax2_set_start_element_handler(context->sax2,
raptor_rdfa_start_element);
raptor_sax2_set_end_element_handler(context->sax2,
raptor_rdfa_end_element);
raptor_sax2_set_characters_handler(context->sax2,
raptor_rdfa_character_data);
raptor_sax2_set_namespace_handler(context->sax2,
raptor_rdfa_namespace_handler);
#else
XML_SetUserData(context->parser, context->context_stack);
XML_SetElementHandler(context->parser, start_element, end_element);
XML_SetCharacterDataHandler(context->parser, character_data);
#endif
rdfa_init_context(context);
#ifdef LIBRDFA_IN_RAPTOR
if(1) {
raptor_parser* rdf_parser = (raptor_parser*)context->callback_data;
/* Optionally forbid internal network and file requests in the
* XML parser
*/
raptor_sax2_set_option(context->sax2,
RAPTOR_OPTION_NO_NET, NULL,
RAPTOR_OPTIONS_GET_NUMERIC(rdf_parser, RAPTOR_OPTION_NO_NET));
raptor_sax2_set_option(context->sax2,
RAPTOR_OPTION_NO_FILE, NULL,
RAPTOR_OPTIONS_GET_NUMERIC(rdf_parser, RAPTOR_OPTION_NO_FILE));
raptor_sax2_set_option(context->sax2,
RAPTOR_OPTION_LOAD_EXTERNAL_ENTITIES, NULL,
RAPTOR_OPTIONS_GET_NUMERIC(rdf_parser, RAPTOR_OPTION_LOAD_EXTERNAL_ENTITIES));
if(rdf_parser->uri_filter)
raptor_sax2_set_uri_filter(context->sax2, rdf_parser->uri_filter,
rdf_parser->uri_filter_user_data);
}
context->base_uri=raptor_new_uri(context->sax2->world, (const unsigned char*)context->base);
raptor_sax2_parse_start(context->sax2, context->base_uri);
#endif
return rval;
}
static int rdfa_process_doctype(rdfacontext* context, size_t* bytes)
{
int rval = 0;
char* doctype_position = 0;
char* doctype_buffer;
const char* new_doctype =
"<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML+RDFa 1.0//EN\" "
"\"http://www.w3.org/MarkUp/DTD/xhtml-rdfa-1.dtd\">";
// Create a working buffer for finding the DOCTYPE
doctype_buffer = (char*)malloc(*bytes + 2);
memcpy(doctype_buffer, context->working_buffer, *bytes);
// NUL-terminate directly after the copied bytes so that the strstr()
// calls below never scan uninitialized memory
doctype_buffer[*bytes] = '\0';
doctype_position = strstr(doctype_buffer, "<!DOCTYPE");
// if a doctype declaration was found, attempt to replace it
if(doctype_position != NULL)
{
char* new_doctype_buffer = NULL;
size_t new_doctype_buffer_length = 0;
char* doctype_end = strchr(doctype_position, '>');
// make sure that the end of the doctype declaration can be found
if(doctype_end != NULL)
{
size_t bytes_to_copy = 0;
size_t total_bytes = 0;
// create the new doctype buffer
bytes_to_copy = doctype_position - doctype_buffer;
new_doctype_buffer = rdfa_n_append_string(new_doctype_buffer,
&new_doctype_buffer_length, doctype_buffer, bytes_to_copy);
total_bytes += bytes_to_copy;
bytes_to_copy = RDFA_DOCTYPE_STRING_LENGTH;
new_doctype_buffer = rdfa_n_append_string(new_doctype_buffer,
&new_doctype_buffer_length, new_doctype, bytes_to_copy);
total_bytes += bytes_to_copy;
bytes_to_copy = *bytes - ((doctype_end + 1) - doctype_buffer);
new_doctype_buffer = rdfa_n_append_string(new_doctype_buffer,
&new_doctype_buffer_length, doctype_end + 1, bytes_to_copy);
total_bytes += bytes_to_copy;
// replace the old working buffer with the new doctype buffer
free(context->working_buffer);
context->working_buffer = new_doctype_buffer;
context->wb_position = total_bytes;
context->wb_allocated = total_bytes;
*bytes = context->wb_allocated;
rval = 1;
}
}
else
{
char* new_doctype_buffer = NULL;
size_t new_doctype_buffer_length = 0;
// find where the HTML element begins
char* html_position = strstr(doctype_buffer, "<html");
if(html_position == NULL)
{
html_position = strstr(doctype_buffer, "<HTML");
}
if(html_position != NULL)
{
size_t bytes_to_copy = 0;
size_t total_bytes = 0;
// create the new doctype buffer
bytes_to_copy = html_position - doctype_buffer;
new_doctype_buffer = rdfa_n_append_string(new_doctype_buffer,
&new_doctype_buffer_length, doctype_buffer, bytes_to_copy);
total_bytes += bytes_to_copy;
bytes_to_copy = RDFA_DOCTYPE_STRING_LENGTH;
new_doctype_buffer = rdfa_n_append_string(new_doctype_buffer,
&new_doctype_buffer_length, new_doctype, bytes_to_copy);
total_bytes += bytes_to_copy;
bytes_to_copy = 1;
new_doctype_buffer = rdfa_n_append_string(new_doctype_buffer,
&new_doctype_buffer_length, "\n", bytes_to_copy);
total_bytes += bytes_to_copy;
bytes_to_copy = *bytes - (html_position - doctype_buffer);
new_doctype_buffer = rdfa_n_append_string(new_doctype_buffer,
&new_doctype_buffer_length, html_position, bytes_to_copy);
total_bytes += bytes_to_copy;
// replace the old working buffer with the new doctype buffer
free(context->working_buffer);
context->working_buffer = new_doctype_buffer;
context->wb_position = total_bytes;
context->wb_allocated = total_bytes;
*bytes = context->wb_allocated;
rval = 1;
}
}
free(doctype_buffer);
return rval;
}
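// Example (illustrative only): for input that starts with
//    <!DOCTYPE html><html xmlns="http://www.w3.org/1999/xhtml">...
// the function above replaces the existing declaration with the XHTML+RDFa
// 1.0 doctype held in new_doctype; when no doctype is present, the same
// declaration is inserted directly before the <html> element instead.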
#ifndef LIBRDFA_IN_RAPTOR
static void rdfa_report_error(rdfacontext* context, char* data, size_t length)
{
char* buffer = malloc(2<<12);
snprintf(buffer, 2<<12, "XML parsing error: %s at line %d, column %d.",
XML_ErrorString(XML_GetErrorCode(context->parser)),
(int)XML_GetCurrentLineNumber(context->parser),
(int)XML_GetCurrentColumnNumber(context->parser));
if(context->processor_graph_triple_callback != NULL)
{
char* error_subject = rdfa_create_bnode(context);
char* pointer_subject = rdfa_create_bnode(context);
// generate the RDFa Processing Graph error type triple
rdftriple* triple = rdfa_create_triple(
error_subject, "http://www.w3.org/1999/02/22-rdf-syntax-ns#type",
"http://www.w3.org/ns/rdfa_processing_graph#Error",
RDF_TYPE_IRI, NULL, NULL);
context->processor_graph_triple_callback(triple, context->callback_data);
// generate the error description
triple = rdfa_create_triple(
error_subject, "http://purl.org/dc/terms/description", buffer,
RDF_TYPE_PLAIN_LITERAL, NULL, "en");
context->processor_graph_triple_callback(triple, context->callback_data);
// generate the context triple for the error
triple = rdfa_create_triple(
error_subject, "http://www.w3.org/ns/rdfa_processing_graph#context",
pointer_subject, RDF_TYPE_IRI, NULL, NULL);
context->processor_graph_triple_callback(triple, context->callback_data);
// generate the type for the context triple
triple = rdfa_create_triple(
pointer_subject, "http://www.w3.org/1999/02/22-rdf-syntax-ns#type",
"http://www.w3.org/2009/pointers#LineCharPointer",
RDF_TYPE_IRI, NULL, NULL);
context->processor_graph_triple_callback(triple, context->callback_data);
// generate the line number
snprintf(buffer, 2<<12, "%d",
(int)XML_GetCurrentLineNumber(context->parser));
triple = rdfa_create_triple(
pointer_subject, "http://www.w3.org/2009/pointers#lineNumber",
buffer, RDF_TYPE_TYPED_LITERAL,
"http://www.w3.org/2001/XMLSchema#positiveInteger", NULL);
context->processor_graph_triple_callback(triple, context->callback_data);
// generate the column number
snprintf(buffer, 2<<12, "%d",
(int)XML_GetCurrentColumnNumber(context->parser));
triple = rdfa_create_triple(
pointer_subject, "http://www.w3.org/2009/pointers#charNumber",
buffer, RDF_TYPE_TYPED_LITERAL,
"http://www.w3.org/2001/XMLSchema#positiveInteger", NULL);
context->processor_graph_triple_callback(triple, context->callback_data);
free(error_subject);
free(pointer_subject);
}
else
{
printf("librdfa processor error: %s\n", buffer);
}
free(buffer);
}
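/*
 * Editor's note: for illustration, a parse error at line 12, column 7 would
 * yield processor-graph triples shaped roughly like this (blank node labels
 * are generated at runtime, so the _:bN names here are hypothetical):
 *
 *   _:b0 rdf:type rdfa_pg:Error .
 *   _:b0 dc:description "XML parsing error: ... at line 12, column 7."@en .
 *   _:b0 rdfa_pg:context _:b1 .
 *   _:b1 rdf:type ptr:LineCharPointer .
 *   _:b1 ptr:lineNumber "12"^^xsd:positiveInteger .
 *   _:b1 ptr:charNumber "7"^^xsd:positiveInteger .
 */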
#endif
int rdfa_parse_chunk(rdfacontext* context, char* data, size_t wblen, int done)
{
// it is an error to call this before rdfa_parse_start() or after parsing has finished
if(context->done)
{
return RDFA_PARSE_FAILED;
}
if(!context->preread)
{
// search for the <base> tag and use the href contained therein to
// set the parsing context.
context->wb_preread = rdfa_init_base(context,
&context->working_buffer, &context->wb_allocated, data, wblen);
// continue looking if in first 131072 bytes of data
if(!context->base && context->wb_preread < (1<<17))
return RDFA_PARSE_SUCCESS;
// process the document's DOCTYPE
rdfa_process_doctype(context, &wblen);
#ifdef LIBRDFA_IN_RAPTOR
if(raptor_sax2_parse_chunk(context->sax2,
(const unsigned char*)context->working_buffer,
context->wb_position, done))
{
return RDFA_PARSE_FAILED;
}
#else
if(XML_Parse(context->parser, context->working_buffer,
context->wb_position, 0) == XML_STATUS_ERROR)
{
rdfa_report_error(context, data, wblen);
return RDFA_PARSE_FAILED;
}
#endif
context->preread = 1;
return RDFA_PARSE_SUCCESS;
}
// otherwise just parse the block passed in
#ifdef LIBRDFA_IN_RAPTOR
if(raptor_sax2_parse_chunk(context->sax2, (const unsigned char*)data, wblen, done))
{
return RDFA_PARSE_FAILED;
}
#else
if(XML_Parse(context->parser, data, wblen, done) == XML_STATUS_ERROR)
{
rdfa_report_error(context, data, wblen);
return RDFA_PARSE_FAILED;
}
#endif
return RDFA_PARSE_SUCCESS;
}
void rdfa_parse_end(rdfacontext* context)
{
// free context stack
rdfa_free_context_stack(context);
// Free the expat parser and the like
#ifdef LIBRDFA_IN_RAPTOR
if(context->base_uri)
raptor_free_uri(context->base_uri);
raptor_free_sax2(context->sax2);
context->sax2=NULL;
#else
// free parser
XML_ParserFree(context->parser);
#endif
}
char* rdfa_get_buffer(rdfacontext* context, size_t* blen)
{
*blen = context->wb_allocated;
return context->working_buffer;
}
int rdfa_parse_buffer(rdfacontext* context, size_t bytes)
{
int rval;
int done;
done = (bytes == 0);
rval = rdfa_parse_chunk(context, context->working_buffer, bytes, done);
context->done = done;
return rval;
}
int rdfa_parse(rdfacontext* context)
{
int rval;
rval = rdfa_parse_start(context);
if(rval != RDFA_PARSE_SUCCESS)
{
context->done = 1;
return rval;
}
do
{
size_t wblen;
int done;
wblen = context->buffer_filler_callback(
context->working_buffer, context->wb_allocated,
context->callback_data);
done = (wblen == 0);
rval = rdfa_parse_chunk(context, context->working_buffer, wblen, done);
context->done=done;
}
while(!context->done && rval == RDFA_PARSE_SUCCESS);
rdfa_parse_end(context);
return rval;
}
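/*
 * Editor's note: a hypothetical sketch (not part of librdfa) of driving the
 * chunked API above from a stdio stream; it assumes <stdio.h> and an already
 * created rdfacontext. A zero-length read signals the final chunk, mirroring
 * the done-flag logic in rdfa_parse_buffer()/rdfa_parse(). Kept under #if 0
 * so it stays illustrative.
 */
#if 0
static int parse_stream(rdfacontext* context, FILE* in)
{
   char buf[4096];
   size_t n;
   int rval = rdfa_parse_start(context);
   if(rval != RDFA_PARSE_SUCCESS)
      return rval;
   do
   {
      n = fread(buf, 1, sizeof(buf), in);
      rval = rdfa_parse_chunk(context, buf, n, n == 0);
   }
   while(n > 0 && rval == RDFA_PARSE_SUCCESS);
   rdfa_parse_end(context);
   return rval;
}
#endif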
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_3568_4 |
crossvul-cpp_data_bad_1829_2 | /***********************************************************************/
/* */
/* OCaml */
/* */
/* Xavier Leroy and Damien Doligez, INRIA Rocquencourt */
/* */
/* Copyright 1996 Institut National de Recherche en Informatique et */
/* en Automatique. All rights reserved. This file is distributed */
/* under the terms of the GNU Library General Public License, with */
/* the special exception on linking described in file ../LICENSE. */
/* */
/***********************************************************************/
/* 1. Allocation functions doing the same work as the macros in the
case where [Setup_for_gc] and [Restore_after_gc] are no-ops.
2. Convenience functions related to allocation.
*/
#include <string.h>
#include "caml/alloc.h"
#include "caml/custom.h"
#include "caml/major_gc.h"
#include "caml/memory.h"
#include "caml/mlvalues.h"
#include "caml/stacks.h"
#define Setup_for_gc
#define Restore_after_gc
CAMLexport value caml_alloc (mlsize_t wosize, tag_t tag)
{
value result;
mlsize_t i;
Assert (tag < 256);
Assert (tag != Infix_tag);
if (wosize == 0){
result = Atom (tag);
}else if (wosize <= Max_young_wosize){
Alloc_small (result, wosize, tag);
if (tag < No_scan_tag){
for (i = 0; i < wosize; i++) Field (result, i) = Val_unit;
}
}else{
result = caml_alloc_shr (wosize, tag);
if (tag < No_scan_tag){
for (i = 0; i < wosize; i++) Field (result, i) = Val_unit;
}
result = caml_check_urgent_gc (result);
}
return result;
}
CAMLexport value caml_alloc_small (mlsize_t wosize, tag_t tag)
{
value result;
Assert (wosize > 0);
Assert (wosize <= Max_young_wosize);
Assert (tag < 256);
Alloc_small (result, wosize, tag);
return result;
}
/* [n] is a number of words (fields) */
CAMLexport value caml_alloc_tuple(mlsize_t n)
{
return caml_alloc(n, 0);
}
/* [len] is a number of bytes (chars) */
CAMLexport value caml_alloc_string (mlsize_t len)
{
value result;
mlsize_t offset_index;
mlsize_t wosize = (len + sizeof (value)) / sizeof (value);
if (wosize <= Max_young_wosize) {
Alloc_small (result, wosize, String_tag);
}else{
result = caml_alloc_shr (wosize, String_tag);
result = caml_check_urgent_gc (result);
}
Field (result, wosize - 1) = 0;
offset_index = Bsize_wsize (wosize) - 1;
Byte (result, offset_index) = offset_index - len;
return result;
}
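/* Editor's note: a worked example of the length encoding above, assuming
   64-bit values (sizeof(value) == 8). For len = 5:
     wosize       = (5 + 8) / 8    = 1 word, i.e. 8 data bytes
     offset_index = 8*1 - 1        = 7 (the last byte)
     Byte 7       = 7 - 5          = 2 (the padding count)
   The whole last word (here the only word) is cleared first, so bytes 5..6
   are zero, and the length is recovered as
   Bsize_wsize(wosize) - 1 - Byte[7] = 8 - 1 - 2 = 5.
   Note also that (len + sizeof(value)) can wrap for len near the top of the
   address space, so callers are expected to pass sane lengths. */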
/* [len] is a number of words.
[mem] and [max] are relative (without unit).
*/
CAMLexport value caml_alloc_final (mlsize_t len, final_fun fun,
mlsize_t mem, mlsize_t max)
{
return caml_alloc_custom(caml_final_custom_operations(fun),
len * sizeof(value), mem, max);
}
CAMLexport value caml_copy_string(char const *s)
{
mlsize_t len; /* mlsize_t, not int: strlen() can exceed INT_MAX on 64-bit */
value res;
len = strlen(s);
res = caml_alloc_string(len);
memmove(String_val(res), s, len);
return res;
}
CAMLexport value caml_alloc_array(value (*funct)(char const *),
char const ** arr)
{
CAMLparam0 ();
mlsize_t nbr, n;
CAMLlocal2 (v, result);
nbr = 0;
while (arr[nbr] != 0) nbr++;
if (nbr == 0) {
CAMLreturn (Atom(0));
} else {
result = caml_alloc (nbr, 0);
for (n = 0; n < nbr; n++) {
/* The two statements below must be separate because of evaluation
order (don't take the address &Field(result, n) before
calling funct, which may cause a GC and move result). */
v = funct(arr[n]);
caml_modify(&Field(result, n), v);
}
CAMLreturn (result);
}
}
CAMLexport value caml_copy_string_array(char const ** arr)
{
return caml_alloc_array(caml_copy_string, arr);
}
CAMLexport int caml_convert_flag_list(value list, int *flags)
{
int res;
res = 0;
while (list != Val_int(0)) {
res |= flags[Int_val(Field(list, 0))];
list = Field(list, 1);
}
return res;
}
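/* Editor's note: illustration -- if the C-side table is flags = {0x1, 0x2,
   0x4} and [list] is the OCaml list [0; 2], the loop ORs flags[0] and
   flags[2] and returns 0x1 | 0x4 = 0x5. Constructor indices in the OCaml
   list select entries of the flag table. */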
/* For compiling let rec over values */
/* [size] is a [value] representing number of words (fields) */
CAMLprim value caml_alloc_dummy(value size)
{
mlsize_t wosize = Int_val(size);
if (wosize == 0) return Atom(0);
return caml_alloc (wosize, 0);
}
/* [size] is a [value] representing number of words (fields) */
CAMLprim value caml_alloc_dummy_function(value size,value arity)
{
/* the arity argument is used by the js_of_ocaml runtime */
return caml_alloc_dummy(size);
}
/* [size] is a [value] representing number of floats. */
CAMLprim value caml_alloc_dummy_float (value size)
{
mlsize_t wosize = Int_val(size) * Double_wosize;
if (wosize == 0) return Atom(0);
return caml_alloc (wosize, 0);
}
CAMLprim value caml_update_dummy(value dummy, value newval)
{
mlsize_t size, i;
tag_t tag;
size = Wosize_val(newval);
tag = Tag_val (newval);
Assert (size == Wosize_val(dummy));
Assert (tag < No_scan_tag || tag == Double_array_tag);
Tag_val(dummy) = tag;
if (tag == Double_array_tag){
size = Wosize_val (newval) / Double_wosize;
for (i = 0; i < size; i++){
Store_double_field (dummy, i, Double_field (newval, i));
}
}else{
for (i = 0; i < size; i++){
caml_modify (&Field(dummy, i), Field(newval, i));
}
}
return Val_unit;
}
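/* Editor's note: these two primitives back the compilation of [let rec] over
   values. For example, [let rec xs = 1 :: xs] is compiled roughly as:
   allocate a placeholder with caml_alloc_dummy 2, build the real cons cell
   (whose tail points at the placeholder), then caml_update_dummy copies the
   cell's fields over the placeholder -- closing the cycle without the GC
   ever observing an uninitialized field. */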
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_1829_2 |
crossvul-cpp_data_good_1766_0 | /*
* Copyright 2003 Digi International (www.digi.com)
* Scott H Kilau <Scott_Kilau at digi dot com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
* implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*/
/************************************************************************
*
* This file implements the mgmt functionality for the
* Neo and ClassicBoard based product lines.
*
************************************************************************
*/
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/sched.h> /* For jiffies, task states */
#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
#include <linux/serial_reg.h>
#include <linux/termios.h>
#include <linux/uaccess.h> /* For copy_from_user/copy_to_user */
#include "dgnc_driver.h"
#include "dgnc_pci.h"
#include "dgnc_mgmt.h"
/* Our "in use" variables, to enforce 1 open only */
static int dgnc_mgmt_in_use[MAXMGMTDEVICES];
/*
* dgnc_mgmt_open()
*
* Open the mgmt/downld/dpa device
*/
int dgnc_mgmt_open(struct inode *inode, struct file *file)
{
unsigned long flags;
unsigned int minor = iminor(inode);
spin_lock_irqsave(&dgnc_global_lock, flags);
/* mgmt device */
if (minor < MAXMGMTDEVICES) {
/* Only allow 1 open at a time on mgmt device */
if (dgnc_mgmt_in_use[minor]) {
spin_unlock_irqrestore(&dgnc_global_lock, flags);
return -EBUSY;
}
dgnc_mgmt_in_use[minor]++;
} else {
spin_unlock_irqrestore(&dgnc_global_lock, flags);
return -ENXIO;
}
spin_unlock_irqrestore(&dgnc_global_lock, flags);
return 0;
}
/*
* dgnc_mgmt_close()
*
* Open the mgmt/dpa device
*/
int dgnc_mgmt_close(struct inode *inode, struct file *file)
{
unsigned long flags;
unsigned int minor = iminor(inode);
spin_lock_irqsave(&dgnc_global_lock, flags);
/* mgmt device */
if (minor < MAXMGMTDEVICES) {
if (dgnc_mgmt_in_use[minor])
dgnc_mgmt_in_use[minor] = 0;
}
spin_unlock_irqrestore(&dgnc_global_lock, flags);
return 0;
}
/*
* dgnc_mgmt_ioctl()
*
* ioctl the mgmt/dpa device
*/
long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
unsigned long flags;
void __user *uarg = (void __user *)arg;
switch (cmd) {
case DIGI_GETDD:
{
/*
* This returns the total number of boards
* in the system, as well as driver version
* and has space for a reserved entry
*/
struct digi_dinfo ddi;
spin_lock_irqsave(&dgnc_global_lock, flags);
memset(&ddi, 0, sizeof(ddi));
ddi.dinfo_nboards = dgnc_NumBoards;
snprintf(ddi.dinfo_version, sizeof(ddi.dinfo_version), "%s", DG_PART);
spin_unlock_irqrestore(&dgnc_global_lock, flags);
if (copy_to_user(uarg, &ddi, sizeof(ddi)))
return -EFAULT;
break;
}
case DIGI_GETBD:
{
int brd;
struct digi_info di;
if (copy_from_user(&brd, uarg, sizeof(int)))
return -EFAULT;
if (brd < 0 || brd >= dgnc_NumBoards)
return -ENODEV;
memset(&di, 0, sizeof(di));
di.info_bdnum = brd;
spin_lock_irqsave(&dgnc_Board[brd]->bd_lock, flags);
di.info_bdtype = dgnc_Board[brd]->dpatype;
di.info_bdstate = dgnc_Board[brd]->dpastatus;
di.info_ioport = 0;
di.info_physaddr = (ulong)dgnc_Board[brd]->membase;
di.info_physsize = (ulong)dgnc_Board[brd]->membase_end
- dgnc_Board[brd]->membase;
if (dgnc_Board[brd]->state != BOARD_FAILED)
di.info_nports = dgnc_Board[brd]->nasync;
else
di.info_nports = 0;
spin_unlock_irqrestore(&dgnc_Board[brd]->bd_lock, flags);
if (copy_to_user(uarg, &di, sizeof(di)))
return -EFAULT;
break;
}
case DIGI_GET_NI_INFO:
{
struct channel_t *ch;
struct ni_info ni;
unsigned char mstat = 0;
uint board = 0;
uint channel = 0;
if (copy_from_user(&ni, uarg, sizeof(ni)))
return -EFAULT;
board = ni.board;
channel = ni.channel;
/* Verify boundaries on board */
if (board >= dgnc_NumBoards)
return -ENODEV;
/* Verify boundaries on channel */
if (channel >= dgnc_Board[board]->nasync)
return -ENODEV;
ch = dgnc_Board[board]->channels[channel];
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
return -ENODEV;
memset(&ni, 0, sizeof(ni));
ni.board = board;
ni.channel = channel;
spin_lock_irqsave(&ch->ch_lock, flags);
mstat = (ch->ch_mostat | ch->ch_mistat);
if (mstat & UART_MCR_DTR) {
ni.mstat |= TIOCM_DTR;
ni.dtr = TIOCM_DTR;
}
if (mstat & UART_MCR_RTS) {
ni.mstat |= TIOCM_RTS;
ni.rts = TIOCM_RTS;
}
if (mstat & UART_MSR_CTS) {
ni.mstat |= TIOCM_CTS;
ni.cts = TIOCM_CTS;
}
if (mstat & UART_MSR_RI) {
ni.mstat |= TIOCM_RI;
ni.ri = TIOCM_RI;
}
if (mstat & UART_MSR_DCD) {
ni.mstat |= TIOCM_CD;
ni.dcd = TIOCM_CD;
}
if (mstat & UART_MSR_DSR)
ni.mstat |= TIOCM_DSR;
ni.iflag = ch->ch_c_iflag;
ni.oflag = ch->ch_c_oflag;
ni.cflag = ch->ch_c_cflag;
ni.lflag = ch->ch_c_lflag;
if (ch->ch_digi.digi_flags & CTSPACE ||
ch->ch_c_cflag & CRTSCTS)
ni.hflow = 1;
else
ni.hflow = 0;
if ((ch->ch_flags & CH_STOPI) ||
(ch->ch_flags & CH_FORCED_STOPI))
ni.recv_stopped = 1;
else
ni.recv_stopped = 0;
if ((ch->ch_flags & CH_STOP) || (ch->ch_flags & CH_FORCED_STOP))
ni.xmit_stopped = 1;
else
ni.xmit_stopped = 0;
ni.curtx = ch->ch_txcount;
ni.currx = ch->ch_rxcount;
ni.baud = ch->ch_old_baud;
spin_unlock_irqrestore(&ch->ch_lock, flags);
if (copy_to_user(uarg, &ni, sizeof(ni)))
return -EFAULT;
break;
}
}
return 0;
}
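/*
 * Editor's note: the memset() calls before each copy_to_user() above are
 * load-bearing -- struct padding and any fields left unwritten would
 * otherwise carry kernel stack contents out to userspace (a CWE-200 style
 * infoleak). Zeroing the whole reply struct before filling its named fields
 * is the standard pattern for ioctl reply buffers.
 */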
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_1766_0 |
crossvul-cpp_data_bad_2454_0 | /*
* slcan.c - serial line CAN interface driver (using tty line discipline)
*
* This file is derived from linux/drivers/net/slip/slip.c
*
* slip.c Authors : Laurence Culhane <loz@holmes.demon.co.uk>
* Fred N. van Kempen <waltje@uwalt.nl.mugnet.org>
* slcan.c Author : Oliver Hartkopp <socketcan@hartkopp.net>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, see http://www.gnu.org/licenses/gpl.html
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/tty.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/can.h>
#include <linux/can/skb.h>
#include <linux/can/can-ml.h>
MODULE_ALIAS_LDISC(N_SLCAN);
MODULE_DESCRIPTION("serial line CAN interface");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
#define SLCAN_MAGIC 0x53CA
static int maxdev = 10; /* MAX number of SLCAN channels;
This can be overridden with
insmod slcan.ko maxdev=nnn */
module_param(maxdev, int, 0);
MODULE_PARM_DESC(maxdev, "Maximum number of slcan interfaces");
/* maximum rx buffer len: extended CAN frame with timestamp */
#define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r")+1)
#define SLC_CMD_LEN 1
#define SLC_SFF_ID_LEN 3
#define SLC_EFF_ID_LEN 8
struct slcan {
int magic;
/* Various fields. */
struct tty_struct *tty; /* ptr to TTY structure */
struct net_device *dev; /* easy for intr handling */
spinlock_t lock;
struct work_struct tx_work; /* Flushes transmit buffer */
/* These are pointers to the malloc()ed frame buffers. */
unsigned char rbuff[SLC_MTU]; /* receiver buffer */
int rcount; /* received chars counter */
unsigned char xbuff[SLC_MTU]; /* transmitter buffer */
unsigned char *xhead; /* pointer to next XMIT byte */
int xleft; /* bytes left in XMIT queue */
unsigned long flags; /* Flag values/ mode etc */
#define SLF_INUSE 0 /* Channel in use */
#define SLF_ERROR 1 /* Parity, etc. error */
};
static struct net_device **slcan_devs;
/************************************************************************
* SLCAN ENCAPSULATION FORMAT *
************************************************************************/
/*
* A CAN frame has a can_id (11 bit standard frame format OR 29 bit extended
* frame format) a data length code (can_dlc) which can be from 0 to 8
* and up to <can_dlc> data bytes as payload.
* Additionally a CAN frame may become a remote transmission frame if the
* RTR-bit is set. This causes another ECU to send a CAN frame with the
* given can_id.
*
* The SLCAN ASCII representation of these different frame types is:
* <type> <id> <dlc> <data>*
*
* Extended frames (29 bit) are defined by capital characters in the type.
* RTR frames are defined as 'r' types - normal frames have 't' type:
* t => 11 bit data frame
* r => 11 bit RTR frame
* T => 29 bit data frame
* R => 29 bit RTR frame
*
* The <id> is 3 (standard) or 8 (extended) bytes in ASCII Hex (base64).
* The <dlc> is a one byte ASCII number ('0' - '8')
* The <data> section has at much ASCII Hex bytes as defined by the <dlc>
*
* Examples:
*
* t1230 : can_id 0x123, can_dlc 0, no data
* t4563112233 : can_id 0x456, can_dlc 3, data 0x11 0x22 0x33
* T12ABCDEF2AA55 : extended can_id 0x12ABCDEF, can_dlc 2, data 0xAA 0x55
* r1230 : can_id 0x123, can_dlc 0, no data, remote transmission request
*
*/
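/*
 * Editor's note: tracing "t4563112233" through slc_bump() below: the leading
 * 't' selects an 11-bit data frame, bytes 1..3 ("456") parse as can_id 0x456,
 * the byte at offset SLC_CMD_LEN + SLC_SFF_ID_LEN ('3') becomes can_dlc 3,
 * and the remaining hex pairs "11 22 33" fill data[0..2].
 */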
/************************************************************************
* STANDARD SLCAN DECAPSULATION *
************************************************************************/
/* Send one completely decapsulated can_frame to the network layer */
static void slc_bump(struct slcan *sl)
{
struct sk_buff *skb;
struct can_frame cf;
int i, tmp;
u32 tmpid;
char *cmd = sl->rbuff;
/* zero the whole frame, including padding and reserved bytes, so no
* uninitialized stack data can leak into the skb */
memset(&cf, 0, sizeof(cf));
switch (*cmd) {
case 'r':
cf.can_id = CAN_RTR_FLAG;
/* fallthrough */
case 't':
/* store dlc ASCII value and terminate SFF CAN ID string */
cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN];
sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN] = 0;
/* point to payload data behind the dlc */
cmd += SLC_CMD_LEN + SLC_SFF_ID_LEN + 1;
break;
case 'R':
cf.can_id = CAN_RTR_FLAG;
/* fallthrough */
case 'T':
cf.can_id |= CAN_EFF_FLAG;
/* store dlc ASCII value and terminate EFF CAN ID string */
cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN];
sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN] = 0;
/* point to payload data behind the dlc */
cmd += SLC_CMD_LEN + SLC_EFF_ID_LEN + 1;
break;
default:
return;
}
if (kstrtou32(sl->rbuff + SLC_CMD_LEN, 16, &tmpid))
return;
cf.can_id |= tmpid;
/* get can_dlc from sanitized ASCII value */
if (cf.can_dlc >= '0' && cf.can_dlc < '9')
cf.can_dlc -= '0';
else
return;
/* payload already zeroed by the memset above */
/* RTR frames may have a dlc > 0 but they never have any data bytes */
if (!(cf.can_id & CAN_RTR_FLAG)) {
for (i = 0; i < cf.can_dlc; i++) {
tmp = hex_to_bin(*cmd++);
if (tmp < 0)
return;
cf.data[i] = (tmp << 4);
tmp = hex_to_bin(*cmd++);
if (tmp < 0)
return;
cf.data[i] |= tmp;
}
}
skb = dev_alloc_skb(sizeof(struct can_frame) +
sizeof(struct can_skb_priv));
if (!skb)
return;
skb->dev = sl->dev;
skb->protocol = htons(ETH_P_CAN);
skb->pkt_type = PACKET_BROADCAST;
skb->ip_summed = CHECKSUM_UNNECESSARY;
can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = sl->dev->ifindex;
can_skb_prv(skb)->skbcnt = 0;
skb_put_data(skb, &cf, sizeof(struct can_frame));
sl->dev->stats.rx_packets++;
sl->dev->stats.rx_bytes += cf.can_dlc;
netif_rx_ni(skb);
}
/* parse tty input stream */
static void slcan_unesc(struct slcan *sl, unsigned char s)
{
if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */
if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
(sl->rcount > 4)) {
slc_bump(sl);
}
sl->rcount = 0;
} else {
if (!test_bit(SLF_ERROR, &sl->flags)) {
if (sl->rcount < SLC_MTU) {
sl->rbuff[sl->rcount++] = s;
return;
} else {
sl->dev->stats.rx_over_errors++;
set_bit(SLF_ERROR, &sl->flags);
}
}
}
}
/************************************************************************
* STANDARD SLCAN ENCAPSULATION *
************************************************************************/
/* Encapsulate one can_frame and stuff into a TTY queue. */
static void slc_encaps(struct slcan *sl, struct can_frame *cf)
{
int actual, i;
unsigned char *pos;
unsigned char *endpos;
canid_t id = cf->can_id;
pos = sl->xbuff;
if (cf->can_id & CAN_RTR_FLAG)
*pos = 'R'; /* becomes 'r' in standard frame format (SFF) */
else
*pos = 'T'; /* becomes 't' in standard frame format (SSF) */
/* determine number of chars for the CAN-identifier */
if (cf->can_id & CAN_EFF_FLAG) {
id &= CAN_EFF_MASK;
endpos = pos + SLC_EFF_ID_LEN;
} else {
*pos |= 0x20; /* convert R/T to lower case for SFF */
id &= CAN_SFF_MASK;
endpos = pos + SLC_SFF_ID_LEN;
}
/* build 3 (SFF) or 8 (EFF) digit CAN identifier */
pos++;
while (endpos >= pos) {
*endpos-- = hex_asc_upper[id & 0xf];
id >>= 4;
}
pos += (cf->can_id & CAN_EFF_FLAG) ? SLC_EFF_ID_LEN : SLC_SFF_ID_LEN;
*pos++ = cf->can_dlc + '0';
/* RTR frames may have a dlc > 0 but they never have any data bytes */
if (!(cf->can_id & CAN_RTR_FLAG)) {
for (i = 0; i < cf->can_dlc; i++)
pos = hex_byte_pack_upper(pos, cf->data[i]);
}
*pos++ = '\r';
/* Order of next two lines is *very* important.
* When we are sending a little amount of data,
* the transfer may be completed inside the ops->write()
* routine, because it's running with interrupts enabled.
* In this case we *never* got WRITE_WAKEUP event,
* if we did not request it before write operation.
* 14 Oct 1994 Dmitry Gorodchanin.
*/
set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
actual = sl->tty->ops->write(sl->tty, sl->xbuff, pos - sl->xbuff);
sl->xleft = (pos - sl->xbuff) - actual;
sl->xhead = sl->xbuff + actual;
sl->dev->stats.tx_bytes += cf->can_dlc;
}
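/*
 * Editor's note: the ID digits above are written back-to-front -- endpos
 * starts at the last ID position and walks left while the id is shifted
 * right a nibble at a time. Encoding can_id 0x456, can_dlc 3, data
 * {0x11, 0x22, 0x33} in SFF therefore produces "t4563112233\r", the exact
 * inverse of the slc_bump() example earlier.
 */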
/* Write out any remaining transmit buffer. Scheduled when tty is writable */
static void slcan_transmit(struct work_struct *work)
{
struct slcan *sl = container_of(work, struct slcan, tx_work);
int actual;
spin_lock_bh(&sl->lock);
/* First make sure we're connected. */
if (!sl->tty || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) {
spin_unlock_bh(&sl->lock);
return;
}
if (sl->xleft <= 0) {
/* Now serial buffer is almost free & we can start
* transmission of another packet */
sl->dev->stats.tx_packets++;
clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
spin_unlock_bh(&sl->lock);
netif_wake_queue(sl->dev);
return;
}
actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
sl->xleft -= actual;
sl->xhead += actual;
spin_unlock_bh(&sl->lock);
}
/*
* Called by the driver when there's room for more data.
* Schedule the transmit.
*/
static void slcan_write_wakeup(struct tty_struct *tty)
{
struct slcan *sl;
rcu_read_lock();
sl = rcu_dereference(tty->disc_data);
if (sl)
schedule_work(&sl->tx_work);
rcu_read_unlock();
}
/* Send a can_frame to a TTY queue. */
static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct slcan *sl = netdev_priv(dev);
if (skb->len != CAN_MTU)
goto out;
spin_lock(&sl->lock);
if (!netif_running(dev)) {
spin_unlock(&sl->lock);
printk(KERN_WARNING "%s: xmit: iface is down\n", dev->name);
goto out;
}
if (sl->tty == NULL) {
spin_unlock(&sl->lock);
goto out;
}
netif_stop_queue(sl->dev);
slc_encaps(sl, (struct can_frame *) skb->data); /* encaps & send */
spin_unlock(&sl->lock);
out:
kfree_skb(skb);
return NETDEV_TX_OK;
}
/******************************************
* Routines looking at netdevice side.
******************************************/
/* Netdevice UP -> DOWN routine */
static int slc_close(struct net_device *dev)
{
struct slcan *sl = netdev_priv(dev);
spin_lock_bh(&sl->lock);
if (sl->tty) {
/* TTY discipline is running. */
clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
}
netif_stop_queue(dev);
sl->rcount = 0;
sl->xleft = 0;
spin_unlock_bh(&sl->lock);
return 0;
}
/* Netdevice DOWN -> UP routine */
static int slc_open(struct net_device *dev)
{
struct slcan *sl = netdev_priv(dev);
if (sl->tty == NULL)
return -ENODEV;
sl->flags &= (1 << SLF_INUSE);
netif_start_queue(dev);
return 0;
}
/* Hook the destructor so we can free slcan devs at the right point in time */
static void slc_free_netdev(struct net_device *dev)
{
int i = dev->base_addr;
slcan_devs[i] = NULL;
}
static int slcan_change_mtu(struct net_device *dev, int new_mtu)
{
return -EINVAL;
}
static const struct net_device_ops slc_netdev_ops = {
.ndo_open = slc_open,
.ndo_stop = slc_close,
.ndo_start_xmit = slc_xmit,
.ndo_change_mtu = slcan_change_mtu,
};
static void slc_setup(struct net_device *dev)
{
dev->netdev_ops = &slc_netdev_ops;
dev->needs_free_netdev = true;
dev->priv_destructor = slc_free_netdev;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->tx_queue_len = 10;
dev->mtu = CAN_MTU;
dev->type = ARPHRD_CAN;
/* New-style flags. */
dev->flags = IFF_NOARP;
dev->features = NETIF_F_HW_CSUM;
}
/******************************************
Routines looking at TTY side.
******************************************/
/*
* Handle the 'receiver data ready' interrupt.
* This function is called by the 'tty_io' module in the kernel when
* a block of SLCAN data has been received, which can now be decapsulated
* and sent on to some IP layer for further processing. This will not
* be re-entered while running but other ldisc functions may be called
* in parallel
*/
static void slcan_receive_buf(struct tty_struct *tty,
const unsigned char *cp, char *fp, int count)
{
struct slcan *sl = (struct slcan *) tty->disc_data;
if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
return;
/* Read the characters out of the buffer */
while (count--) {
if (fp && *fp++) {
if (!test_and_set_bit(SLF_ERROR, &sl->flags))
sl->dev->stats.rx_errors++;
cp++;
continue;
}
slcan_unesc(sl, *cp++);
}
}
/************************************
* slcan_open helper routines.
************************************/
/* Collect hanged up channels */
static void slc_sync(void)
{
int i;
struct net_device *dev;
struct slcan *sl;
for (i = 0; i < maxdev; i++) {
dev = slcan_devs[i];
if (dev == NULL)
break;
sl = netdev_priv(dev);
if (sl->tty)
continue;
if (dev->flags & IFF_UP)
dev_close(dev);
}
}
/* Find a free SLCAN channel, and link in this `tty' line. */
static struct slcan *slc_alloc(void)
{
int i;
char name[IFNAMSIZ];
struct net_device *dev = NULL;
struct slcan *sl;
int size;
for (i = 0; i < maxdev; i++) {
dev = slcan_devs[i];
if (dev == NULL)
break;
}
/* Sorry, too many, all slots in use */
if (i >= maxdev)
return NULL;
snprintf(name, sizeof(name), "slcan%d", i);
size = ALIGN(sizeof(*sl), NETDEV_ALIGN) + sizeof(struct can_ml_priv);
dev = alloc_netdev(size, name, NET_NAME_UNKNOWN, slc_setup);
if (!dev)
return NULL;
dev->base_addr = i;
sl = netdev_priv(dev);
dev->ml_priv = (void *)sl + ALIGN(sizeof(*sl), NETDEV_ALIGN);
/* Initialize channel control data */
sl->magic = SLCAN_MAGIC;
sl->dev = dev;
spin_lock_init(&sl->lock);
INIT_WORK(&sl->tx_work, slcan_transmit);
slcan_devs[i] = dev;
return sl;
}
/*
* Open the high-level part of the SLCAN channel.
* This function is called by the TTY module when the
* SLCAN line discipline is called for. Because we are
* sure the tty line exists, we only have to link it to
* a free SLCAN channel...
*
* Called in process context serialized from other ldisc calls.
*/
static int slcan_open(struct tty_struct *tty)
{
struct slcan *sl;
int err;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (tty->ops->write == NULL)
return -EOPNOTSUPP;
/* RTnetlink lock is misused here to serialize concurrent
opens of slcan channels. There are better ways, but it is
the simplest one.
*/
rtnl_lock();
/* Collect hanged up channels. */
slc_sync();
sl = tty->disc_data;
err = -EEXIST;
/* First make sure we're not already connected. */
if (sl && sl->magic == SLCAN_MAGIC)
goto err_exit;
/* OK. Find a free SLCAN channel to use. */
err = -ENFILE;
sl = slc_alloc();
if (sl == NULL)
goto err_exit;
sl->tty = tty;
tty->disc_data = sl;
if (!test_bit(SLF_INUSE, &sl->flags)) {
/* Perform the low-level SLCAN initialization. */
sl->rcount = 0;
sl->xleft = 0;
set_bit(SLF_INUSE, &sl->flags);
err = register_netdevice(sl->dev);
if (err)
goto err_free_chan;
}
/* Done. We have linked the TTY line to a channel. */
rtnl_unlock();
tty->receive_room = 65536; /* We don't flow control */
/* TTY layer expects 0 on success */
return 0;
err_free_chan:
sl->tty = NULL;
tty->disc_data = NULL;
clear_bit(SLF_INUSE, &sl->flags);
slc_free_netdev(sl->dev);
/* do not call free_netdev before rtnl_unlock */
rtnl_unlock();
free_netdev(sl->dev);
return err;
err_exit:
rtnl_unlock();
/* Count references from TTY module */
return err;
}
/*
* Close down a SLCAN channel.
* This means flushing out any pending queues, and then returning. This
* call is serialized against other ldisc functions.
*
* We also use this method for a hangup event.
*/
static void slcan_close(struct tty_struct *tty)
{
struct slcan *sl = (struct slcan *) tty->disc_data;
/* First make sure we're connected. */
if (!sl || sl->magic != SLCAN_MAGIC || sl->tty != tty)
return;
spin_lock_bh(&sl->lock);
rcu_assign_pointer(tty->disc_data, NULL);
sl->tty = NULL;
spin_unlock_bh(&sl->lock);
synchronize_rcu();
flush_work(&sl->tx_work);
/* Flush network side */
unregister_netdev(sl->dev);
/* This will complete via sl_free_netdev */
}
static int slcan_hangup(struct tty_struct *tty)
{
slcan_close(tty);
return 0;
}
/* Perform I/O control on an active SLCAN channel. */
static int slcan_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg)
{
struct slcan *sl = (struct slcan *) tty->disc_data;
unsigned int tmp;
/* First make sure we're connected. */
if (!sl || sl->magic != SLCAN_MAGIC)
return -EINVAL;
switch (cmd) {
case SIOCGIFNAME:
tmp = strlen(sl->dev->name) + 1;
if (copy_to_user((void __user *)arg, sl->dev->name, tmp))
return -EFAULT;
return 0;
case SIOCSIFHWADDR:
return -EINVAL;
default:
return tty_mode_ioctl(tty, file, cmd, arg);
}
}
static struct tty_ldisc_ops slc_ldisc = {
.owner = THIS_MODULE,
.magic = TTY_LDISC_MAGIC,
.name = "slcan",
.open = slcan_open,
.close = slcan_close,
.hangup = slcan_hangup,
.ioctl = slcan_ioctl,
.receive_buf = slcan_receive_buf,
.write_wakeup = slcan_write_wakeup,
};
static int __init slcan_init(void)
{
int status;
if (maxdev < 4)
maxdev = 4; /* Sanity */
pr_info("slcan: serial line CAN interface driver\n");
pr_info("slcan: %d dynamic interface channels.\n", maxdev);
slcan_devs = kcalloc(maxdev, sizeof(struct net_device *), GFP_KERNEL);
if (!slcan_devs)
return -ENOMEM;
/* Fill in our line protocol discipline, and register it */
status = tty_register_ldisc(N_SLCAN, &slc_ldisc);
if (status) {
printk(KERN_ERR "slcan: can't register line discipline\n");
kfree(slcan_devs);
}
return status;
}
static void __exit slcan_exit(void)
{
int i;
struct net_device *dev;
struct slcan *sl;
unsigned long timeout = jiffies + HZ;
int busy = 0;
if (slcan_devs == NULL)
return;
/* First of all: check for active disciplines and hangup them.
*/
do {
if (busy)
msleep_interruptible(100);
busy = 0;
for (i = 0; i < maxdev; i++) {
dev = slcan_devs[i];
if (!dev)
continue;
sl = netdev_priv(dev);
spin_lock_bh(&sl->lock);
if (sl->tty) {
busy++;
tty_hangup(sl->tty);
}
spin_unlock_bh(&sl->lock);
}
} while (busy && time_before(jiffies, timeout));
/* FIXME: hangup is async so we should wait when doing this second
phase */
for (i = 0; i < maxdev; i++) {
dev = slcan_devs[i];
if (!dev)
continue;
slcan_devs[i] = NULL;
sl = netdev_priv(dev);
if (sl->tty) {
printk(KERN_ERR "%s: tty discipline still running\n",
dev->name);
}
unregister_netdev(dev);
}
kfree(slcan_devs);
slcan_devs = NULL;
i = tty_unregister_ldisc(N_SLCAN);
if (i)
printk(KERN_ERR "slcan: can't unregister ldisc (err %d)\n", i);
}
module_init(slcan_init);
module_exit(slcan_exit);
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_2454_0 |
crossvul-cpp_data_good_2737_0 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% M M AAA TTTTT L AAA BBBB %
% MM MM A A T L A A B B %
% M M M AAAAA T L AAAAA BBBB %
% M M A A T L A A B B %
% M M A A T LLLLL A A BBBB %
% %
% %
% Read MATLAB Image Format %
% %
% Software Design %
% Jaroslav Fojtik %
% 2001-2008 %
% %
% %
% Permission is hereby granted, free of charge, to any person obtaining a %
% copy of this software and associated documentation files ("ImageMagick"), %
% to deal in ImageMagick without restriction, including without limitation %
% the rights to use, copy, modify, merge, publish, distribute, sublicense, %
% and/or sell copies of ImageMagick, and to permit persons to whom the %
% ImageMagick is furnished to do so, subject to the following conditions: %
% %
% The above copyright notice and this permission notice shall be included in %
% all copies or substantial portions of ImageMagick. %
% %
% The software is provided "as is", without warranty of any kind, express or %
% implied, including but not limited to the warranties of merchantability, %
% fitness for a particular purpose and noninfringement. In no event shall %
% ImageMagick Studio be liable for any claim, damages or other liability, %
% whether in an action of contract, tort or otherwise, arising from, out of %
% or in connection with ImageMagick or the use or other dealings in %
% ImageMagick. %
% %
% Except as contained in this notice, the name of the ImageMagick Studio %
% shall not be used in advertising or otherwise to promote the sale, use or %
% other dealings in ImageMagick without prior written authorization from the %
% ImageMagick Studio. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/distort.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/resource_.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/module.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility-private.h"
#if defined(MAGICKCORE_ZLIB_DELEGATE)
#include "zlib.h"
#endif
/*
Forward declaration.
*/
static MagickBooleanType
WriteMATImage(const ImageInfo *,Image *,ExceptionInfo *);
/* Auto-coloring method; note that this introduces some artefacts into the data:
MinReal+j*MaxComplex = red MaxReal+j*MaxComplex = black
MinReal+j*0 = white MaxReal+j*0 = black
MinReal+j*MinComplex = blue MaxReal+j*MinComplex = black
*/
typedef struct
{
char identific[124];
unsigned short Version;
char EndianIndicator[2];
unsigned long DataType;
unsigned long ObjectSize;
unsigned long unknown1;
unsigned long unknown2;
unsigned short unknown5;
unsigned char StructureFlag;
unsigned char StructureClass;
unsigned long unknown3;
unsigned long unknown4;
unsigned long DimFlag;
unsigned long SizeX;
unsigned long SizeY;
unsigned short Flag1;
unsigned short NameFlag;
}
MATHeader;
static const char *MonthsTab[12]={"Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"};
static const char *DayOfWTab[7]={"Sun","Mon","Tue","Wed","Thu","Fri","Sat"};
static const char *OsDesc=
#if defined(MAGICKCORE_WINDOWS_SUPPORT)
"PCWIN";
#else
#ifdef __APPLE__
"MAC";
#else
"LNX86";
#endif
#endif
typedef enum
{
miINT8 = 1, /* 8 bit signed */
miUINT8, /* 8 bit unsigned */
miINT16, /* 16 bit signed */
miUINT16, /* 16 bit unsigned */
miINT32, /* 32 bit signed */
miUINT32, /* 32 bit unsigned */
miSINGLE, /* IEEE 754 single precision float */
miRESERVE1,
miDOUBLE, /* IEEE 754 double precision float */
miRESERVE2,
miRESERVE3,
miINT64, /* 64 bit signed */
miUINT64, /* 64 bit unsigned */
miMATRIX, /* MATLAB array */
miCOMPRESSED, /* Compressed Data */
miUTF8, /* Unicode UTF-8 Encoded Character Data */
miUTF16, /* Unicode UTF-16 Encoded Character Data */
miUTF32 /* Unicode UTF-32 Encoded Character Data */
} mat5_data_type;
typedef enum
{
mxCELL_CLASS=1, /* cell array */
mxSTRUCT_CLASS, /* structure */
mxOBJECT_CLASS, /* object */
mxCHAR_CLASS, /* character array */
mxSPARSE_CLASS, /* sparse array */
mxDOUBLE_CLASS, /* double precision array */
mxSINGLE_CLASS, /* single precision floating point */
mxINT8_CLASS, /* 8 bit signed integer */
mxUINT8_CLASS, /* 8 bit unsigned integer */
mxINT16_CLASS, /* 16 bit signed integer */
mxUINT16_CLASS, /* 16 bit unsigned integer */
mxINT32_CLASS, /* 32 bit signed integer */
mxUINT32_CLASS, /* 32 bit unsigned integer */
mxINT64_CLASS, /* 64 bit signed integer */
mxUINT64_CLASS, /* 64 bit unsigned integer */
mxFUNCTION_CLASS /* Function handle */
} arrayclasstype;
#define FLAG_COMPLEX 0x8
#define FLAG_GLOBAL 0x4
#define FLAG_LOGICAL 0x2
static const QuantumType z2qtype[4] = {GrayQuantum, BlueQuantum, GreenQuantum, RedQuantum};
static void InsertComplexDoubleRow(Image *image,double *p,int y,double MinVal,
double MaxVal,ExceptionInfo *exception)
{
double f;
int x;
register Quantum *q;
if (MinVal == 0)
MinVal = -1;
if (MaxVal == 0)
MaxVal = 1;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
return;
for (x = 0; x < (ssize_t) image->columns; x++)
{
if (*p > 0)
{
f = (*p / MaxVal) * (QuantumRange-GetPixelRed(image,q));
if (f + GetPixelRed(image,q) > QuantumRange)
SetPixelRed(image,QuantumRange,q);
else
SetPixelRed(image,GetPixelRed(image,q)+(int) f,q);
if ((int) f / 2.0 > GetPixelGreen(image,q))
{
SetPixelGreen(image,0,q);
SetPixelBlue(image,0,q);
}
else
{
SetPixelBlue(image,GetPixelBlue(image,q)-(int) (f/2.0),q);
SetPixelGreen(image,GetPixelBlue(image,q),q);
}
}
if (*p < 0)
{
f = (*p / MaxVal) * (QuantumRange-GetPixelBlue(image,q));
if (f+GetPixelBlue(image,q) > QuantumRange)
SetPixelBlue(image,QuantumRange,q);
else
SetPixelBlue(image,GetPixelBlue(image,q)+(int) f,q);
if ((int) f / 2.0 > GetPixelGreen(image,q))
{
SetPixelRed(image,0,q);
SetPixelGreen(image,0,q);
}
else
{
SetPixelRed(image,GetPixelRed(image,q)-(int) (f/2.0),q);
SetPixelGreen(image,GetPixelRed(image,q),q);
}
}
p++;
q+=GetPixelChannels(image);
}
(void) SyncAuthenticPixels(image,exception);
}
static void InsertComplexFloatRow(Image *image,float *p,int y,double MinVal,
double MaxVal,ExceptionInfo *exception)
{
double f;
int x;
register Quantum *q;
if (MinVal == 0)
MinVal = -1;
if (MaxVal == 0)
MaxVal = 1;
q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception);
if (q == (Quantum *) NULL)
return;
for (x = 0; x < (ssize_t) image->columns; x++)
{
if (*p > 0)
{
f = (*p / MaxVal) * (QuantumRange-GetPixelRed(image,q));
if (f+GetPixelRed(image,q) > QuantumRange)
SetPixelRed(image,QuantumRange,q);
else
SetPixelRed(image,GetPixelRed(image,q)+(int) f,q);
if ((int) f / 2.0 > GetPixelGreen(image,q))
{
SetPixelGreen(image,0,q);
SetPixelBlue(image,0,q);
}
else
{
SetPixelBlue(image,GetPixelBlue(image,q)-(int) (f/2.0),q);
SetPixelGreen(image,GetPixelBlue(image,q),q);
}
}
if (*p < 0)
{
f = (*p / MaxVal) * (QuantumRange - GetPixelBlue(image,q));
if (f + GetPixelBlue(image,q) > QuantumRange)
SetPixelBlue(image,QuantumRange,q);
else
SetPixelBlue(image,GetPixelBlue(image,q)+
(int) f,q);
if ((int) f / 2.0 > GetPixelGreen(image,q))
{
SetPixelGreen(image,0,q);
SetPixelRed(image,0,q);
}
else
{
SetPixelRed(image,GetPixelRed(image,q)-(int) (f/2.0),q);
SetPixelGreen(image,GetPixelRed(image,q),q);
}
}
p++;
q+=GetPixelChannels(image); /* advance by the pixel stride, as in the double variant above */
}
(void) SyncAuthenticPixels(image,exception);
}
/************** READERS ******************/
/* This function reads one block of floats*/
static void ReadBlobFloatsLSB(Image * image, size_t len, float *data)
{
while (len >= 4)
{
*data++ = ReadBlobFloat(image);
len -= sizeof(float);
}
if (len > 0)
(void) SeekBlob(image, len, SEEK_CUR);
}
static void ReadBlobFloatsMSB(Image * image, size_t len, float *data)
{
while (len >= 4)
{
*data++ = ReadBlobFloat(image);
len -= sizeof(float);
}
if (len > 0)
(void) SeekBlob(image, len, SEEK_CUR);
}
/* This function reads one block of doubles*/
static void ReadBlobDoublesLSB(Image * image, size_t len, double *data)
{
while (len >= 8)
{
*data++ = ReadBlobDouble(image);
len -= sizeof(double);
}
if (len > 0)
(void) SeekBlob(image, len, SEEK_CUR);
}
static void ReadBlobDoublesMSB(Image * image, size_t len, double *data)
{
while (len >= 8)
{
*data++ = ReadBlobDouble(image);
len -= sizeof(double);
}
if (len > 0)
(void) SeekBlob(image, len, SEEK_CUR);
}
/* Calculate minimum and maximum from a given block of data */
static void CalcMinMax(Image *image, int endian_indicator, int SizeX, int SizeY, size_t CellType, unsigned ldblk, void *BImgBuff, double *Min, double *Max)
{
MagickOffsetType filepos;
int i, x;
void (*ReadBlobDoublesXXX)(Image * image, size_t len, double *data);
void (*ReadBlobFloatsXXX)(Image * image, size_t len, float *data);
double *dblrow;
float *fltrow;
if (endian_indicator == LSBEndian)
{
ReadBlobDoublesXXX = ReadBlobDoublesLSB;
ReadBlobFloatsXXX = ReadBlobFloatsLSB;
}
else /* MI */
{
ReadBlobDoublesXXX = ReadBlobDoublesMSB;
ReadBlobFloatsXXX = ReadBlobFloatsMSB;
}
filepos = TellBlob(image); /* Please note that file seeking occurs only in the case of doubles */
for (i = 0; i < SizeY; i++)
{
if (CellType==miDOUBLE)
{
ReadBlobDoublesXXX(image, ldblk, (double *)BImgBuff);
dblrow = (double *)BImgBuff;
if (i == 0)
{
*Min = *Max = *dblrow;
}
for (x = 0; x < SizeX; x++)
{
if (*Min > *dblrow)
*Min = *dblrow;
if (*Max < *dblrow)
*Max = *dblrow;
dblrow++;
}
}
if (CellType==miSINGLE)
{
ReadBlobFloatsXXX(image, ldblk, (float *)BImgBuff);
fltrow = (float *)BImgBuff;
if (i == 0)
{
*Min = *Max = *fltrow;
}
for (x = 0; x < (ssize_t) SizeX; x++)
{
if (*Min > *fltrow)
*Min = *fltrow;
if (*Max < *fltrow)
*Max = *fltrow;
fltrow++;
}
}
}
(void) SeekBlob(image, filepos, SEEK_SET);
}
static void FixSignedValues(const Image *image,Quantum *q, int y)
{
while(y-->0)
{
/* Please note that negative values will overflow
Q=8; QuantumRange=255: <0;127> + 127+1 = <128; 255>
<-1;-128> + 127+1 = <0; 127> */
SetPixelRed(image,GetPixelRed(image,q)+QuantumRange/2+1,q);
SetPixelGreen(image,GetPixelGreen(image,q)+QuantumRange/2+1,q);
SetPixelBlue(image,GetPixelBlue(image,q)+QuantumRange/2+1,q);
q++;
}
}
/** Fix whole row of logical/binary data. It means pack it. */
static void FixLogical(unsigned char *Buff,int ldblk)
{
unsigned char mask=128;
unsigned char *BuffL = Buff;
unsigned char val = 0;
while(ldblk-->0)
{
if(*Buff++ != 0)
val |= mask;
mask >>= 1;
if(mask==0)
{
*BuffL++ = val;
val = 0;
mask = 128;
}
}
*BuffL = val;
}
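/* Editor's note: packing is MSB-first, so eight input bytes {1,0,0,1,0,0,0,1}
   pack to the single output byte 0b10010001 = 0x91. */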
#if defined(MAGICKCORE_ZLIB_DELEGATE)
static voidpf AcquireZIPMemory(voidpf context,unsigned int items,
unsigned int size)
{
(void) context;
return((voidpf) AcquireQuantumMemory(items,size));
}
static void RelinquishZIPMemory(voidpf context,voidpf memory)
{
(void) context;
memory=RelinquishMagickMemory(memory);
}
#endif
#if defined(MAGICKCORE_ZLIB_DELEGATE)
/** This procedure decompresses an image block for the newer MATLAB format. */
static Image *DecompressBlock(Image *orig, MagickOffsetType Size, ImageInfo *clone_info, ExceptionInfo *exception)
{
Image *image2;
void *CacheBlock, *DecompressBlock;
z_stream zip_info;
FILE *mat_file;
size_t magick_size;
size_t extent;
int file;
int status;
int zip_status;
if(clone_info==NULL) return NULL;
if(clone_info->file) /* Close file opened from previous transaction. */
{
fclose(clone_info->file);
clone_info->file = NULL;
(void) remove_utf8(clone_info->filename);
}
CacheBlock = AcquireQuantumMemory((size_t)((Size<16384)?Size:16384),sizeof(unsigned char *));
if(CacheBlock==NULL) return NULL;
DecompressBlock = AcquireQuantumMemory((size_t)(4096),sizeof(unsigned char *));
if(DecompressBlock==NULL)
{
RelinquishMagickMemory(CacheBlock);
return NULL;
}
mat_file=0;
file = AcquireUniqueFileResource(clone_info->filename);
if (file != -1)
mat_file = fdopen(file,"w");
if(!mat_file)
{
RelinquishMagickMemory(CacheBlock);
RelinquishMagickMemory(DecompressBlock);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Cannot create file stream for decompressed image");
return NULL;
}
zip_info.zalloc=AcquireZIPMemory;
zip_info.zfree=RelinquishZIPMemory;
zip_info.opaque = (voidpf) NULL;
zip_status = inflateInit(&zip_info);
if (zip_status != Z_OK)
{
RelinquishMagickMemory(CacheBlock);
RelinquishMagickMemory(DecompressBlock);
(void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
"UnableToUncompressImage","`%s'",clone_info->filename);
(void) fclose(mat_file);
RelinquishUniqueFileResource(clone_info->filename);
return NULL;
}
zip_info.avail_in = 0;
zip_info.total_out = 0;
while(Size>0 && !EOFBlob(orig))
{
magick_size = ReadBlob(orig, (Size<16384)?Size:16384, (unsigned char *) CacheBlock);
zip_info.next_in = (Bytef *) CacheBlock;
zip_info.avail_in = (uInt) magick_size;
while(zip_info.avail_in>0)
{
zip_info.avail_out = 4096;
zip_info.next_out = (Bytef *) DecompressBlock;
zip_status = inflate(&zip_info,Z_NO_FLUSH);
if ((zip_status != Z_OK) && (zip_status != Z_STREAM_END))
break;
extent=fwrite(DecompressBlock, 4096-zip_info.avail_out, 1, mat_file);
(void) extent;
if(zip_status == Z_STREAM_END) goto DblBreak;
}
if ((zip_status != Z_OK) && (zip_status != Z_STREAM_END))
break;
Size -= magick_size;
}
DblBreak:
inflateEnd(&zip_info);
(void)fclose(mat_file);
RelinquishMagickMemory(CacheBlock);
RelinquishMagickMemory(DecompressBlock);
if((clone_info->file=fopen(clone_info->filename,"rb"))==NULL) goto UnlinkFile;
if( (image2 = AcquireImage(clone_info,exception))==NULL ) goto EraseFile;
status = OpenBlob(clone_info,image2,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
DeleteImageFromList(&image2);
EraseFile:
fclose(clone_info->file);
clone_info->file = NULL;
UnlinkFile:
RelinquishUniqueFileResource(clone_info->filename);
return NULL;
}
return image2;
}
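/*
 * Editor's note: the loop above is the canonical zlib streaming pattern --
 * feed up to 16 KiB of compressed input per outer pass, and drain up to
 * 4 KiB of output per inner pass until inflate() reports Z_STREAM_END. The
 * inflated bytes land in a temporary file that is then re-opened as a
 * regular blob for the caller.
 */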
#endif
static Image *ReadMATImageV4(const ImageInfo *image_info,Image *image,
ExceptionInfo *exception)
{
typedef struct {
unsigned char Type[4];
unsigned int nRows;
unsigned int nCols;
unsigned int imagf;
unsigned int nameLen;
} MAT4_HDR;
long
ldblk;
EndianType
endian;
Image
*rotate_image;
MagickBooleanType
status;
MAT4_HDR
HDR;
QuantumInfo
*quantum_info;
QuantumFormatType
format_type;
register ssize_t
i;
ssize_t
count,
y;
unsigned char
*pixels;
unsigned int
depth;
(void) SeekBlob(image,0,SEEK_SET);
while (EOFBlob(image) == MagickFalse)
{
/*
Object parser loop.
*/
ldblk=ReadBlobLSBLong(image);
if ((ldblk > 9999) || (ldblk < 0))
break;
HDR.Type[3]=ldblk % 10; ldblk /= 10; /* T digit */
HDR.Type[2]=ldblk % 10; ldblk /= 10; /* P digit */
HDR.Type[1]=ldblk % 10; ldblk /= 10; /* O digit */
HDR.Type[0]=ldblk; /* M digit */
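/*
* Editor's note: MAT v4 encodes the header type as a four-decimal-digit
* word, conventionally written "MOPT" -- M (thousands) gives the byte order,
* O is reserved as zero, P selects the element precision, and T the matrix
* kind. E.g. a type word of 0000 is a little-endian full matrix of doubles.
* The digits are peeled off least-significant first into
* HDR.Type[3]..HDR.Type[0] above.
*/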
if (HDR.Type[3] != 0)
break; /* Data format */
if (HDR.Type[2] != 0)
break; /* Always 0 */
if (HDR.Type[0] == 0)
{
HDR.nRows=ReadBlobLSBLong(image);
HDR.nCols=ReadBlobLSBLong(image);
HDR.imagf=ReadBlobLSBLong(image);
HDR.nameLen=ReadBlobLSBLong(image);
endian=LSBEndian;
}
else
{
HDR.nRows=ReadBlobMSBLong(image);
HDR.nCols=ReadBlobMSBLong(image);
HDR.imagf=ReadBlobMSBLong(image);
HDR.nameLen=ReadBlobMSBLong(image);
endian=MSBEndian;
}
if ((HDR.imagf != 0) && (HDR.imagf != 1))
break;
if (HDR.nameLen > 0xFFFF)
return((Image *) NULL);
for (i=0; i < (ssize_t) HDR.nameLen; i++)
{
int
byte;
/*
Skip matrix name.
*/
byte=ReadBlobByte(image);
if (byte == EOF)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
}
image->columns=(size_t) HDR.nRows;
image->rows=(size_t) HDR.nCols;
SetImageColorspace(image,GRAYColorspace,exception);
if (image_info->ping != MagickFalse)
{
Swap(image->columns,image->rows);
return(image);
}
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return((Image *) NULL);
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
return((Image *) NULL);
switch(HDR.Type[1])
{
case 0:
format_type=FloatingPointQuantumFormat;
depth=64;
break;
case 1:
format_type=FloatingPointQuantumFormat;
depth=32;
break;
case 2:
format_type=UnsignedQuantumFormat;
depth=16;
break;
case 3:
format_type=SignedQuantumFormat;
depth=16;
break;
case 4:
format_type=UnsignedQuantumFormat;
depth=8;
break;
default:
format_type=UnsignedQuantumFormat;
depth=8;
break;
}
image->depth=depth;
if (HDR.Type[0] != 0)
SetQuantumEndian(image,quantum_info,MSBEndian);
status=SetQuantumFormat(image,quantum_info,format_type);
status=SetQuantumDepth(image,quantum_info,depth);
status=SetQuantumEndian(image,quantum_info,endian);
SetQuantumScale(quantum_info,1.0);
pixels=(unsigned char *) GetQuantumPixels(quantum_info);
for (y=0; y < (ssize_t) image->rows; y++)
{
int
status;
register Quantum
*magick_restrict q;
count=ReadBlob(image,depth/8*image->columns,(char *) pixels);
if (count == -1)
break;
q=QueueAuthenticPixels(image,0,image->rows-y-1,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,
GrayQuantum,pixels,exception);
if ((HDR.Type[1] == 2) || (HDR.Type[1] == 3))
FixSignedValues(image,q,(int) image->columns);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
if (HDR.imagf == 1)
for (y=0; y < (ssize_t) image->rows; y++)
{
/*
Read complex pixels.
*/
count=ReadBlob(image,depth/8*image->columns,(char *) pixels);
if (count == -1)
break;
if (HDR.Type[1] == 0)
InsertComplexDoubleRow(image,(double *) pixels,y,0,0,exception);
else
InsertComplexFloatRow(image,(float *) pixels,y,0,0,exception);
}
quantum_info=DestroyQuantumInfo(quantum_info);
rotate_image=RotateImage(image,90.0,exception);
if (rotate_image != (Image *) NULL)
{
image=DestroyImage(image);
image=rotate_image;
}
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d M A T L A B i m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadMATImage() reads an MAT X image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadMATImage method is:
%
% Image *ReadMATImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: Method ReadMATImage returns a pointer to the image after
% reading. A null image is returned if there is a memory shortage or if
% the image cannot be read.
%
% o image_info: Specifies a pointer to a ImageInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *ReadMATImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image *image, *image2=NULL,
*rotated_image;
register Quantum *q;
unsigned int status;
MATHeader MATLAB_HDR;
size_t size;
size_t CellType;
QuantumInfo *quantum_info;
ImageInfo *clone_info;
int i;
ssize_t ldblk;
unsigned char *BImgBuff = NULL;
double MinVal, MaxVal;
unsigned z, z2;
unsigned Frames;
int logging;
int sample_size;
MagickOffsetType filepos=0x80;
BlobInfo *blob;
size_t one;
unsigned int (*ReadBlobXXXLong)(Image *image);
unsigned short (*ReadBlobXXXShort)(Image *image);
void (*ReadBlobDoublesXXX)(Image * image, size_t len, double *data);
void (*ReadBlobFloatsXXX)(Image * image, size_t len, float *data);
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
logging = LogMagickEvent(CoderEvent,GetMagickModule(),"enter");
/*
Open image file.
*/
image = AcquireImage(image_info,exception);
status = OpenBlob(image_info, image, ReadBinaryBlobMode, exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read MATLAB image.
*/
clone_info=CloneImageInfo(image_info);
if (ReadBlob(image,124,(unsigned char *) &MATLAB_HDR.identific) != 124)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (strncmp(MATLAB_HDR.identific,"MATLAB",6) != 0)
{
image2=ReadMATImageV4(image_info,image,exception);
if (image2 == NULL)
goto MATLAB_KO;
image=image2;
goto END_OF_READING;
}
MATLAB_HDR.Version = ReadBlobLSBShort(image);
if(ReadBlob(image,2,(unsigned char *) &MATLAB_HDR.EndianIndicator) != 2)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (logging)
(void) LogMagickEvent(CoderEvent,GetMagickModule()," Endian %c%c",
MATLAB_HDR.EndianIndicator[0],MATLAB_HDR.EndianIndicator[1]);
if (!strncmp(MATLAB_HDR.EndianIndicator, "IM", 2))
{
ReadBlobXXXLong = ReadBlobLSBLong;
ReadBlobXXXShort = ReadBlobLSBShort;
ReadBlobDoublesXXX = ReadBlobDoublesLSB;
ReadBlobFloatsXXX = ReadBlobFloatsLSB;
image->endian = LSBEndian;
}
else if (!strncmp(MATLAB_HDR.EndianIndicator, "MI", 2))
{
ReadBlobXXXLong = ReadBlobMSBLong;
ReadBlobXXXShort = ReadBlobMSBShort;
ReadBlobDoublesXXX = ReadBlobDoublesMSB;
ReadBlobFloatsXXX = ReadBlobFloatsMSB;
image->endian = MSBEndian;
}
else
goto MATLAB_KO; /* unsupported endian */
if (strncmp(MATLAB_HDR.identific, "MATLAB", 6))
MATLAB_KO: ThrowReaderException(CorruptImageError,"ImproperImageHeader");
filepos = TellBlob(image);
while(!EOFBlob(image)) /* object parser loop */
{
Frames = 1;
(void) SeekBlob(image,filepos,SEEK_SET);
/* printf("pos=%X\n",TellBlob(image)); */
MATLAB_HDR.DataType = ReadBlobXXXLong(image);
if(EOFBlob(image)) break;
MATLAB_HDR.ObjectSize = ReadBlobXXXLong(image);
if(EOFBlob(image)) break;
filepos += MATLAB_HDR.ObjectSize + 4 + 4;
image2 = image;
#if defined(MAGICKCORE_ZLIB_DELEGATE)
if(MATLAB_HDR.DataType == miCOMPRESSED)
{
image2 = DecompressBlock(image,MATLAB_HDR.ObjectSize,clone_info,exception);
if(image2==NULL) continue;
MATLAB_HDR.DataType = ReadBlobXXXLong(image2); /* replace compressed object type. */
}
#endif
if(MATLAB_HDR.DataType!=miMATRIX) continue; /* skip other objects. */
MATLAB_HDR.unknown1 = ReadBlobXXXLong(image2);
MATLAB_HDR.unknown2 = ReadBlobXXXLong(image2);
MATLAB_HDR.unknown5 = ReadBlobXXXLong(image2);
MATLAB_HDR.StructureClass = MATLAB_HDR.unknown5 & 0xFF;
MATLAB_HDR.StructureFlag = (MATLAB_HDR.unknown5>>8) & 0xFF;
MATLAB_HDR.unknown3 = ReadBlobXXXLong(image2);
if(image!=image2)
MATLAB_HDR.unknown4 = ReadBlobXXXLong(image2); /* ??? don't understand why ?? */
MATLAB_HDR.unknown4 = ReadBlobXXXLong(image2);
MATLAB_HDR.DimFlag = ReadBlobXXXLong(image2);
MATLAB_HDR.SizeX = ReadBlobXXXLong(image2);
MATLAB_HDR.SizeY = ReadBlobXXXLong(image2);
switch(MATLAB_HDR.DimFlag)
{
case 8: z2=z=1; break; /* 2D matrix*/
case 12: z2=z = ReadBlobXXXLong(image2); /* 3D matrix RGB*/
(void) ReadBlobXXXLong(image2);
if(z!=3) ThrowReaderException(CoderError, "MultidimensionalMatricesAreNotSupported");
break;
case 16: z2=z = ReadBlobXXXLong(image2); /* 4D matrix animation */
if(z!=3 && z!=1)
ThrowReaderException(CoderError, "MultidimensionalMatricesAreNotSupported");
Frames = ReadBlobXXXLong(image2);
if (Frames == 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
break;
default: ThrowReaderException(CoderError, "MultidimensionalMatricesAreNotSupported");
}
MATLAB_HDR.Flag1 = ReadBlobXXXShort(image2);
MATLAB_HDR.NameFlag = ReadBlobXXXShort(image2);
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
"MATLAB_HDR.StructureClass %d",MATLAB_HDR.StructureClass);
if (MATLAB_HDR.StructureClass != mxCHAR_CLASS &&
MATLAB_HDR.StructureClass != mxSINGLE_CLASS && /* float + complex float */
MATLAB_HDR.StructureClass != mxDOUBLE_CLASS && /* double + complex double */
MATLAB_HDR.StructureClass != mxINT8_CLASS &&
MATLAB_HDR.StructureClass != mxUINT8_CLASS && /* uint8 + uint8 3D */
MATLAB_HDR.StructureClass != mxINT16_CLASS &&
MATLAB_HDR.StructureClass != mxUINT16_CLASS && /* uint16 + uint16 3D */
MATLAB_HDR.StructureClass != mxINT32_CLASS &&
MATLAB_HDR.StructureClass != mxUINT32_CLASS && /* uint32 + uint32 3D */
MATLAB_HDR.StructureClass != mxINT64_CLASS &&
MATLAB_HDR.StructureClass != mxUINT64_CLASS) /* uint64 + uint64 3D */
ThrowReaderException(CoderError,"UnsupportedCellTypeInTheMatrix");
switch (MATLAB_HDR.NameFlag)
{
case 0:
size = ReadBlobXXXLong(image2); /* Object name string size */
size = 4 * (ssize_t) ((size + 3 + 1) / 4);
(void) SeekBlob(image2, size, SEEK_CUR);
break;
case 1:
case 2:
case 3:
case 4:
(void) ReadBlob(image2, 4, (unsigned char *) &size); /* Object name string */
break;
default:
goto MATLAB_KO;
}
CellType = ReadBlobXXXLong(image2); /* Additional object type */
if (logging)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"MATLAB_HDR.CellType: %.20g",(double) CellType);
(void) ReadBlob(image2, 4, (unsigned char *) &size); /* data size */
NEXT_FRAME:
switch (CellType)
{
case miINT8:
case miUINT8:
sample_size = 8;
if(MATLAB_HDR.StructureFlag & FLAG_LOGICAL)
image->depth = 1;
else
image->depth = 8; /* Byte type cell */
ldblk = (ssize_t) MATLAB_HDR.SizeX;
break;
case miINT16:
case miUINT16:
sample_size = 16;
image->depth = 16; /* Word type cell */
ldblk = (ssize_t) (2 * MATLAB_HDR.SizeX);
break;
case miINT32:
case miUINT32:
sample_size = 32;
image->depth = 32; /* Dword type cell */
ldblk = (ssize_t) (4 * MATLAB_HDR.SizeX);
break;
case miINT64:
case miUINT64:
sample_size = 64;
image->depth = 64; /* Qword type cell */
ldblk = (ssize_t) (8 * MATLAB_HDR.SizeX);
break;
case miSINGLE:
sample_size = 32;
image->depth = 32; /* float type cell */
(void) SetImageOption(clone_info,"quantum:format","floating-point");
if (MATLAB_HDR.StructureFlag & FLAG_COMPLEX)
{ /* complex float type cell */
}
ldblk = (ssize_t) (4 * MATLAB_HDR.SizeX);
break;
case miDOUBLE:
sample_size = 64;
image->depth = 64; /* double type cell */
(void) SetImageOption(clone_info,"quantum:format","floating-point");
DisableMSCWarning(4127)
if (sizeof(double) != 8)
RestoreMSCWarning
ThrowReaderException(CoderError, "IncompatibleSizeOfDouble");
if (MATLAB_HDR.StructureFlag & FLAG_COMPLEX)
{ /* complex double type cell */
}
ldblk = (ssize_t) (8 * MATLAB_HDR.SizeX);
break;
default:
ThrowReaderException(CoderError, "UnsupportedCellTypeInTheMatrix");
}
(void) sample_size;
image->columns = MATLAB_HDR.SizeX;
image->rows = MATLAB_HDR.SizeY;
quantum_info=AcquireQuantumInfo(clone_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
one=1;
image->colors = one << image->depth;
if (image->columns == 0 || image->rows == 0)
goto MATLAB_KO;
/* Image is grayscale when the complex flag is not set and the matrix is 2D */
if ((MATLAB_HDR.DimFlag == 8) &&
((MATLAB_HDR.StructureFlag & FLAG_COMPLEX) == 0))
{
image->type=GrayscaleType;
SetImageColorspace(image,GRAYColorspace,exception);
}
/*
If ping is true, then only set image size and colors without
reading any image data.
*/
if (image_info->ping)
{
size_t temp = image->columns;
image->columns = image->rows;
image->rows = temp;
goto done_reading; /* !!!!!! BAD !!!! */
}
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
/* ----- Load raster data ----- */
BImgBuff = (unsigned char *) AcquireQuantumMemory((size_t) (ldblk),sizeof(double)); /* Ldblk was set in the check phase */
if (BImgBuff == NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
(void) ResetMagickMemory(BImgBuff,0,ldblk*sizeof(double));
MinVal = 0;
MaxVal = 0;
if (CellType==miDOUBLE || CellType==miSINGLE) /* Find Min and Max Values for floats */
{
CalcMinMax(image2, image_info->endian, MATLAB_HDR.SizeX, MATLAB_HDR.SizeY, CellType, ldblk, BImgBuff, &quantum_info->minimum, &quantum_info->maximum);
}
/* Main loop for reading all scanlines */
if(z==1) z=0; /* read grey scanlines */
/* else read color scanlines */
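/* The do/while below runs once for grayscale (z==0) and three times for RGB
(z==3,2,1); z2qtype[z] selects the quantum type for each plane pass. */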
do
{
for (i = 0; i < (ssize_t) MATLAB_HDR.SizeY; i++)
{
q=GetAuthenticPixels(image,0,MATLAB_HDR.SizeY-i-1,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
" MAT set image pixels returns unexpected NULL on a row %u.", (unsigned)(MATLAB_HDR.SizeY-i-1));
goto done_reading; /* Skip image rotation, when cannot set image pixels */
}
if(ReadBlob(image2,ldblk,(unsigned char *)BImgBuff) != (ssize_t) ldblk)
{
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
" MAT cannot read scanrow %u from a file.", (unsigned)(MATLAB_HDR.SizeY-i-1));
goto ExitLoop;
}
if((CellType==miINT8 || CellType==miUINT8) && (MATLAB_HDR.StructureFlag & FLAG_LOGICAL))
{
FixLogical((unsigned char *)BImgBuff,ldblk);
if(ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,z2qtype[z],BImgBuff,exception) <= 0)
{
ImportQuantumPixelsFailed:
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
" MAT failed to ImportQuantumPixels for a row %u", (unsigned)(MATLAB_HDR.SizeY-i-1));
break;
}
}
else
{
if(ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,z2qtype[z],BImgBuff,exception) <= 0)
goto ImportQuantumPixelsFailed;
if (z<=1 && /* fix only during a last pass z==0 || z==1 */
(CellType==miINT8 || CellType==miINT16 || CellType==miINT32 || CellType==miINT64))
FixSignedValues(image,q,MATLAB_HDR.SizeX);
}
if (!SyncAuthenticPixels(image,exception))
{
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
" MAT failed to sync image pixels for a row %u", (unsigned)(MATLAB_HDR.SizeY-i-1));
goto ExitLoop;
}
}
} while(z-- >= 2);
quantum_info=DestroyQuantumInfo(quantum_info);
ExitLoop:
/* Read complex part of numbers here */
if (MATLAB_HDR.StructureFlag & FLAG_COMPLEX)
{ /* Find Min and Max Values for complex parts of floats */
CellType = ReadBlobXXXLong(image2); /* Additional object type */
i = ReadBlobXXXLong(image2); /* size of a complex part - toss away*/
if (CellType==miDOUBLE || CellType==miSINGLE)
{
CalcMinMax(image2, image_info->endian, MATLAB_HDR.SizeX, MATLAB_HDR.SizeY, CellType, ldblk, BImgBuff, &MinVal, &MaxVal);
}
if (CellType==miDOUBLE)
for (i = 0; i < (ssize_t) MATLAB_HDR.SizeY; i++)
{
ReadBlobDoublesXXX(image2, ldblk, (double *)BImgBuff);
InsertComplexDoubleRow(image, (double *)BImgBuff, i, MinVal, MaxVal,
exception);
}
if (CellType==miSINGLE)
for (i = 0; i < (ssize_t) MATLAB_HDR.SizeY; i++)
{
ReadBlobFloatsXXX(image2, ldblk, (float *)BImgBuff);
InsertComplexFloatRow(image,(float *)BImgBuff,i,MinVal,MaxVal,
exception);
}
}
/* Image is grayscale when the complex flag is not set and the matrix is 2D - AGAIN!!! */
if ((MATLAB_HDR.DimFlag == 8) &&
((MATLAB_HDR.StructureFlag & FLAG_COMPLEX) == 0))
image->type=GrayscaleType;
if (image->depth == 1)
image->type=BilevelType;
if(image2==image)
image2 = NULL; /* Remove shadow copy to an image before rotation. */
/* Rotate image. */
rotated_image = RotateImage(image, 90.0, exception);
if (rotated_image != (Image *) NULL)
{
/* Remove page offsets added by RotateImage */
rotated_image->page.x=0;
rotated_image->page.y=0;
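/* Swap blobs so the rotated frame inherits the open input stream while the
discarded unrotated frame takes the freshly allocated one. */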
blob = rotated_image->blob;
rotated_image->blob = image->blob;
rotated_image->colors = image->colors;
image->blob = blob;
AppendImageToList(&image,rotated_image);
DeleteImageFromList(&image);
}
done_reading:
if(image2!=NULL)
if(image2!=image)
{
DeleteImageFromList(&image2);
if(clone_info)
{
if(clone_info->file)
{
fclose(clone_info->file);
clone_info->file = NULL;
(void) remove_utf8(clone_info->filename);
}
}
}
/* Allocate next image structure. */
AcquireNextImage(image_info,image,exception);
if (image->next == (Image *) NULL) break;
image=SyncNextImageInList(image);
image->columns=image->rows=0;
image->colors=0;
/* row scan buffer is no longer needed */
RelinquishMagickMemory(BImgBuff);
BImgBuff = NULL;
if(--Frames>0)
{
z = z2;
if(image2==NULL) image2 = image;
goto NEXT_FRAME;
}
if ((image2!=NULL) && (image2!=image)) /* Does shadow temporary decompressed image exist? */
{
/* CloseBlob(image2); */
DeleteImageFromList(&image2);
if(clone_info)
{
if(clone_info->file)
{
fclose(clone_info->file);
clone_info->file = NULL;
(void) remove_utf8(clone_info->filename);
}
}
}
}
RelinquishMagickMemory(BImgBuff);
END_OF_READING:
clone_info=DestroyImageInfo(clone_info);
CloseBlob(image);
{
Image *p;
ssize_t scene=0;
/*
Rewind list, removing any empty images while rewinding.
*/
p=image;
image=NULL;
while (p != (Image *) NULL)
{
Image *tmp=p;
if ((p->rows == 0) || (p->columns == 0)) {
p=p->previous;
DeleteImageFromList(&tmp);
} else {
image=p;
p=p->previous;
}
}
/*
Fix scene numbers
*/
for (p=image; p != (Image *) NULL; p=p->next)
p->scene=scene++;
}
if(clone_info != NULL) /* cleanup garbage file from compression */
{
if(clone_info->file)
{
fclose(clone_info->file);
clone_info->file = NULL;
(void) remove_utf8(clone_info->filename);
}
DestroyImageInfo(clone_info);
clone_info = NULL;
}
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),"return");
if(image==NULL)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
return (image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r M A T I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Method RegisterMATImage adds attributes for the MAT image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterMATImage method is:
%
% size_t RegisterMATImage(void)
%
*/
ModuleExport size_t RegisterMATImage(void)
{
MagickInfo
*entry;
entry=AcquireMagickInfo("MAT","MAT","MATLAB level 5 image format");
entry->decoder=(DecodeImageHandler *) ReadMATImage;
entry->encoder=(EncodeImageHandler *) WriteMATImage;
entry->flags^=CoderBlobSupportFlag;
entry->flags|=CoderDecoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r M A T I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Method UnregisterMATImage removes format registrations made by the
% MAT module from the list of supported formats.
%
% The format of the UnregisterMATImage method is:
%
% UnregisterMATImage(void)
%
*/
ModuleExport void UnregisterMATImage(void)
{
(void) UnregisterMagickInfo("MAT");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e M A T L A B I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Function WriteMATImage writes a MATLAB matrix to a file.
%
% The format of the WriteMATImage method is:
%
% MagickBooleanType WriteMATImage(const ImageInfo *image_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: Specifies a pointer to a ImageInfo structure.
%
% o image: A pointer to an Image structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WriteMATImage(const ImageInfo *image_info,Image *image,
ExceptionInfo *exception)
{
ssize_t y;
unsigned z;
register const Quantum *p;
unsigned int status;
int logging;
size_t DataSize;
char padding;
char MATLAB_HDR[0x80];
time_t current_time;
struct tm local_time;
unsigned char *pixels;
int is_gray;
MagickOffsetType
scene;
QuantumInfo
*quantum_info;
/*
Open output image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
logging=LogMagickEvent(CoderEvent,GetMagickModule(),"enter MAT");
(void) logging;
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(MagickFalse);
image->depth=8;
current_time=time((time_t *) NULL);
#if defined(MAGICKCORE_HAVE_LOCALTIME_R)
(void) localtime_r(&current_time,&local_time);
#else
(void) memcpy(&local_time,localtime(&current_time),sizeof(local_time));
#endif
(void) memset(MATLAB_HDR,' ',MagickMin(sizeof(MATLAB_HDR),124));
FormatLocaleString(MATLAB_HDR,sizeof(MATLAB_HDR),
"MATLAB 5.0 MAT-file, Platform: %s, Created on: %s %s %2d %2d:%2d:%2d %d",
OsDesc,DayOfWTab[local_time.tm_wday],MonthsTab[local_time.tm_mon],
local_time.tm_mday,local_time.tm_hour,local_time.tm_min,
local_time.tm_sec,local_time.tm_year+1900);
MATLAB_HDR[0x7C]=0;
MATLAB_HDR[0x7D]=1;
MATLAB_HDR[0x7E]='I';
MATLAB_HDR[0x7F]='M';
(void) WriteBlob(image,sizeof(MATLAB_HDR),(unsigned char *) MATLAB_HDR);
scene=0;
do
{
(void) TransformImageColorspace(image,sRGBColorspace,exception);
is_gray = SetImageGray(image,exception);
z = is_gray ? 0 : 3;
/*
Store MAT header.
*/
DataSize = image->rows /*Y*/ * image->columns /*X*/;
if(!is_gray) DataSize *= 3 /*Z*/;
padding=((unsigned char)(DataSize-1) & 0x7) ^ 0x7;
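/* MAT data elements are 8-byte aligned: the expression above computes how
many pad bytes round DataSize up to the next multiple of 8. */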
(void) WriteBlobLSBLong(image, miMATRIX);
(void) WriteBlobLSBLong(image, (unsigned int) DataSize+padding+(is_gray ? 48 : 56));
(void) WriteBlobLSBLong(image, 0x6); /* 0x88 */
(void) WriteBlobLSBLong(image, 0x8); /* 0x8C */
(void) WriteBlobLSBLong(image, 0x6); /* 0x90 */
(void) WriteBlobLSBLong(image, 0);
(void) WriteBlobLSBLong(image, 0x5); /* 0x98 */
(void) WriteBlobLSBLong(image, is_gray ? 0x8 : 0xC); /* 0x9C - DimFlag */
(void) WriteBlobLSBLong(image, (unsigned int) image->rows); /* x: 0xA0 */
(void) WriteBlobLSBLong(image, (unsigned int) image->columns); /* y: 0xA4 */
if(!is_gray)
{
(void) WriteBlobLSBLong(image, 3); /* z: 0xA8 */
(void) WriteBlobLSBLong(image, 0);
}
(void) WriteBlobLSBShort(image, 1); /* 0xB0 */
(void) WriteBlobLSBShort(image, 1); /* 0xB2 */
(void) WriteBlobLSBLong(image, 'M'); /* 0xB4 */
(void) WriteBlobLSBLong(image, 0x2); /* 0xB8 */
(void) WriteBlobLSBLong(image, (unsigned int) DataSize); /* 0xBC */
/*
Store image data.
*/
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
pixels=(unsigned char *) GetQuantumPixels(quantum_info);
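/* MATLAB stores matrices column-major, so emit one image column per pass
(image->rows samples at x==y), matching the swapped dimensions above. */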
do
{
for (y=0; y < (ssize_t)image->columns; y++)
{
p=GetVirtualPixels(image,y,0,1,image->rows,exception);
if (p == (const Quantum *) NULL)
break;
(void) ExportQuantumPixels(image,(CacheView *) NULL,quantum_info,
z2qtype[z],pixels,exception);
(void) WriteBlob(image,image->rows,pixels);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
} while(z-- >= 2);
while(padding-->0) (void) WriteBlobByte(image,0);
quantum_info=DestroyQuantumInfo(quantum_info);
if (GetNextImageInList(image) == (Image *) NULL)
break;
image=SyncNextImageInList(image);
status=SetImageProgress(image,SaveImagesTag,scene++,
GetImageListLength(image));
if (status == MagickFalse)
break;
} while (image_info->adjoin != MagickFalse);
(void) CloseBlob(image);
return(MagickTrue);
}
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_2737_0 |
crossvul-cpp_data_good_2526_0 | /*
* Timers abstract layer
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/sched/signal.h>
#include <sound/core.h>
#include <sound/timer.h>
#include <sound/control.h>
#include <sound/info.h>
#include <sound/minors.h>
#include <sound/initval.h>
#include <linux/kmod.h>
/* internal flags */
#define SNDRV_TIMER_IFLG_PAUSED 0x00010000
#if IS_ENABLED(CONFIG_SND_HRTIMER)
#define DEFAULT_TIMER_LIMIT 4
#else
#define DEFAULT_TIMER_LIMIT 1
#endif
static int timer_limit = DEFAULT_TIMER_LIMIT;
static int timer_tstamp_monotonic = 1;
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, Takashi Iwai <tiwai@suse.de>");
MODULE_DESCRIPTION("ALSA timer interface");
MODULE_LICENSE("GPL");
module_param(timer_limit, int, 0444);
MODULE_PARM_DESC(timer_limit, "Maximum global timers in system.");
module_param(timer_tstamp_monotonic, int, 0444);
MODULE_PARM_DESC(timer_tstamp_monotonic, "Use posix monotonic clock source for timestamps (default).");
MODULE_ALIAS_CHARDEV(CONFIG_SND_MAJOR, SNDRV_MINOR_TIMER);
MODULE_ALIAS("devname:snd/timer");
struct snd_timer_user {
struct snd_timer_instance *timeri;
int tread; /* enhanced read with timestamps and events */
unsigned long ticks;
unsigned long overrun;
int qhead;
int qtail;
int qused;
int queue_size;
bool disconnected;
struct snd_timer_read *queue;
struct snd_timer_tread *tqueue;
spinlock_t qlock;
unsigned long last_resolution;
unsigned int filter;
struct timespec tstamp; /* trigger tstamp */
wait_queue_head_t qchange_sleep;
struct fasync_struct *fasync;
struct mutex ioctl_lock;
};
/* list of timers */
static LIST_HEAD(snd_timer_list);
/* list of slave instances */
static LIST_HEAD(snd_timer_slave_list);
/* lock for slave active lists */
static DEFINE_SPINLOCK(slave_active_lock);
static DEFINE_MUTEX(register_mutex);
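/* Lock ordering used below: register_mutex -> slave_active_lock -> timer->lock. */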
static int snd_timer_free(struct snd_timer *timer);
static int snd_timer_dev_free(struct snd_device *device);
static int snd_timer_dev_register(struct snd_device *device);
static int snd_timer_dev_disconnect(struct snd_device *device);
static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_left);
/*
* create a timer instance with the given owner string.
* when timer is not NULL, increments the module counter
*/
static struct snd_timer_instance *snd_timer_instance_new(char *owner,
struct snd_timer *timer)
{
struct snd_timer_instance *timeri;
timeri = kzalloc(sizeof(*timeri), GFP_KERNEL);
if (timeri == NULL)
return NULL;
timeri->owner = kstrdup(owner, GFP_KERNEL);
if (! timeri->owner) {
kfree(timeri);
return NULL;
}
INIT_LIST_HEAD(&timeri->open_list);
INIT_LIST_HEAD(&timeri->active_list);
INIT_LIST_HEAD(&timeri->ack_list);
INIT_LIST_HEAD(&timeri->slave_list_head);
INIT_LIST_HEAD(&timeri->slave_active_head);
timeri->timer = timer;
if (timer && !try_module_get(timer->module)) {
kfree(timeri->owner);
kfree(timeri);
return NULL;
}
return timeri;
}
/*
* find a timer instance from the given timer id
*/
static struct snd_timer *snd_timer_find(struct snd_timer_id *tid)
{
struct snd_timer *timer = NULL;
list_for_each_entry(timer, &snd_timer_list, device_list) {
if (timer->tmr_class != tid->dev_class)
continue;
if ((timer->tmr_class == SNDRV_TIMER_CLASS_CARD ||
timer->tmr_class == SNDRV_TIMER_CLASS_PCM) &&
(timer->card == NULL ||
timer->card->number != tid->card))
continue;
if (timer->tmr_device != tid->device)
continue;
if (timer->tmr_subdevice != tid->subdevice)
continue;
return timer;
}
return NULL;
}
#ifdef CONFIG_MODULES
static void snd_timer_request(struct snd_timer_id *tid)
{
switch (tid->dev_class) {
case SNDRV_TIMER_CLASS_GLOBAL:
if (tid->device < timer_limit)
request_module("snd-timer-%i", tid->device);
break;
case SNDRV_TIMER_CLASS_CARD:
case SNDRV_TIMER_CLASS_PCM:
if (tid->card < snd_ecards_limit)
request_module("snd-card-%i", tid->card);
break;
default:
break;
}
}
#endif
/*
* look for a master instance matching with the slave id of the given slave.
* when found, relink the open_link of the slave.
*
* call this with register_mutex down.
*/
static void snd_timer_check_slave(struct snd_timer_instance *slave)
{
struct snd_timer *timer;
struct snd_timer_instance *master;
/* FIXME: it's really dumb to look up all entries.. */
list_for_each_entry(timer, &snd_timer_list, device_list) {
list_for_each_entry(master, &timer->open_list_head, open_list) {
if (slave->slave_class == master->slave_class &&
slave->slave_id == master->slave_id) {
list_move_tail(&slave->open_list,
&master->slave_list_head);
spin_lock_irq(&slave_active_lock);
slave->master = master;
slave->timer = master->timer;
spin_unlock_irq(&slave_active_lock);
return;
}
}
}
}
/*
* look for slave instances matching with the slave id of the given master.
* when found, relink the open_link of slaves.
*
* call this with register_mutex down.
*/
static void snd_timer_check_master(struct snd_timer_instance *master)
{
struct snd_timer_instance *slave, *tmp;
/* check all pending slaves */
list_for_each_entry_safe(slave, tmp, &snd_timer_slave_list, open_list) {
if (slave->slave_class == master->slave_class &&
slave->slave_id == master->slave_id) {
list_move_tail(&slave->open_list, &master->slave_list_head);
spin_lock_irq(&slave_active_lock);
spin_lock(&master->timer->lock);
slave->master = master;
slave->timer = master->timer;
if (slave->flags & SNDRV_TIMER_IFLG_RUNNING)
list_add_tail(&slave->active_list,
&master->slave_active_head);
spin_unlock(&master->timer->lock);
spin_unlock_irq(&slave_active_lock);
}
}
}
/*
* open a timer instance
* when opening a master, the slave id must be given here.
*/
int snd_timer_open(struct snd_timer_instance **ti,
char *owner, struct snd_timer_id *tid,
unsigned int slave_id)
{
struct snd_timer *timer;
struct snd_timer_instance *timeri = NULL;
if (tid->dev_class == SNDRV_TIMER_CLASS_SLAVE) {
/* open a slave instance */
if (tid->dev_sclass <= SNDRV_TIMER_SCLASS_NONE ||
tid->dev_sclass > SNDRV_TIMER_SCLASS_OSS_SEQUENCER) {
pr_debug("ALSA: timer: invalid slave class %i\n",
tid->dev_sclass);
return -EINVAL;
}
mutex_lock(&register_mutex);
timeri = snd_timer_instance_new(owner, NULL);
if (!timeri) {
mutex_unlock(&register_mutex);
return -ENOMEM;
}
timeri->slave_class = tid->dev_sclass;
timeri->slave_id = tid->device;
timeri->flags |= SNDRV_TIMER_IFLG_SLAVE;
list_add_tail(&timeri->open_list, &snd_timer_slave_list);
snd_timer_check_slave(timeri);
mutex_unlock(&register_mutex);
*ti = timeri;
return 0;
}
/* open a master instance */
mutex_lock(&register_mutex);
timer = snd_timer_find(tid);
#ifdef CONFIG_MODULES
if (!timer) {
mutex_unlock(&register_mutex);
snd_timer_request(tid);
mutex_lock(&register_mutex);
timer = snd_timer_find(tid);
}
#endif
if (!timer) {
mutex_unlock(&register_mutex);
return -ENODEV;
}
if (!list_empty(&timer->open_list_head)) {
timeri = list_entry(timer->open_list_head.next,
struct snd_timer_instance, open_list);
if (timeri->flags & SNDRV_TIMER_IFLG_EXCLUSIVE) {
mutex_unlock(&register_mutex);
return -EBUSY;
}
}
timeri = snd_timer_instance_new(owner, timer);
if (!timeri) {
mutex_unlock(&register_mutex);
return -ENOMEM;
}
/* take a card refcount for safe disconnection */
if (timer->card)
get_device(&timer->card->card_dev);
timeri->slave_class = tid->dev_sclass;
timeri->slave_id = slave_id;
if (list_empty(&timer->open_list_head) && timer->hw.open) {
int err = timer->hw.open(timer);
if (err) {
kfree(timeri->owner);
kfree(timeri);
if (timer->card)
put_device(&timer->card->card_dev);
module_put(timer->module);
mutex_unlock(&register_mutex);
return err;
}
}
list_add_tail(&timeri->open_list, &timer->open_list_head);
snd_timer_check_master(timeri);
mutex_unlock(&register_mutex);
*ti = timeri;
return 0;
}
/*
* close a timer instance
*/
int snd_timer_close(struct snd_timer_instance *timeri)
{
struct snd_timer *timer = NULL;
struct snd_timer_instance *slave, *tmp;
if (snd_BUG_ON(!timeri))
return -ENXIO;
mutex_lock(&register_mutex);
list_del(&timeri->open_list);
/* force to stop the timer */
snd_timer_stop(timeri);
timer = timeri->timer;
if (timer) {
/* wait, until the active callback is finished */
spin_lock_irq(&timer->lock);
while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) {
spin_unlock_irq(&timer->lock);
udelay(10);
spin_lock_irq(&timer->lock);
}
spin_unlock_irq(&timer->lock);
/* remove slave links */
spin_lock_irq(&slave_active_lock);
spin_lock(&timer->lock);
list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head,
open_list) {
list_move_tail(&slave->open_list, &snd_timer_slave_list);
slave->master = NULL;
slave->timer = NULL;
list_del_init(&slave->ack_list);
list_del_init(&slave->active_list);
}
spin_unlock(&timer->lock);
spin_unlock_irq(&slave_active_lock);
/* slave doesn't need to release timer resources below */
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
timer = NULL;
}
if (timeri->private_free)
timeri->private_free(timeri);
kfree(timeri->owner);
kfree(timeri);
if (timer) {
if (list_empty(&timer->open_list_head) && timer->hw.close)
timer->hw.close(timer);
/* release a card refcount for safe disconnection */
if (timer->card)
put_device(&timer->card->card_dev);
module_put(timer->module);
}
mutex_unlock(&register_mutex);
return 0;
}
unsigned long snd_timer_resolution(struct snd_timer_instance *timeri)
{
struct snd_timer * timer;
if (timeri == NULL)
return 0;
if ((timer = timeri->timer) != NULL) {
if (timer->hw.c_resolution)
return timer->hw.c_resolution(timer);
return timer->hw.resolution;
}
return 0;
}
static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
{
struct snd_timer *timer;
unsigned long resolution = 0;
struct snd_timer_instance *ts;
struct timespec tstamp;
if (timer_tstamp_monotonic)
ktime_get_ts(&tstamp);
else
getnstimeofday(&tstamp);
if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_START ||
event > SNDRV_TIMER_EVENT_PAUSE))
return;
if (event == SNDRV_TIMER_EVENT_START ||
event == SNDRV_TIMER_EVENT_CONTINUE)
resolution = snd_timer_resolution(ti);
if (ti->ccallback)
ti->ccallback(ti, event, &tstamp, resolution);
if (ti->flags & SNDRV_TIMER_IFLG_SLAVE)
return;
timer = ti->timer;
if (timer == NULL)
return;
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
return;
list_for_each_entry(ts, &ti->slave_active_head, active_list)
if (ts->ccallback)
ts->ccallback(ts, event + 100, &tstamp, resolution);
}
/* start/continue a master timer */
static int snd_timer_start1(struct snd_timer_instance *timeri,
bool start, unsigned long ticks)
{
struct snd_timer *timer;
int result;
unsigned long flags;
timer = timeri->timer;
if (!timer)
return -EINVAL;
spin_lock_irqsave(&timer->lock, flags);
if (timer->card && timer->card->shutdown) {
result = -ENODEV;
goto unlock;
}
if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
SNDRV_TIMER_IFLG_START)) {
result = -EBUSY;
goto unlock;
}
if (start)
timeri->ticks = timeri->cticks = ticks;
else if (!timeri->cticks)
timeri->cticks = 1;
timeri->pticks = 0;
list_move_tail(&timeri->active_list, &timer->active_list_head);
if (timer->running) {
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
goto __start_now;
timer->flags |= SNDRV_TIMER_FLG_RESCHED;
timeri->flags |= SNDRV_TIMER_IFLG_START;
result = 1; /* delayed start */
} else {
if (start)
timer->sticks = ticks;
timer->hw.start(timer);
__start_now:
timer->running++;
timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
result = 0;
}
snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START :
SNDRV_TIMER_EVENT_CONTINUE);
unlock:
spin_unlock_irqrestore(&timer->lock, flags);
return result;
}
/* start/continue a slave timer */
static int snd_timer_start_slave(struct snd_timer_instance *timeri,
bool start)
{
unsigned long flags;
spin_lock_irqsave(&slave_active_lock, flags);
if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
spin_unlock_irqrestore(&slave_active_lock, flags);
return -EBUSY;
}
timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
if (timeri->master && timeri->timer) {
spin_lock(&timeri->timer->lock);
list_add_tail(&timeri->active_list,
&timeri->master->slave_active_head);
snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START :
SNDRV_TIMER_EVENT_CONTINUE);
spin_unlock(&timeri->timer->lock);
}
spin_unlock_irqrestore(&slave_active_lock, flags);
return 1; /* delayed start */
}
/* stop/pause a master timer */
static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop)
{
struct snd_timer *timer;
int result = 0;
unsigned long flags;
timer = timeri->timer;
if (!timer)
return -EINVAL;
spin_lock_irqsave(&timer->lock, flags);
if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
SNDRV_TIMER_IFLG_START))) {
result = -EBUSY;
goto unlock;
}
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
if (timer->card && timer->card->shutdown)
goto unlock;
if (stop) {
timeri->cticks = timeri->ticks;
timeri->pticks = 0;
}
if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
!(--timer->running)) {
timer->hw.stop(timer);
if (timer->flags & SNDRV_TIMER_FLG_RESCHED) {
timer->flags &= ~SNDRV_TIMER_FLG_RESCHED;
snd_timer_reschedule(timer, 0);
if (timer->flags & SNDRV_TIMER_FLG_CHANGE) {
timer->flags &= ~SNDRV_TIMER_FLG_CHANGE;
timer->hw.start(timer);
}
}
}
timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
if (stop)
timeri->flags &= ~SNDRV_TIMER_IFLG_PAUSED;
else
timeri->flags |= SNDRV_TIMER_IFLG_PAUSED;
snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
SNDRV_TIMER_EVENT_CONTINUE);
unlock:
spin_unlock_irqrestore(&timer->lock, flags);
return result;
}
/* stop/pause a slave timer */
static int snd_timer_stop_slave(struct snd_timer_instance *timeri, bool stop)
{
unsigned long flags;
spin_lock_irqsave(&slave_active_lock, flags);
if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
spin_unlock_irqrestore(&slave_active_lock, flags);
return -EBUSY;
}
timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
if (timeri->timer) {
spin_lock(&timeri->timer->lock);
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
SNDRV_TIMER_EVENT_CONTINUE);
spin_unlock(&timeri->timer->lock);
}
spin_unlock_irqrestore(&slave_active_lock, flags);
return 0;
}
/*
* start the timer instance
*/
int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
{
if (timeri == NULL || ticks < 1)
return -EINVAL;
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_start_slave(timeri, true);
else
return snd_timer_start1(timeri, true, ticks);
}
/*
* stop the timer instance.
*
* do not call this from the timer callback!
*/
int snd_timer_stop(struct snd_timer_instance *timeri)
{
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_stop_slave(timeri, true);
else
return snd_timer_stop1(timeri, true);
}
/*
* start again.. the tick is kept.
*/
int snd_timer_continue(struct snd_timer_instance *timeri)
{
/* timer can continue only after pause */
if (!(timeri->flags & SNDRV_TIMER_IFLG_PAUSED))
return -EINVAL;
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_start_slave(timeri, false);
else
return snd_timer_start1(timeri, false, 0);
}
/*
* pause.. remember the ticks left
*/
int snd_timer_pause(struct snd_timer_instance * timeri)
{
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_stop_slave(timeri, false);
else
return snd_timer_stop1(timeri, false);
}
/*
* reschedule the timer
*
* start pending instances and check the scheduling ticks.
* when the scheduling ticks change, set the CHANGE flag to reprogram the timer.
*/
static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_left)
{
struct snd_timer_instance *ti;
unsigned long ticks = ~0UL;
list_for_each_entry(ti, &timer->active_list_head, active_list) {
if (ti->flags & SNDRV_TIMER_IFLG_START) {
ti->flags &= ~SNDRV_TIMER_IFLG_START;
ti->flags |= SNDRV_TIMER_IFLG_RUNNING;
timer->running++;
}
if (ti->flags & SNDRV_TIMER_IFLG_RUNNING) {
if (ticks > ti->cticks)
ticks = ti->cticks;
}
}
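/* ticks still being ~0UL means no instance is running; drop the pending
reschedule request. */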
if (ticks == ~0UL) {
timer->flags &= ~SNDRV_TIMER_FLG_RESCHED;
return;
}
if (ticks > timer->hw.ticks)
ticks = timer->hw.ticks;
if (ticks_left != ticks)
timer->flags |= SNDRV_TIMER_FLG_CHANGE;
timer->sticks = ticks;
}
/*
* timer tasklet
*
*/
static void snd_timer_tasklet(unsigned long arg)
{
struct snd_timer *timer = (struct snd_timer *) arg;
struct snd_timer_instance *ti;
struct list_head *p;
unsigned long resolution, ticks;
unsigned long flags;
if (timer->card && timer->card->shutdown)
return;
spin_lock_irqsave(&timer->lock, flags);
/* now process all callbacks */
while (!list_empty(&timer->sack_list_head)) {
p = timer->sack_list_head.next; /* get first item */
ti = list_entry(p, struct snd_timer_instance, ack_list);
/* remove from ack_list and make empty */
list_del_init(p);
ticks = ti->pticks;
ti->pticks = 0;
resolution = ti->resolution;
ti->flags |= SNDRV_TIMER_IFLG_CALLBACK;
spin_unlock(&timer->lock);
if (ti->callback)
ti->callback(ti, resolution, ticks);
spin_lock(&timer->lock);
ti->flags &= ~SNDRV_TIMER_IFLG_CALLBACK;
}
spin_unlock_irqrestore(&timer->lock, flags);
}
/*
* timer interrupt
*
* ticks_left is usually equal to timer->sticks.
*
*/
void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
{
struct snd_timer_instance *ti, *ts, *tmp;
unsigned long resolution, ticks;
struct list_head *p, *ack_list_head;
unsigned long flags;
int use_tasklet = 0;
if (timer == NULL)
return;
if (timer->card && timer->card->shutdown)
return;
spin_lock_irqsave(&timer->lock, flags);
/* remember the current resolution */
if (timer->hw.c_resolution)
resolution = timer->hw.c_resolution(timer);
else
resolution = timer->hw.resolution;
/* loop for all active instances
* Here we cannot use list_for_each_entry because the active_list of a
* processed instance may be relinked to an ack list (or unlinked) before
* its callback is called.
*/
list_for_each_entry_safe(ti, tmp, &timer->active_list_head,
active_list) {
if (!(ti->flags & SNDRV_TIMER_IFLG_RUNNING))
continue;
ti->pticks += ticks_left;
ti->resolution = resolution;
if (ti->cticks < ticks_left)
ti->cticks = 0;
else
ti->cticks -= ticks_left;
if (ti->cticks) /* not expired */
continue;
if (ti->flags & SNDRV_TIMER_IFLG_AUTO) {
ti->cticks = ti->ticks;
} else {
ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
--timer->running;
list_del_init(&ti->active_list);
}
if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) ||
(ti->flags & SNDRV_TIMER_IFLG_FAST))
ack_list_head = &timer->ack_list_head;
else
ack_list_head = &timer->sack_list_head;
if (list_empty(&ti->ack_list))
list_add_tail(&ti->ack_list, ack_list_head);
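/* Propagate this expiry to every linked slave so their callbacks see the
master's tick count and resolution. */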
list_for_each_entry(ts, &ti->slave_active_head, active_list) {
ts->pticks = ti->pticks;
ts->resolution = resolution;
if (list_empty(&ts->ack_list))
list_add_tail(&ts->ack_list, ack_list_head);
}
}
if (timer->flags & SNDRV_TIMER_FLG_RESCHED)
snd_timer_reschedule(timer, timer->sticks);
if (timer->running) {
if (timer->hw.flags & SNDRV_TIMER_HW_STOP) {
timer->hw.stop(timer);
timer->flags |= SNDRV_TIMER_FLG_CHANGE;
}
if (!(timer->hw.flags & SNDRV_TIMER_HW_AUTO) ||
(timer->flags & SNDRV_TIMER_FLG_CHANGE)) {
/* restart timer */
timer->flags &= ~SNDRV_TIMER_FLG_CHANGE;
timer->hw.start(timer);
}
} else {
timer->hw.stop(timer);
}
/* now process all fast callbacks */
while (!list_empty(&timer->ack_list_head)) {
p = timer->ack_list_head.next; /* get first item */
ti = list_entry(p, struct snd_timer_instance, ack_list);
/* remove from ack_list and make empty */
list_del_init(p);
ticks = ti->pticks;
ti->pticks = 0;
ti->flags |= SNDRV_TIMER_IFLG_CALLBACK;
spin_unlock(&timer->lock);
if (ti->callback)
ti->callback(ti, resolution, ticks);
spin_lock(&timer->lock);
ti->flags &= ~SNDRV_TIMER_IFLG_CALLBACK;
}
/* do we have any slow callbacks? */
use_tasklet = !list_empty(&timer->sack_list_head);
spin_unlock_irqrestore(&timer->lock, flags);
if (use_tasklet)
tasklet_schedule(&timer->task_queue);
}
/*
*/
int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid,
struct snd_timer **rtimer)
{
struct snd_timer *timer;
int err;
static struct snd_device_ops ops = {
.dev_free = snd_timer_dev_free,
.dev_register = snd_timer_dev_register,
.dev_disconnect = snd_timer_dev_disconnect,
};
if (snd_BUG_ON(!tid))
return -EINVAL;
if (rtimer)
*rtimer = NULL;
timer = kzalloc(sizeof(*timer), GFP_KERNEL);
if (!timer)
return -ENOMEM;
timer->tmr_class = tid->dev_class;
timer->card = card;
timer->tmr_device = tid->device;
timer->tmr_subdevice = tid->subdevice;
if (id)
strlcpy(timer->id, id, sizeof(timer->id));
timer->sticks = 1;
INIT_LIST_HEAD(&timer->device_list);
INIT_LIST_HEAD(&timer->open_list_head);
INIT_LIST_HEAD(&timer->active_list_head);
INIT_LIST_HEAD(&timer->ack_list_head);
INIT_LIST_HEAD(&timer->sack_list_head);
spin_lock_init(&timer->lock);
tasklet_init(&timer->task_queue, snd_timer_tasklet,
(unsigned long)timer);
if (card != NULL) {
timer->module = card->module;
err = snd_device_new(card, SNDRV_DEV_TIMER, timer, &ops);
if (err < 0) {
snd_timer_free(timer);
return err;
}
}
if (rtimer)
*rtimer = timer;
return 0;
}
static int snd_timer_free(struct snd_timer *timer)
{
if (!timer)
return 0;
mutex_lock(&register_mutex);
if (! list_empty(&timer->open_list_head)) {
struct list_head *p, *n;
struct snd_timer_instance *ti;
pr_warn("ALSA: timer %p is busy?\n", timer);
list_for_each_safe(p, n, &timer->open_list_head) {
list_del_init(p);
ti = list_entry(p, struct snd_timer_instance, open_list);
ti->timer = NULL;
}
}
list_del(&timer->device_list);
mutex_unlock(&register_mutex);
if (timer->private_free)
timer->private_free(timer);
kfree(timer);
return 0;
}
static int snd_timer_dev_free(struct snd_device *device)
{
struct snd_timer *timer = device->device_data;
return snd_timer_free(timer);
}
static int snd_timer_dev_register(struct snd_device *dev)
{
struct snd_timer *timer = dev->device_data;
struct snd_timer *timer1;
if (snd_BUG_ON(!timer || !timer->hw.start || !timer->hw.stop))
return -ENXIO;
if (!(timer->hw.flags & SNDRV_TIMER_HW_SLAVE) &&
!timer->hw.resolution && timer->hw.c_resolution == NULL)
return -EINVAL;
mutex_lock(&register_mutex);
list_for_each_entry(timer1, &snd_timer_list, device_list) {
if (timer1->tmr_class > timer->tmr_class)
break;
if (timer1->tmr_class < timer->tmr_class)
continue;
if (timer1->card && timer->card) {
if (timer1->card->number > timer->card->number)
break;
if (timer1->card->number < timer->card->number)
continue;
}
if (timer1->tmr_device > timer->tmr_device)
break;
if (timer1->tmr_device < timer->tmr_device)
continue;
if (timer1->tmr_subdevice > timer->tmr_subdevice)
break;
if (timer1->tmr_subdevice < timer->tmr_subdevice)
continue;
/* conflicts.. */
mutex_unlock(&register_mutex);
return -EBUSY;
}
list_add_tail(&timer->device_list, &timer1->device_list);
mutex_unlock(&register_mutex);
return 0;
}
static int snd_timer_dev_disconnect(struct snd_device *device)
{
struct snd_timer *timer = device->device_data;
struct snd_timer_instance *ti;
mutex_lock(&register_mutex);
list_del_init(&timer->device_list);
/* wake up pending sleepers */
list_for_each_entry(ti, &timer->open_list_head, open_list) {
if (ti->disconnect)
ti->disconnect(ti);
}
mutex_unlock(&register_mutex);
return 0;
}
void snd_timer_notify(struct snd_timer *timer, int event, struct timespec *tstamp)
{
unsigned long flags;
unsigned long resolution = 0;
struct snd_timer_instance *ti, *ts;
if (timer->card && timer->card->shutdown)
return;
if (! (timer->hw.flags & SNDRV_TIMER_HW_SLAVE))
return;
if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_MSTART ||
event > SNDRV_TIMER_EVENT_MRESUME))
return;
spin_lock_irqsave(&timer->lock, flags);
if (event == SNDRV_TIMER_EVENT_MSTART ||
event == SNDRV_TIMER_EVENT_MCONTINUE ||
event == SNDRV_TIMER_EVENT_MRESUME) {
if (timer->hw.c_resolution)
resolution = timer->hw.c_resolution(timer);
else
resolution = timer->hw.resolution;
}
list_for_each_entry(ti, &timer->active_list_head, active_list) {
if (ti->ccallback)
ti->ccallback(ti, event, tstamp, resolution);
list_for_each_entry(ts, &ti->slave_active_head, active_list)
if (ts->ccallback)
ts->ccallback(ts, event, tstamp, resolution);
}
spin_unlock_irqrestore(&timer->lock, flags);
}
/*
* exported functions for global timers
*/
int snd_timer_global_new(char *id, int device, struct snd_timer **rtimer)
{
struct snd_timer_id tid;
tid.dev_class = SNDRV_TIMER_CLASS_GLOBAL;
tid.dev_sclass = SNDRV_TIMER_SCLASS_NONE;
tid.card = -1;
tid.device = device;
tid.subdevice = 0;
return snd_timer_new(NULL, id, &tid, rtimer);
}
int snd_timer_global_free(struct snd_timer *timer)
{
return snd_timer_free(timer);
}
int snd_timer_global_register(struct snd_timer *timer)
{
struct snd_device dev;
memset(&dev, 0, sizeof(dev));
dev.device_data = timer;
return snd_timer_dev_register(&dev);
}
/*
* System timer
*/
struct snd_timer_system_private {
struct timer_list tlist;
unsigned long last_expires;
unsigned long last_jiffies;
unsigned long correction;
};
static void snd_timer_s_function(unsigned long data)
{
struct snd_timer *timer = (struct snd_timer *)data;
struct snd_timer_system_private *priv = timer->private_data;
unsigned long jiff = jiffies;
if (time_after(jiff, priv->last_expires))
priv->correction += (long)jiff - (long)priv->last_expires;
snd_timer_interrupt(timer, (long)jiff - (long)priv->last_jiffies);
}
static int snd_timer_s_start(struct snd_timer * timer)
{
struct snd_timer_system_private *priv;
unsigned long njiff;
priv = (struct snd_timer_system_private *) timer->private_data;
njiff = (priv->last_jiffies = jiffies);
if (priv->correction > timer->sticks - 1) {
priv->correction -= timer->sticks - 1;
njiff++;
} else {
njiff += timer->sticks - priv->correction;
priv->correction = 0;
}
priv->last_expires = njiff;
mod_timer(&priv->tlist, njiff);
return 0;
}
static int snd_timer_s_stop(struct snd_timer * timer)
{
struct snd_timer_system_private *priv;
unsigned long jiff;
priv = (struct snd_timer_system_private *) timer->private_data;
del_timer(&priv->tlist);
jiff = jiffies;
if (time_before(jiff, priv->last_expires))
timer->sticks = priv->last_expires - jiff;
else
timer->sticks = 1;
priv->correction = 0;
return 0;
}
static int snd_timer_s_close(struct snd_timer *timer)
{
struct snd_timer_system_private *priv;
priv = (struct snd_timer_system_private *)timer->private_data;
del_timer_sync(&priv->tlist);
return 0;
}
static struct snd_timer_hardware snd_timer_system =
{
.flags = SNDRV_TIMER_HW_FIRST | SNDRV_TIMER_HW_TASKLET,
.resolution = 1000000000L / HZ,
.ticks = 10000000L,
.close = snd_timer_s_close,
.start = snd_timer_s_start,
.stop = snd_timer_s_stop
};
static void snd_timer_free_system(struct snd_timer *timer)
{
kfree(timer->private_data);
}
static int snd_timer_register_system(void)
{
struct snd_timer *timer;
struct snd_timer_system_private *priv;
int err;
err = snd_timer_global_new("system", SNDRV_TIMER_GLOBAL_SYSTEM, &timer);
if (err < 0)
return err;
strcpy(timer->name, "system timer");
timer->hw = snd_timer_system;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (priv == NULL) {
snd_timer_free(timer);
return -ENOMEM;
}
setup_timer(&priv->tlist, snd_timer_s_function, (unsigned long) timer);
timer->private_data = priv;
timer->private_free = snd_timer_free_system;
return snd_timer_global_register(timer);
}
#ifdef CONFIG_SND_PROC_FS
/*
* Info interface
*/
static void snd_timer_proc_read(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_timer *timer;
struct snd_timer_instance *ti;
mutex_lock(&register_mutex);
list_for_each_entry(timer, &snd_timer_list, device_list) {
if (timer->card && timer->card->shutdown)
continue;
switch (timer->tmr_class) {
case SNDRV_TIMER_CLASS_GLOBAL:
snd_iprintf(buffer, "G%i: ", timer->tmr_device);
break;
case SNDRV_TIMER_CLASS_CARD:
snd_iprintf(buffer, "C%i-%i: ",
timer->card->number, timer->tmr_device);
break;
case SNDRV_TIMER_CLASS_PCM:
snd_iprintf(buffer, "P%i-%i-%i: ", timer->card->number,
timer->tmr_device, timer->tmr_subdevice);
break;
default:
snd_iprintf(buffer, "?%i-%i-%i-%i: ", timer->tmr_class,
timer->card ? timer->card->number : -1,
timer->tmr_device, timer->tmr_subdevice);
}
snd_iprintf(buffer, "%s :", timer->name);
if (timer->hw.resolution)
snd_iprintf(buffer, " %lu.%03luus (%lu ticks)",
timer->hw.resolution / 1000,
timer->hw.resolution % 1000,
timer->hw.ticks);
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
snd_iprintf(buffer, " SLAVE");
snd_iprintf(buffer, "\n");
list_for_each_entry(ti, &timer->open_list_head, open_list)
snd_iprintf(buffer, " Client %s : %s\n",
ti->owner ? ti->owner : "unknown",
ti->flags & (SNDRV_TIMER_IFLG_START |
SNDRV_TIMER_IFLG_RUNNING)
? "running" : "stopped");
}
mutex_unlock(&register_mutex);
}
static struct snd_info_entry *snd_timer_proc_entry;
static void __init snd_timer_proc_init(void)
{
struct snd_info_entry *entry;
entry = snd_info_create_module_entry(THIS_MODULE, "timers", NULL);
if (entry != NULL) {
entry->c.text.read = snd_timer_proc_read;
if (snd_info_register(entry) < 0) {
snd_info_free_entry(entry);
entry = NULL;
}
}
snd_timer_proc_entry = entry;
}
static void __exit snd_timer_proc_done(void)
{
snd_info_free_entry(snd_timer_proc_entry);
}
#else /* !CONFIG_SND_PROC_FS */
#define snd_timer_proc_init()
#define snd_timer_proc_done()
#endif
/*
* USER SPACE interface
*/
static void snd_timer_user_interrupt(struct snd_timer_instance *timeri,
unsigned long resolution,
unsigned long ticks)
{
struct snd_timer_user *tu = timeri->callback_data;
struct snd_timer_read *r;
int prev;
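/* Coalesce ticks that arrive at an unchanged resolution into the previous
queue entry; otherwise append, counting an overrun when the ring is full. */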
spin_lock(&tu->qlock);
if (tu->qused > 0) {
prev = tu->qtail == 0 ? tu->queue_size - 1 : tu->qtail - 1;
r = &tu->queue[prev];
if (r->resolution == resolution) {
r->ticks += ticks;
goto __wake;
}
}
if (tu->qused >= tu->queue_size) {
tu->overrun++;
} else {
r = &tu->queue[tu->qtail++];
tu->qtail %= tu->queue_size;
r->resolution = resolution;
r->ticks = ticks;
tu->qused++;
}
__wake:
spin_unlock(&tu->qlock);
kill_fasync(&tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
static void snd_timer_user_append_to_tqueue(struct snd_timer_user *tu,
struct snd_timer_tread *tread)
{
if (tu->qused >= tu->queue_size) {
tu->overrun++;
} else {
memcpy(&tu->tqueue[tu->qtail++], tread, sizeof(*tread));
tu->qtail %= tu->queue_size;
tu->qused++;
}
}
static void snd_timer_user_ccallback(struct snd_timer_instance *timeri,
int event,
struct timespec *tstamp,
unsigned long resolution)
{
struct snd_timer_user *tu = timeri->callback_data;
struct snd_timer_tread r1;
unsigned long flags;
if (event >= SNDRV_TIMER_EVENT_START &&
event <= SNDRV_TIMER_EVENT_PAUSE)
tu->tstamp = *tstamp;
if ((tu->filter & (1 << event)) == 0 || !tu->tread)
return;
memset(&r1, 0, sizeof(r1));
r1.event = event;
r1.tstamp = *tstamp;
r1.val = resolution;
spin_lock_irqsave(&tu->qlock, flags);
snd_timer_user_append_to_tqueue(tu, &r1);
spin_unlock_irqrestore(&tu->qlock, flags);
kill_fasync(&tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
static void snd_timer_user_disconnect(struct snd_timer_instance *timeri)
{
struct snd_timer_user *tu = timeri->callback_data;
tu->disconnected = true;
wake_up(&tu->qchange_sleep);
}
static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri,
unsigned long resolution,
unsigned long ticks)
{
struct snd_timer_user *tu = timeri->callback_data;
struct snd_timer_tread *r, r1;
struct timespec tstamp;
int prev, append = 0;
memset(&r1, 0, sizeof(r1));
memset(&tstamp, 0, sizeof(tstamp));
spin_lock(&tu->qlock);
if ((tu->filter & ((1 << SNDRV_TIMER_EVENT_RESOLUTION) |
(1 << SNDRV_TIMER_EVENT_TICK))) == 0) {
spin_unlock(&tu->qlock);
return;
}
if (tu->last_resolution != resolution || ticks > 0) {
if (timer_tstamp_monotonic)
ktime_get_ts(&tstamp);
else
getnstimeofday(&tstamp);
}
if ((tu->filter & (1 << SNDRV_TIMER_EVENT_RESOLUTION)) &&
tu->last_resolution != resolution) {
r1.event = SNDRV_TIMER_EVENT_RESOLUTION;
r1.tstamp = tstamp;
r1.val = resolution;
snd_timer_user_append_to_tqueue(tu, &r1);
tu->last_resolution = resolution;
append++;
}
if ((tu->filter & (1 << SNDRV_TIMER_EVENT_TICK)) == 0)
goto __wake;
if (ticks == 0)
goto __wake;
if (tu->qused > 0) {
prev = tu->qtail == 0 ? tu->queue_size - 1 : tu->qtail - 1;
r = &tu->tqueue[prev];
if (r->event == SNDRV_TIMER_EVENT_TICK) {
r->tstamp = tstamp;
r->val += ticks;
append++;
goto __wake;
}
}
r1.event = SNDRV_TIMER_EVENT_TICK;
r1.tstamp = tstamp;
r1.val = ticks;
snd_timer_user_append_to_tqueue(tu, &r1);
append++;
__wake:
spin_unlock(&tu->qlock);
if (append == 0)
return;
kill_fasync(&tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
static int snd_timer_user_open(struct inode *inode, struct file *file)
{
struct snd_timer_user *tu;
int err;
err = nonseekable_open(inode, file);
if (err < 0)
return err;
tu = kzalloc(sizeof(*tu), GFP_KERNEL);
if (tu == NULL)
return -ENOMEM;
spin_lock_init(&tu->qlock);
init_waitqueue_head(&tu->qchange_sleep);
mutex_init(&tu->ioctl_lock);
tu->ticks = 1;
tu->queue_size = 128;
tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
GFP_KERNEL);
if (tu->queue == NULL) {
kfree(tu);
return -ENOMEM;
}
file->private_data = tu;
return 0;
}
static int snd_timer_user_release(struct inode *inode, struct file *file)
{
struct snd_timer_user *tu;
if (file->private_data) {
tu = file->private_data;
file->private_data = NULL;
mutex_lock(&tu->ioctl_lock);
if (tu->timeri)
snd_timer_close(tu->timeri);
mutex_unlock(&tu->ioctl_lock);
kfree(tu->queue);
kfree(tu->tqueue);
kfree(tu);
}
return 0;
}
static void snd_timer_user_zero_id(struct snd_timer_id *id)
{
id->dev_class = SNDRV_TIMER_CLASS_NONE;
id->dev_sclass = SNDRV_TIMER_SCLASS_NONE;
id->card = -1;
id->device = -1;
id->subdevice = -1;
}
static void snd_timer_user_copy_id(struct snd_timer_id *id, struct snd_timer *timer)
{
id->dev_class = timer->tmr_class;
id->dev_sclass = SNDRV_TIMER_SCLASS_NONE;
id->card = timer->card ? timer->card->number : -1;
id->device = timer->tmr_device;
id->subdevice = timer->tmr_subdevice;
}
static int snd_timer_user_next_device(struct snd_timer_id __user *_tid)
{
struct snd_timer_id id;
struct snd_timer *timer;
struct list_head *p;
if (copy_from_user(&id, _tid, sizeof(id)))
return -EFAULT;
mutex_lock(&register_mutex);
if (id.dev_class < 0) { /* first item */
if (list_empty(&snd_timer_list))
snd_timer_user_zero_id(&id);
else {
timer = list_entry(snd_timer_list.next,
struct snd_timer, device_list);
snd_timer_user_copy_id(&id, timer);
}
} else {
switch (id.dev_class) {
case SNDRV_TIMER_CLASS_GLOBAL:
id.device = id.device < 0 ? 0 : id.device + 1;
list_for_each(p, &snd_timer_list) {
timer = list_entry(p, struct snd_timer, device_list);
if (timer->tmr_class > SNDRV_TIMER_CLASS_GLOBAL) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_device >= id.device) {
snd_timer_user_copy_id(&id, timer);
break;
}
}
if (p == &snd_timer_list)
snd_timer_user_zero_id(&id);
break;
case SNDRV_TIMER_CLASS_CARD:
case SNDRV_TIMER_CLASS_PCM:
if (id.card < 0) {
id.card = 0;
} else {
if (id.device < 0) {
id.device = 0;
} else {
if (id.subdevice < 0)
id.subdevice = 0;
else
id.subdevice++;
}
}
list_for_each(p, &snd_timer_list) {
timer = list_entry(p, struct snd_timer, device_list);
if (timer->tmr_class > id.dev_class) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_class < id.dev_class)
continue;
if (timer->card->number > id.card) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->card->number < id.card)
continue;
if (timer->tmr_device > id.device) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_device < id.device)
continue;
if (timer->tmr_subdevice > id.subdevice) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_subdevice < id.subdevice)
continue;
snd_timer_user_copy_id(&id, timer);
break;
}
if (p == &snd_timer_list)
snd_timer_user_zero_id(&id);
break;
default:
snd_timer_user_zero_id(&id);
}
}
mutex_unlock(&register_mutex);
if (copy_to_user(_tid, &id, sizeof(*_tid)))
return -EFAULT;
return 0;
}
static int snd_timer_user_ginfo(struct file *file,
struct snd_timer_ginfo __user *_ginfo)
{
struct snd_timer_ginfo *ginfo;
struct snd_timer_id tid;
struct snd_timer *t;
struct list_head *p;
int err = 0;
ginfo = memdup_user(_ginfo, sizeof(*ginfo));
if (IS_ERR(ginfo))
return PTR_ERR(ginfo);
tid = ginfo->tid;
memset(ginfo, 0, sizeof(*ginfo));
ginfo->tid = tid;
mutex_lock(&register_mutex);
t = snd_timer_find(&tid);
if (t != NULL) {
ginfo->card = t->card ? t->card->number : -1;
if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
ginfo->flags |= SNDRV_TIMER_FLG_SLAVE;
strlcpy(ginfo->id, t->id, sizeof(ginfo->id));
strlcpy(ginfo->name, t->name, sizeof(ginfo->name));
ginfo->resolution = t->hw.resolution;
if (t->hw.resolution_min > 0) {
ginfo->resolution_min = t->hw.resolution_min;
ginfo->resolution_max = t->hw.resolution_max;
}
list_for_each(p, &t->open_list_head) {
ginfo->clients++;
}
} else {
err = -ENODEV;
}
mutex_unlock(&register_mutex);
if (err >= 0 && copy_to_user(_ginfo, ginfo, sizeof(*ginfo)))
err = -EFAULT;
kfree(ginfo);
return err;
}
static int timer_set_gparams(struct snd_timer_gparams *gparams)
{
struct snd_timer *t;
int err;
mutex_lock(&register_mutex);
t = snd_timer_find(&gparams->tid);
if (!t) {
err = -ENODEV;
goto _error;
}
if (!list_empty(&t->open_list_head)) {
err = -EBUSY;
goto _error;
}
if (!t->hw.set_period) {
err = -ENOSYS;
goto _error;
}
err = t->hw.set_period(t, gparams->period_num, gparams->period_den);
_error:
mutex_unlock(&register_mutex);
return err;
}
static int snd_timer_user_gparams(struct file *file,
struct snd_timer_gparams __user *_gparams)
{
struct snd_timer_gparams gparams;
if (copy_from_user(&gparams, _gparams, sizeof(gparams)))
return -EFAULT;
return timer_set_gparams(&gparams);
}
static int snd_timer_user_gstatus(struct file *file,
struct snd_timer_gstatus __user *_gstatus)
{
struct snd_timer_gstatus gstatus;
struct snd_timer_id tid;
struct snd_timer *t;
int err = 0;
if (copy_from_user(&gstatus, _gstatus, sizeof(gstatus)))
return -EFAULT;
tid = gstatus.tid;
memset(&gstatus, 0, sizeof(gstatus));
gstatus.tid = tid;
mutex_lock(&register_mutex);
t = snd_timer_find(&tid);
if (t != NULL) {
if (t->hw.c_resolution)
gstatus.resolution = t->hw.c_resolution(t);
else
gstatus.resolution = t->hw.resolution;
if (t->hw.precise_resolution) {
t->hw.precise_resolution(t, &gstatus.resolution_num,
&gstatus.resolution_den);
} else {
gstatus.resolution_num = gstatus.resolution;
gstatus.resolution_den = 1000000000uL;
}
} else {
err = -ENODEV;
}
mutex_unlock(&register_mutex);
if (err >= 0 && copy_to_user(_gstatus, &gstatus, sizeof(gstatus)))
err = -EFAULT;
return err;
}
static int snd_timer_user_tselect(struct file *file,
struct snd_timer_select __user *_tselect)
{
struct snd_timer_user *tu;
struct snd_timer_select tselect;
char str[32];
int err = 0;
tu = file->private_data;
if (tu->timeri) {
snd_timer_close(tu->timeri);
tu->timeri = NULL;
}
if (copy_from_user(&tselect, _tselect, sizeof(tselect))) {
err = -EFAULT;
goto __err;
}
sprintf(str, "application %i", current->pid);
if (tselect.id.dev_class != SNDRV_TIMER_CLASS_SLAVE)
tselect.id.dev_sclass = SNDRV_TIMER_SCLASS_APPLICATION;
err = snd_timer_open(&tu->timeri, str, &tselect.id, current->pid);
if (err < 0)
goto __err;
tu->qhead = tu->qtail = tu->qused = 0;
kfree(tu->queue);
tu->queue = NULL;
kfree(tu->tqueue);
tu->tqueue = NULL;
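/* (Re)allocate the event queue to match the read mode: tread mode uses the
timestamped tqueue, plain reads use the tick-only queue. */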
if (tu->tread) {
tu->tqueue = kmalloc(tu->queue_size * sizeof(struct snd_timer_tread),
GFP_KERNEL);
if (tu->tqueue == NULL)
err = -ENOMEM;
} else {
tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
GFP_KERNEL);
if (tu->queue == NULL)
err = -ENOMEM;
}
if (err < 0) {
snd_timer_close(tu->timeri);
tu->timeri = NULL;
} else {
tu->timeri->flags |= SNDRV_TIMER_IFLG_FAST;
tu->timeri->callback = tu->tread
? snd_timer_user_tinterrupt : snd_timer_user_interrupt;
tu->timeri->ccallback = snd_timer_user_ccallback;
tu->timeri->callback_data = (void *)tu;
tu->timeri->disconnect = snd_timer_user_disconnect;
}
__err:
return err;
}
static int snd_timer_user_info(struct file *file,
struct snd_timer_info __user *_info)
{
struct snd_timer_user *tu;
struct snd_timer_info *info;
struct snd_timer *t;
int err = 0;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
t = tu->timeri->timer;
if (!t)
return -EBADFD;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (! info)
return -ENOMEM;
info->card = t->card ? t->card->number : -1;
if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
info->flags |= SNDRV_TIMER_FLG_SLAVE;
strlcpy(info->id, t->id, sizeof(info->id));
strlcpy(info->name, t->name, sizeof(info->name));
info->resolution = t->hw.resolution;
if (copy_to_user(_info, info, sizeof(*_info)))
err = -EFAULT;
kfree(info);
return err;
}
static int snd_timer_user_params(struct file *file,
struct snd_timer_params __user *_params)
{
struct snd_timer_user *tu;
struct snd_timer_params params;
struct snd_timer *t;
struct snd_timer_read *tr;
struct snd_timer_tread *ttr;
int err;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
t = tu->timeri->timer;
if (!t)
return -EBADFD;
if (copy_from_user(&params, _params, sizeof(params)))
return -EFAULT;
if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE)) {
u64 resolution;
if (params.ticks < 1) {
err = -EINVAL;
goto _end;
}
/* Don't allow an effective interval (resolution * ticks) below 1 ms */
resolution = snd_timer_resolution(tu->timeri);
resolution *= params.ticks;
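/*
 * Worked example: with a 10 us (10000 ns) base resolution,
 * params.ticks must be >= 100, since 10000 * 100 = 1000000 ns
 * is the 1 ms floor enforced below.
 */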
if (resolution < 1000000) {
err = -EINVAL;
goto _end;
}
}
if (params.queue_size > 0 &&
(params.queue_size < 32 || params.queue_size > 1024)) {
err = -EINVAL;
goto _end;
}
if (params.filter & ~((1<<SNDRV_TIMER_EVENT_RESOLUTION)|
(1<<SNDRV_TIMER_EVENT_TICK)|
(1<<SNDRV_TIMER_EVENT_START)|
(1<<SNDRV_TIMER_EVENT_STOP)|
(1<<SNDRV_TIMER_EVENT_CONTINUE)|
(1<<SNDRV_TIMER_EVENT_PAUSE)|
(1<<SNDRV_TIMER_EVENT_SUSPEND)|
(1<<SNDRV_TIMER_EVENT_RESUME)|
(1<<SNDRV_TIMER_EVENT_MSTART)|
(1<<SNDRV_TIMER_EVENT_MSTOP)|
(1<<SNDRV_TIMER_EVENT_MCONTINUE)|
(1<<SNDRV_TIMER_EVENT_MPAUSE)|
(1<<SNDRV_TIMER_EVENT_MSUSPEND)|
(1<<SNDRV_TIMER_EVENT_MRESUME))) {
err = -EINVAL;
goto _end;
}
snd_timer_stop(tu->timeri);
spin_lock_irq(&t->lock);
tu->timeri->flags &= ~(SNDRV_TIMER_IFLG_AUTO|
SNDRV_TIMER_IFLG_EXCLUSIVE|
SNDRV_TIMER_IFLG_EARLY_EVENT);
if (params.flags & SNDRV_TIMER_PSFLG_AUTO)
tu->timeri->flags |= SNDRV_TIMER_IFLG_AUTO;
if (params.flags & SNDRV_TIMER_PSFLG_EXCLUSIVE)
tu->timeri->flags |= SNDRV_TIMER_IFLG_EXCLUSIVE;
if (params.flags & SNDRV_TIMER_PSFLG_EARLY_EVENT)
tu->timeri->flags |= SNDRV_TIMER_IFLG_EARLY_EVENT;
spin_unlock_irq(&t->lock);
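/*
 * Resize by allocate-then-swap: the old buffer is freed only once the
 * new one exists, so a failed kmalloc() simply keeps the old queue
 * and queue_size instead of leaving the timer without a queue.
 */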
if (params.queue_size > 0 &&
(unsigned int)tu->queue_size != params.queue_size) {
if (tu->tread) {
ttr = kmalloc(params.queue_size * sizeof(*ttr),
GFP_KERNEL);
if (ttr) {
kfree(tu->tqueue);
tu->queue_size = params.queue_size;
tu->tqueue = ttr;
}
} else {
tr = kmalloc(params.queue_size * sizeof(*tr),
GFP_KERNEL);
if (tr) {
kfree(tu->queue);
tu->queue_size = params.queue_size;
tu->queue = tr;
}
}
}
tu->qhead = tu->qtail = tu->qused = 0;
if (tu->timeri->flags & SNDRV_TIMER_IFLG_EARLY_EVENT) {
if (tu->tread) {
struct snd_timer_tread tread;
memset(&tread, 0, sizeof(tread));
tread.event = SNDRV_TIMER_EVENT_EARLY;
tread.tstamp.tv_sec = 0;
tread.tstamp.tv_nsec = 0;
tread.val = 0;
snd_timer_user_append_to_tqueue(tu, &tread);
} else {
struct snd_timer_read *r = &tu->queue[0];
r->resolution = 0;
r->ticks = 0;
tu->qused++;
tu->qtail++;
}
}
tu->filter = params.filter;
tu->ticks = params.ticks;
err = 0;
_end:
if (copy_to_user(_params, &params, sizeof(params)))
return -EFAULT;
return err;
}
static int snd_timer_user_status(struct file *file,
struct snd_timer_status __user *_status)
{
struct snd_timer_user *tu;
struct snd_timer_status status;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
memset(&status, 0, sizeof(status));
status.tstamp = tu->tstamp;
status.resolution = snd_timer_resolution(tu->timeri);
status.lost = tu->timeri->lost;
status.overrun = tu->overrun;
spin_lock_irq(&tu->qlock);
status.queue = tu->qused;
spin_unlock_irq(&tu->qlock);
if (copy_to_user(_status, &status, sizeof(status)))
return -EFAULT;
return 0;
}
static int snd_timer_user_start(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
snd_timer_stop(tu->timeri);
tu->timeri->lost = 0;
tu->last_resolution = 0;
return (err = snd_timer_start(tu->timeri, tu->ticks)) < 0 ? err : 0;
}
static int snd_timer_user_stop(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
return (err = snd_timer_stop(tu->timeri)) < 0 ? err : 0;
}
static int snd_timer_user_continue(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
/* if the timer has not been paused, fall back to starting it */
if (!(tu->timeri->flags & SNDRV_TIMER_IFLG_PAUSED))
return snd_timer_user_start(file);
tu->timeri->lost = 0;
return (err = snd_timer_continue(tu->timeri)) < 0 ? err : 0;
}
static int snd_timer_user_pause(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
return (err = snd_timer_pause(tu->timeri)) < 0 ? err : 0;
}
enum {
SNDRV_TIMER_IOCTL_START_OLD = _IO('T', 0x20),
SNDRV_TIMER_IOCTL_STOP_OLD = _IO('T', 0x21),
SNDRV_TIMER_IOCTL_CONTINUE_OLD = _IO('T', 0x22),
SNDRV_TIMER_IOCTL_PAUSE_OLD = _IO('T', 0x23),
};
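/*
 * The *_OLD numbers above are legacy ioctl encodings; the dispatcher
 * below accepts them alongside the current ones, presumably so that
 * binaries built against old headers keep working.
 */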
static long __snd_timer_user_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct snd_timer_user *tu;
void __user *argp = (void __user *)arg;
int __user *p = argp;
tu = file->private_data;
switch (cmd) {
case SNDRV_TIMER_IOCTL_PVERSION:
return put_user(SNDRV_TIMER_VERSION, p) ? -EFAULT : 0;
case SNDRV_TIMER_IOCTL_NEXT_DEVICE:
return snd_timer_user_next_device(argp);
case SNDRV_TIMER_IOCTL_TREAD:
{
int xarg;
if (tu->timeri) /* too late */
return -EBUSY;
if (get_user(xarg, p))
return -EFAULT;
tu->tread = xarg ? 1 : 0;
return 0;
}
case SNDRV_TIMER_IOCTL_GINFO:
return snd_timer_user_ginfo(file, argp);
case SNDRV_TIMER_IOCTL_GPARAMS:
return snd_timer_user_gparams(file, argp);
case SNDRV_TIMER_IOCTL_GSTATUS:
return snd_timer_user_gstatus(file, argp);
case SNDRV_TIMER_IOCTL_SELECT:
return snd_timer_user_tselect(file, argp);
case SNDRV_TIMER_IOCTL_INFO:
return snd_timer_user_info(file, argp);
case SNDRV_TIMER_IOCTL_PARAMS:
return snd_timer_user_params(file, argp);
case SNDRV_TIMER_IOCTL_STATUS:
return snd_timer_user_status(file, argp);
case SNDRV_TIMER_IOCTL_START:
case SNDRV_TIMER_IOCTL_START_OLD:
return snd_timer_user_start(file);
case SNDRV_TIMER_IOCTL_STOP:
case SNDRV_TIMER_IOCTL_STOP_OLD:
return snd_timer_user_stop(file);
case SNDRV_TIMER_IOCTL_CONTINUE:
case SNDRV_TIMER_IOCTL_CONTINUE_OLD:
return snd_timer_user_continue(file);
case SNDRV_TIMER_IOCTL_PAUSE:
case SNDRV_TIMER_IOCTL_PAUSE_OLD:
return snd_timer_user_pause(file);
}
return -ENOTTY;
}
static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct snd_timer_user *tu = file->private_data;
long ret;
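/*
 * Serialize all ioctls on this fd: ioctl_lock prevents races such as
 * one thread reselecting the timer (which frees and reallocates the
 * event queues) while another ioctl is still using them.
 */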
mutex_lock(&tu->ioctl_lock);
ret = __snd_timer_user_ioctl(file, cmd, arg);
mutex_unlock(&tu->ioctl_lock);
return ret;
}
static int snd_timer_user_fasync(int fd, struct file * file, int on)
{
struct snd_timer_user *tu;
tu = file->private_data;
return fasync_helper(fd, file, on, &tu->fasync);
}
static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
size_t count, loff_t *offset)
{
struct snd_timer_user *tu;
long result = 0, unit;
int qhead;
int err = 0;
tu = file->private_data;
unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read);
mutex_lock(&tu->ioctl_lock);
spin_lock_irq(&tu->qlock);
while ((long)count - result >= unit) {
while (!tu->qused) {
wait_queue_t wait;
if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
err = -EAGAIN;
goto _error;
}
set_current_state(TASK_INTERRUPTIBLE);
init_waitqueue_entry(&wait, current);
add_wait_queue(&tu->qchange_sleep, &wait);
spin_unlock_irq(&tu->qlock);
mutex_unlock(&tu->ioctl_lock);
schedule();
mutex_lock(&tu->ioctl_lock);
spin_lock_irq(&tu->qlock);
remove_wait_queue(&tu->qchange_sleep, &wait);
if (tu->disconnected) {
err = -ENODEV;
goto _error;
}
if (signal_pending(current)) {
err = -ERESTARTSYS;
goto _error;
}
}
qhead = tu->qhead++;
tu->qhead %= tu->queue_size;
tu->qused--;
spin_unlock_irq(&tu->qlock);
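/*
 * qlock must be dropped around copy_to_user(), which may fault and
 * sleep; qhead/qused were already advanced under the lock, so a
 * concurrent producer still sees a consistent queue.
 */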
if (tu->tread) {
if (copy_to_user(buffer, &tu->tqueue[qhead],
sizeof(struct snd_timer_tread)))
err = -EFAULT;
} else {
if (copy_to_user(buffer, &tu->queue[qhead],
sizeof(struct snd_timer_read)))
err = -EFAULT;
}
spin_lock_irq(&tu->qlock);
if (err < 0)
goto _error;
result += unit;
buffer += unit;
}
_error:
spin_unlock_irq(&tu->qlock);
mutex_unlock(&tu->ioctl_lock);
return result > 0 ? result : err;
}
static unsigned int snd_timer_user_poll(struct file *file, poll_table * wait)
{
unsigned int mask;
struct snd_timer_user *tu;
tu = file->private_data;
poll_wait(file, &tu->qchange_sleep, wait);
mask = 0;
if (tu->qused)
mask |= POLLIN | POLLRDNORM;
if (tu->disconnected)
mask |= POLLERR;
return mask;
}
#ifdef CONFIG_COMPAT
#include "timer_compat.c"
#else
#define snd_timer_user_ioctl_compat NULL
#endif
static const struct file_operations snd_timer_f_ops =
{
.owner = THIS_MODULE,
.read = snd_timer_user_read,
.open = snd_timer_user_open,
.release = snd_timer_user_release,
.llseek = no_llseek,
.poll = snd_timer_user_poll,
.unlocked_ioctl = snd_timer_user_ioctl,
.compat_ioctl = snd_timer_user_ioctl_compat,
.fasync = snd_timer_user_fasync,
};
/* unregister the system timer */
static void snd_timer_free_all(void)
{
struct snd_timer *timer, *n;
list_for_each_entry_safe(timer, n, &snd_timer_list, device_list)
snd_timer_free(timer);
}
static struct device timer_dev;
/*
* ENTRY functions
*/
static int __init alsa_timer_init(void)
{
int err;
snd_device_initialize(&timer_dev, NULL);
dev_set_name(&timer_dev, "timer");
#ifdef SNDRV_OSS_INFO_DEV_TIMERS
snd_oss_info_register(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1,
"system timer");
#endif
err = snd_timer_register_system();
if (err < 0) {
pr_err("ALSA: unable to register system timer (%i)\n", err);
put_device(&timer_dev);
return err;
}
err = snd_register_device(SNDRV_DEVICE_TYPE_TIMER, NULL, 0,
&snd_timer_f_ops, NULL, &timer_dev);
if (err < 0) {
pr_err("ALSA: unable to register timer device (%i)\n", err);
snd_timer_free_all();
put_device(&timer_dev);
return err;
}
snd_timer_proc_init();
return 0;
}
static void __exit alsa_timer_exit(void)
{
snd_unregister_device(&timer_dev);
snd_timer_free_all();
put_device(&timer_dev);
snd_timer_proc_done();
#ifdef SNDRV_OSS_INFO_DEV_TIMERS
snd_oss_info_unregister(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1);
#endif
}
module_init(alsa_timer_init)
module_exit(alsa_timer_exit)
EXPORT_SYMBOL(snd_timer_open);
EXPORT_SYMBOL(snd_timer_close);
EXPORT_SYMBOL(snd_timer_resolution);
EXPORT_SYMBOL(snd_timer_start);
EXPORT_SYMBOL(snd_timer_stop);
EXPORT_SYMBOL(snd_timer_continue);
EXPORT_SYMBOL(snd_timer_pause);
EXPORT_SYMBOL(snd_timer_new);
EXPORT_SYMBOL(snd_timer_notify);
EXPORT_SYMBOL(snd_timer_global_new);
EXPORT_SYMBOL(snd_timer_global_free);
EXPORT_SYMBOL(snd_timer_global_register);
EXPORT_SYMBOL(snd_timer_interrupt);
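/*
 * Minimal user-space sketch of the ioctl ABI implemented above
 * (assumes a /dev/snd/timer node and <sound/asound.h>; error handling
 * and the snd_timer_select/snd_timer_params setup are elided):
 *
 *	int fd = open("/dev/snd/timer", O_RDONLY);
 *	ioctl(fd, SNDRV_TIMER_IOCTL_SELECT, &sel);
 *	ioctl(fd, SNDRV_TIMER_IOCTL_PARAMS, &params);
 *	ioctl(fd, SNDRV_TIMER_IOCTL_START);
 *	struct snd_timer_read ev;
 *	read(fd, &ev, sizeof(ev));	(blocks until the timer fires)
 */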
* emulate.c
*
* Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
*
* Copyright (c) 2005 Keir Fraser
*
* Linux coding style, mod r/m decoder, segment base fixes, real-mode
* privileged instructions:
*
* Copyright (C) 2006 Qumranet
* Copyright 2010 Red Hat, Inc. and/or its affiliates.
*
* Avi Kivity <avi@qumranet.com>
* Yaniv Kamay <yaniv@qumranet.com>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*
* From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
*/
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include <asm/debugreg.h>
#include "x86.h"
#include "tss.h"
/*
* Operand types
*/
#define OpNone 0ull
#define OpImplicit 1ull /* No generic decode */
#define OpReg 2ull /* Register */
#define OpMem 3ull /* Memory */
#define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */
#define OpDI 5ull /* ES:DI/EDI/RDI */
#define OpMem64 6ull /* Memory, 64-bit */
#define OpImmUByte 7ull /* Zero-extended 8-bit immediate */
#define OpDX 8ull /* DX register */
#define OpCL 9ull /* CL register (for shifts) */
#define OpImmByte 10ull /* 8-bit sign extended immediate */
#define OpOne 11ull /* Implied 1 */
#define OpImm 12ull /* Sign extended up to 32-bit immediate */
#define OpMem16 13ull /* Memory operand (16-bit). */
#define OpMem32 14ull /* Memory operand (32-bit). */
#define OpImmU 15ull /* Immediate operand, zero extended */
#define OpSI 16ull /* SI/ESI/RSI */
#define OpImmFAddr 17ull /* Immediate far address */
#define OpMemFAddr 18ull /* Far address in memory */
#define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */
#define OpES 20ull /* ES */
#define OpCS 21ull /* CS */
#define OpSS 22ull /* SS */
#define OpDS 23ull /* DS */
#define OpFS 24ull /* FS */
#define OpGS 25ull /* GS */
#define OpMem8 26ull /* 8-bit zero extended memory operand */
#define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */
#define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo 29ull /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */
#define OpBits 5 /* Width of operand field */
#define OpMask ((1ull << OpBits) - 1)
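/*
 * Each operand descriptor is an OpBits(=5)-wide field packed into the
 * 56-bit opcode flags: bits 1-5 hold the destination type (DstShift),
 * bits 6-10 the source (SrcShift) and bits 31-35 the second source
 * (Src2Shift), as defined below.
 */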
/*
* Opcode effective-address decode tables.
* Note that we only emulate instructions that have at least one memory
* operand (excluding implicit stack references). We assume that stack
* references and instruction fetches will never occur in special memory
* areas that require emulation. So, for example, 'mov <imm>,<reg>' need
* not be handled.
*/
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp (1<<0) /* 8-bit operands. */
/* Destination operand type. */
#define DstShift 1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg (OpReg << DstShift)
#define DstMem (OpMem << DstShift)
#define DstAcc (OpAcc << DstShift)
#define DstDI (OpDI << DstShift)
#define DstMem64 (OpMem64 << DstShift)
#define DstMem16 (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX (OpDX << DstShift)
#define DstAccLo (OpAccLo << DstShift)
#define DstMask (OpMask << DstShift)
/* Source operand type. */
#define SrcShift 6
#define SrcNone (OpNone << SrcShift)
#define SrcReg (OpReg << SrcShift)
#define SrcMem (OpMem << SrcShift)
#define SrcMem16 (OpMem16 << SrcShift)
#define SrcMem32 (OpMem32 << SrcShift)
#define SrcImm (OpImm << SrcShift)
#define SrcImmByte (OpImmByte << SrcShift)
#define SrcOne (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU (OpImmU << SrcShift)
#define SrcSI (OpSI << SrcShift)
#define SrcXLat (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc (OpAcc << SrcShift)
#define SrcImmU16 (OpImmU16 << SrcShift)
#define SrcImm64 (OpImm64 << SrcShift)
#define SrcDX (OpDX << SrcShift)
#define SrcMem8 (OpMem8 << SrcShift)
#define SrcAccHi (OpAccHi << SrcShift)
#define SrcMask (OpMask << SrcShift)
#define BitOp (1<<11)
#define MemAbs (1<<12) /* Memory operand is absolute displacement */
#define String (1<<13) /* String instruction (rep capable) */
#define Stack (1<<14) /* Stack instruction (push/pop) */
#define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
#define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
#define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
#define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape (5<<15) /* Escape to coprocessor instruction */
#define InstrDual (6<<15) /* Alternate instruction decoding of mod == 3 */
#define ModeDual (7<<15) /* Different instruction for 32/64 bit */
#define Sse (1<<18) /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM (1<<19)
/* Destination is only written; never read. */
#define Mov (1<<20)
/* Misc flags */
#define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined (1<<25) /* No Such Instruction */
#define Lock (1<<26) /* lock prefix is allowed for the instruction */
#define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64 (1<<28)
#define PageTable (1 << 29) /* instruction used to write page table */
#define NotImpl (1 << 30) /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift (31)
#define Src2None (OpNone << Src2Shift)
#define Src2Mem (OpMem << Src2Shift)
#define Src2CL (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One (OpOne << Src2Shift)
#define Src2Imm (OpImm << Src2Shift)
#define Src2ES (OpES << Src2Shift)
#define Src2CS (OpCS << Src2Shift)
#define Src2SS (OpSS << Src2Shift)
#define Src2DS (OpDS << Src2Shift)
#define Src2FS (OpFS << Src2Shift)
#define Src2GS (OpGS << Src2Shift)
#define Src2Mask (OpMask << Src2Shift)
#define Mmx ((u64)1 << 40) /* MMX Vector instruction */
#define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx ((u64)1 << 43) /* Advanced Vector Extensions */
#define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
#define NoWrite ((u64)1 << 45) /* No writeback */
#define SrcWrite ((u64)1 << 46) /* Write back src operand */
#define NoMod ((u64)1 << 47) /* Mod field is ignored */
#define Intercept ((u64)1 << 48) /* Has valid intercept field */
#define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */
#define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */
#define NearBranch ((u64)1 << 52) /* Near branches */
#define No16 ((u64)1 << 53) /* No 16 bit operand */
#define IncSP ((u64)1 << 54) /* SP is incremented before ModRM calc */
#define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8
/*
* fastop functions have a special calling convention:
*
* dst: rax (in/out)
* src: rdx (in/out)
* src2: rcx (in)
* flags: rflags (in/out)
* ex: rsi (in:fastop pointer, out:zero if exception)
*
* Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
* different operand sizes can be reached by calculation, rather than a jump
* table (which would be bigger than the code).
*
* fastop functions are declared as taking a never-defined fastop parameter,
* so they can't be called from C directly.
*/
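/*
 * With FASTOP_SIZE == 8, the stubs emitted by e.g. FASTOP2(add) land
 * at em_add + 0 (8-bit), +8 (16-bit), +16 (32-bit) and +24 (64-bit),
 * so the variant for an n-byte operand is simply
 * em_add + ilog2(n) * FASTOP_SIZE.
 */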
struct fastop;
struct opcode {
u64 flags : 56;
u64 intercept : 8;
union {
int (*execute)(struct x86_emulate_ctxt *ctxt);
const struct opcode *group;
const struct group_dual *gdual;
const struct gprefix *gprefix;
const struct escape *esc;
const struct instr_dual *idual;
const struct mode_dual *mdual;
void (*fastop)(struct fastop *fake);
} u;
int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};
struct group_dual {
struct opcode mod012[8];
struct opcode mod3[8];
};
struct gprefix {
struct opcode pfx_no;
struct opcode pfx_66;
struct opcode pfx_f2;
struct opcode pfx_f3;
};
struct escape {
struct opcode op[8];
struct opcode high[64];
};
struct instr_dual {
struct opcode mod012;
struct opcode mod3;
};
struct mode_dual {
struct opcode mode32;
struct opcode mode64;
};
#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
enum x86_transfer_type {
X86_TRANSFER_NONE,
X86_TRANSFER_CALL_JMP,
X86_TRANSFER_RET,
X86_TRANSFER_TASK_SWITCH,
};
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
if (!(ctxt->regs_valid & (1 << nr))) {
ctxt->regs_valid |= 1 << nr;
ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
}
return ctxt->_regs[nr];
}
static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
ctxt->regs_valid |= 1 << nr;
ctxt->regs_dirty |= 1 << nr;
return &ctxt->_regs[nr];
}
static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
reg_read(ctxt, nr);
return reg_write(ctxt, nr);
}
static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
unsigned reg;
for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}
static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
ctxt->regs_dirty = 0;
ctxt->regs_valid = 0;
}
/*
* These EFLAGS bits are restored from saved value during emulation, and
* any changes are written back to the saved value after emulation.
*/
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
X86_EFLAGS_PF|X86_EFLAGS_CF)
#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
#define FOP_FUNC(name) \
".align " __stringify(FASTOP_SIZE) " \n\t" \
".type " name ", @function \n\t" \
name ":\n\t"
#define FOP_RET "ret \n\t"
#define FOP_START(op) \
extern void em_##op(struct fastop *fake); \
asm(".pushsection .text, \"ax\" \n\t" \
".global em_" #op " \n\t" \
FOP_FUNC("em_" #op)
#define FOP_END \
".popsection")
#define FOPNOP() \
FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \
FOP_RET
#define FOP1E(op, dst) \
FOP_FUNC(#op "_" #dst) \
"10: " #op " %" #dst " \n\t" FOP_RET
#define FOP1EEX(op, dst) \
FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
#define FASTOP1(op) \
FOP_START(op) \
FOP1E(op##b, al) \
FOP1E(op##w, ax) \
FOP1E(op##l, eax) \
ON64(FOP1E(op##q, rax)) \
FOP_END
/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
FOP_START(name) \
FOP1E(op, cl) \
FOP1E(op, cx) \
FOP1E(op, ecx) \
ON64(FOP1E(op, rcx)) \
FOP_END
/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
FOP_START(name) \
FOP1EEX(op, cl) \
FOP1EEX(op, cx) \
FOP1EEX(op, ecx) \
ON64(FOP1EEX(op, rcx)) \
FOP_END
#define FOP2E(op, dst, src) \
FOP_FUNC(#op "_" #dst "_" #src) \
#op " %" #src ", %" #dst " \n\t" FOP_RET
#define FASTOP2(op) \
FOP_START(op) \
FOP2E(op##b, al, dl) \
FOP2E(op##w, ax, dx) \
FOP2E(op##l, eax, edx) \
ON64(FOP2E(op##q, rax, rdx)) \
FOP_END
/* 2 operand, word only */
#define FASTOP2W(op) \
FOP_START(op) \
FOPNOP() \
FOP2E(op##w, ax, dx) \
FOP2E(op##l, eax, edx) \
ON64(FOP2E(op##q, rax, rdx)) \
FOP_END
/* 2 operand, src is CL */
#define FASTOP2CL(op) \
FOP_START(op) \
FOP2E(op##b, al, cl) \
FOP2E(op##w, ax, cl) \
FOP2E(op##l, eax, cl) \
ON64(FOP2E(op##q, rax, cl)) \
FOP_END
/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
FOP_START(name) \
FOP2E(op##b, dl, al) \
FOP2E(op##w, dx, ax) \
FOP2E(op##l, edx, eax) \
ON64(FOP2E(op##q, rdx, rax)) \
FOP_END
#define FOP3E(op, dst, src, src2) \
FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
#op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
FOP_START(op) \
FOPNOP() \
FOP3E(op##w, ax, dx, cl) \
FOP3E(op##l, eax, edx, cl) \
ON64(FOP3E(op##q, rax, rdx, cl)) \
FOP_END
/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) \
".align 4 \n\t" \
".type " #op ", @function \n\t" \
#op ": \n\t" \
#op " %al \n\t" \
FOP_RET
asm(".global kvm_fastop_exception \n"
"kvm_fastop_exception: xor %esi, %esi; ret");
FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;
FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
enum x86_intercept intercept,
enum x86_intercept_stage stage)
{
struct x86_instruction_info info = {
.intercept = intercept,
.rep_prefix = ctxt->rep_prefix,
.modrm_mod = ctxt->modrm_mod,
.modrm_reg = ctxt->modrm_reg,
.modrm_rm = ctxt->modrm_rm,
.src_val = ctxt->src.val64,
.dst_val = ctxt->dst.val64,
.src_bytes = ctxt->src.bytes,
.dst_bytes = ctxt->dst.bytes,
.ad_bytes = ctxt->ad_bytes,
.next_rip = ctxt->eip,
};
return ctxt->ops->intercept(ctxt, &info, stage);
}
static void assign_masked(ulong *dest, ulong src, ulong mask)
{
*dest = (*dest & ~mask) | (src & mask);
}
static void assign_register(unsigned long *reg, u64 val, int bytes)
{
/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
switch (bytes) {
case 1:
*(u8 *)reg = (u8)val;
break;
case 2:
*(u16 *)reg = (u16)val;
break;
case 4:
*reg = (u32)val;
break; /* 64b: zero-extend */
case 8:
*reg = val;
break;
}
}
static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
return (1UL << (ctxt->ad_bytes << 3)) - 1;
}
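/*
 * e.g. ad_bytes == 2 yields 0xffff and ad_bytes == 4 yields
 * 0xffffffff; the full-native-width case is short-circuited in
 * address_mask() before ad_mask() is reached.
 */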
static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
u16 sel;
struct desc_struct ss;
if (ctxt->mode == X86EMUL_MODE_PROT64)
return ~0UL;
ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */
}
static int stack_size(struct x86_emulate_ctxt *ctxt)
{
return (__fls(stack_mask(ctxt)) + 1) >> 3;
}
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
if (ctxt->ad_bytes == sizeof(unsigned long))
return reg;
else
return reg & ad_mask(ctxt);
}
static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
return address_mask(ctxt, reg_read(ctxt, reg));
}
static void masked_increment(ulong *reg, ulong mask, int inc)
{
assign_masked(reg, *reg + inc, mask);
}
static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
ulong *preg = reg_rmw(ctxt, reg);
assign_register(preg, *preg + inc, ctxt->ad_bytes);
}
static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}
static u32 desc_limit_scaled(struct desc_struct *desc)
{
u32 limit = get_desc_limit(desc);
return desc->g ? (limit << 12) | 0xfff : limit;
}
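/*
 * With the granularity bit set the 20-bit limit counts 4 KiB units,
 * e.g. g=1, limit=0xfffff scales to (0xfffff << 12) | 0xfff ==
 * 0xffffffff, i.e. 4 GiB - 1.
 */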
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
return 0;
return ctxt->ops->get_cached_segment_base(ctxt, seg);
}
static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
u32 error, bool valid)
{
WARN_ON(vec > 0x1f);
ctxt->exception.vector = vec;
ctxt->exception.error_code = error;
ctxt->exception.error_code_valid = valid;
return X86EMUL_PROPAGATE_FAULT;
}
static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
return emulate_exception(ctxt, DB_VECTOR, 0, false);
}
static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
return emulate_exception(ctxt, GP_VECTOR, err, true);
}
static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
return emulate_exception(ctxt, SS_VECTOR, err, true);
}
static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
return emulate_exception(ctxt, UD_VECTOR, 0, false);
}
static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
return emulate_exception(ctxt, TS_VECTOR, err, true);
}
static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
return emulate_exception(ctxt, DE_VECTOR, 0, false);
}
static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
return emulate_exception(ctxt, NM_VECTOR, 0, false);
}
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
u16 selector;
struct desc_struct desc;
ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
return selector;
}
static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
unsigned seg)
{
u16 dummy;
u32 base3;
struct desc_struct desc;
ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}
/*
* x86 defines three classes of vector instructions: explicitly
* aligned, explicitly unaligned, and the rest, which change behaviour
* depending on whether they're AVX encoded or not.
*
* Also included is CMPXCHG16B which is not a vector instruction, yet it is
* subject to the same check.
*/
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
if (likely(size < 16))
return false;
if (ctxt->d & Aligned)
return true;
else if (ctxt->d & Unaligned)
return false;
else if (ctxt->d & Avx)
return false;
else
return true;
}
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
unsigned *max_size, unsigned size,
bool write, bool fetch,
enum x86emul_mode mode, ulong *linear)
{
struct desc_struct desc;
bool usable;
ulong la;
u32 lim;
u16 sel;
la = seg_base(ctxt, addr.seg) + addr.ea;
*max_size = 0;
switch (mode) {
case X86EMUL_MODE_PROT64:
*linear = la;
if (is_noncanonical_address(la))
goto bad;
*max_size = min_t(u64, ~0u, (1ull << 48) - la);
if (size > *max_size)
goto bad;
break;
default:
*linear = la = (u32)la;
usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
addr.seg);
if (!usable)
goto bad;
/* code segment in protected mode or read-only data segment */
if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
|| !(desc.type & 2)) && write)
goto bad;
/* unreadable code segment */
if (!fetch && (desc.type & 8) && !(desc.type & 2))
goto bad;
lim = desc_limit_scaled(&desc);
if (!(desc.type & 8) && (desc.type & 4)) {
/* expand-down segment */
if (addr.ea <= lim)
goto bad;
lim = desc.d ? 0xffffffff : 0xffff;
}
if (addr.ea > lim)
goto bad;
if (lim == 0xffffffff)
*max_size = ~0u;
else {
*max_size = (u64)lim + 1 - addr.ea;
if (size > *max_size)
goto bad;
}
break;
}
if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
bad:
if (addr.seg == VCPU_SREG_SS)
return emulate_ss(ctxt, 0);
else
return emulate_gp(ctxt, 0);
}
static int linearize(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
unsigned size, bool write,
ulong *linear)
{
unsigned max_size;
return __linearize(ctxt, addr, &max_size, size, write, false,
ctxt->mode, linear);
}
static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
enum x86emul_mode mode)
{
ulong linear;
int rc;
unsigned max_size;
struct segmented_address addr = { .seg = VCPU_SREG_CS,
.ea = dst };
if (ctxt->op_bytes != sizeof(unsigned long))
addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
if (rc == X86EMUL_CONTINUE)
ctxt->_eip = addr.ea;
return rc;
}
static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
return assign_eip(ctxt, dst, ctxt->mode);
}
static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
const struct desc_struct *cs_desc)
{
enum x86emul_mode mode = ctxt->mode;
int rc;
#ifdef CONFIG_X86_64
if (ctxt->mode >= X86EMUL_MODE_PROT16) {
if (cs_desc->l) {
u64 efer = 0;
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (efer & EFER_LMA)
mode = X86EMUL_MODE_PROT64;
} else
mode = X86EMUL_MODE_PROT32; /* temporary value */
}
#endif
if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
rc = assign_eip(ctxt, dst, mode);
if (rc == X86EMUL_CONTINUE)
ctxt->mode = mode;
return rc;
}
static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
return assign_eip_near(ctxt, ctxt->_eip + rel);
}
static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
void *data,
unsigned size)
{
int rc;
ulong linear;
rc = linearize(ctxt, addr, size, false, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}
/*
* Prefetch the remaining bytes of the instruction without crossing page
* boundary if they are not in fetch_cache yet.
*/
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
int rc;
unsigned size, max_size;
unsigned long linear;
int cur_size = ctxt->fetch.end - ctxt->fetch.data;
struct segmented_address addr = { .seg = VCPU_SREG_CS,
.ea = ctxt->eip + cur_size };
/*
* We do not know exactly how many bytes will be needed, and
* __linearize is expensive, so fetch as much as possible. We
* just have to avoid going beyond the 15 byte limit, the end
* of the segment, or the end of the page.
*
* __linearize is called with size 0 so that it does not do any
* boundary check itself. Instead, we use max_size to check
* against op_size.
*/
rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
&linear);
if (unlikely(rc != X86EMUL_CONTINUE))
return rc;
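/*
 * Since cur_size is at most 15 here, "15UL ^ cur_size" is just
 * 15 - cur_size: the bytes left before the architectural 15-byte
 * instruction length limit.
 */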
size = min_t(unsigned, 15UL ^ cur_size, max_size);
size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
/*
* One instruction can only straddle two pages, and one page has
* already been loaded at the beginning of x86_decode_insn. So, if
* there are still not enough bytes, we must have hit the 15-byte
* instruction length limit.
*/
if (unlikely(size < op_size))
return emulate_gp(ctxt, 0);
rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
size, &ctxt->exception);
if (unlikely(rc != X86EMUL_CONTINUE))
return rc;
ctxt->fetch.end += size;
return X86EMUL_CONTINUE;
}
static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
unsigned size)
{
unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
if (unlikely(done_size < size))
return __do_insn_fetch_bytes(ctxt, size - done_size);
else
return X86EMUL_CONTINUE;
}
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
({ _type _x; \
\
rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
if (rc != X86EMUL_CONTINUE) \
goto done; \
ctxt->_eip += sizeof(_type); \
_x = *(_type __aligned(1) *) ctxt->fetch.ptr; \
ctxt->fetch.ptr += sizeof(_type); \
_x; \
})
#define insn_fetch_arr(_arr, _size, _ctxt) \
({ \
rc = do_insn_fetch_bytes(_ctxt, _size); \
if (rc != X86EMUL_CONTINUE) \
goto done; \
ctxt->_eip += (_size); \
memcpy(_arr, ctxt->fetch.ptr, _size); \
ctxt->fetch.ptr += (_size); \
})
/*
* Given the 'reg' portion of a ModRM byte, and a register block, return a
* pointer into the block that addresses the relevant register.
* With @byteop set and no REX prefix, registers 4-7 decode as the
* high-byte registers AH, CH, DH and BH.
*/
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
int byteop)
{
void *p;
int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
else
p = reg_rmw(ctxt, modrm_reg);
return p;
}
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
u16 *size, unsigned long *address, int op_bytes)
{
int rc;
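/*
 * A 16-bit operand only loads a 24-bit descriptor-table base (this
 * helper serves lgdt/lidt), hence reading 3 base bytes instead of 2.
 */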
if (op_bytes == 2)
op_bytes = 3;
*address = 0;
rc = segmented_read_std(ctxt, addr, size, 2);
if (rc != X86EMUL_CONTINUE)
return rc;
addr.ea += 2;
rc = segmented_read_std(ctxt, addr, address, op_bytes);
return rc;
}
FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);
FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);
FASTOP3WCL(shld);
FASTOP3WCL(shrd);
FASTOP2W(imul);
FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);
FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);
FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);
FASTOP2(xadd);
FASTOP2R(cmp, cmp_r);
static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
/* If src is zero, do not writeback, but update flags */
if (ctxt->src.val == 0)
ctxt->dst.type = OP_NONE;
return fastop(ctxt, em_bsf);
}
static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
/* If src is zero, do not writeback, but update flags */
if (ctxt->src.val == 0)
ctxt->dst.type = OP_NONE;
return fastop(ctxt, em_bsr);
}
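/*
 * test_cc() indexes the FOP_SETCC table above by condition code: each
 * stub is 4 bytes (.align 4), so e.g. condition 0x4 (ZF set, "equal")
 * resolves to em_setcc + 16, the setz stub.
 */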
static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
u8 rc;
void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
asm("push %[flags]; popf; call *%[fastop]"
: "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
return rc;
}
static void fetch_register_operand(struct operand *op)
{
switch (op->bytes) {
case 1:
op->val = *(u8 *)op->addr.reg;
break;
case 2:
op->val = *(u16 *)op->addr.reg;
break;
case 4:
op->val = *(u32 *)op->addr.reg;
break;
case 8:
op->val = *(u64 *)op->addr.reg;
break;
}
}
static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
ctxt->ops->get_fpu(ctxt);
switch (reg) {
case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
default: BUG();
}
ctxt->ops->put_fpu(ctxt);
}
static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
int reg)
{
ctxt->ops->get_fpu(ctxt);
switch (reg) {
case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
default: BUG();
}
ctxt->ops->put_fpu(ctxt);
}
static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
ctxt->ops->get_fpu(ctxt);
switch (reg) {
case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
default: BUG();
}
ctxt->ops->put_fpu(ctxt);
}
static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
ctxt->ops->get_fpu(ctxt);
switch (reg) {
case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
default: BUG();
}
ctxt->ops->put_fpu(ctxt);
}
static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);
ctxt->ops->get_fpu(ctxt);
asm volatile("fninit");
ctxt->ops->put_fpu(ctxt);
return X86EMUL_CONTINUE;
}
static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
u16 fcw;
if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);
ctxt->ops->get_fpu(ctxt);
asm volatile("fnstcw %0": "+m"(fcw));
ctxt->ops->put_fpu(ctxt);
ctxt->dst.val = fcw;
return X86EMUL_CONTINUE;
}
static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
u16 fsw;
if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);
ctxt->ops->get_fpu(ctxt);
asm volatile("fnstsw %0": "+m"(fsw));
ctxt->ops->put_fpu(ctxt);
ctxt->dst.val = fsw;
return X86EMUL_CONTINUE;
}
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
struct operand *op)
{
unsigned reg = ctxt->modrm_reg;
if (!(ctxt->d & ModRM))
reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
if (ctxt->d & Sse) {
op->type = OP_XMM;
op->bytes = 16;
op->addr.xmm = reg;
read_sse_reg(ctxt, &op->vec_val, reg);
return;
}
if (ctxt->d & Mmx) {
reg &= 7;
op->type = OP_MM;
op->bytes = 8;
op->addr.mm = reg;
return;
}
op->type = OP_REG;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
fetch_register_operand(op);
op->orig_val = op->val;
}
static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
ctxt->modrm_seg = VCPU_SREG_SS;
}
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
struct operand *op)
{
u8 sib;
int index_reg, base_reg, scale;
int rc = X86EMUL_CONTINUE;
ulong modrm_ea = 0;
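/*
 * Fold the REX prefix bits into bit 3 of the register numbers, e.g.
 * rex_prefix 0x44 (0100 0100b, REX.R set) gives (0x44 << 1) & 8 == 8,
 * extending modrm_reg into r8-r15.
 */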
ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
ctxt->modrm_seg = VCPU_SREG_DS;
if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
op->type = OP_REG;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
ctxt->d & ByteOp);
if (ctxt->d & Sse) {
op->type = OP_XMM;
op->bytes = 16;
op->addr.xmm = ctxt->modrm_rm;
read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
return rc;
}
if (ctxt->d & Mmx) {
op->type = OP_MM;
op->bytes = 8;
op->addr.mm = ctxt->modrm_rm & 7;
return rc;
}
fetch_register_operand(op);
return rc;
}
op->type = OP_MEM;
if (ctxt->ad_bytes == 2) {
unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
/* 16-bit ModR/M decode. */
switch (ctxt->modrm_mod) {
case 0:
if (ctxt->modrm_rm == 6)
modrm_ea += insn_fetch(u16, ctxt);
break;
case 1:
modrm_ea += insn_fetch(s8, ctxt);
break;
case 2:
modrm_ea += insn_fetch(u16, ctxt);
break;
}
switch (ctxt->modrm_rm) {
case 0:
modrm_ea += bx + si;
break;
case 1:
modrm_ea += bx + di;
break;
case 2:
modrm_ea += bp + si;
break;
case 3:
modrm_ea += bp + di;
break;
case 4:
modrm_ea += si;
break;
case 5:
modrm_ea += di;
break;
case 6:
if (ctxt->modrm_mod != 0)
modrm_ea += bp;
break;
case 7:
modrm_ea += bx;
break;
}
if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
(ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
ctxt->modrm_seg = VCPU_SREG_SS;
modrm_ea = (u16)modrm_ea;
} else {
/* 32/64-bit ModR/M decode. */
if ((ctxt->modrm_rm & 7) == 4) {
sib = insn_fetch(u8, ctxt);
index_reg |= (sib >> 3) & 7;
base_reg |= sib & 7;
scale = sib >> 6;
if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
modrm_ea += insn_fetch(s32, ctxt);
else {
modrm_ea += reg_read(ctxt, base_reg);
adjust_modrm_seg(ctxt, base_reg);
/* Increment ESP on POP [ESP] */
if ((ctxt->d & IncSP) &&
base_reg == VCPU_REGS_RSP)
modrm_ea += ctxt->op_bytes;
}
if (index_reg != 4)
modrm_ea += reg_read(ctxt, index_reg) << scale;
} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
modrm_ea += insn_fetch(s32, ctxt);
if (ctxt->mode == X86EMUL_MODE_PROT64)
ctxt->rip_relative = 1;
} else {
base_reg = ctxt->modrm_rm;
modrm_ea += reg_read(ctxt, base_reg);
adjust_modrm_seg(ctxt, base_reg);
}
switch (ctxt->modrm_mod) {
case 1:
modrm_ea += insn_fetch(s8, ctxt);
break;
case 2:
modrm_ea += insn_fetch(s32, ctxt);
break;
}
}
op->addr.mem.ea = modrm_ea;
if (ctxt->ad_bytes != 8)
ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
done:
return rc;
}
static int decode_abs(struct x86_emulate_ctxt *ctxt,
struct operand *op)
{
int rc = X86EMUL_CONTINUE;
op->type = OP_MEM;
switch (ctxt->ad_bytes) {
case 2:
op->addr.mem.ea = insn_fetch(u16, ctxt);
break;
case 4:
op->addr.mem.ea = insn_fetch(u32, ctxt);
break;
case 8:
op->addr.mem.ea = insn_fetch(u64, ctxt);
break;
}
done:
return rc;
}
static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
long sv = 0, mask;
if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
mask = ~((long)ctxt->dst.bytes * 8 - 1);
if (ctxt->src.bytes == 2)
sv = (s16)ctxt->src.val & (s16)mask;
else if (ctxt->src.bytes == 4)
sv = (s32)ctxt->src.val & (s32)mask;
else
sv = (s64)ctxt->src.val & (s64)mask;
ctxt->dst.addr.mem.ea = address_mask(ctxt,
ctxt->dst.addr.mem.ea + (sv >> 3));
}
/* only subword offset */
ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
static int read_emulated(struct x86_emulate_ctxt *ctxt,
unsigned long addr, void *dest, unsigned size)
{
int rc;
struct read_cache *mc = &ctxt->mem_read;
if (mc->pos < mc->end)
goto read_cached;
WARN_ON((mc->end + size) >= sizeof(mc->data));
rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
&ctxt->exception);
if (rc != X86EMUL_CONTINUE)
return rc;
mc->end += size;
read_cached:
memcpy(dest, mc->data + mc->pos, size);
mc->pos += size;
return X86EMUL_CONTINUE;
}
static int segmented_read(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
void *data,
unsigned size)
{
int rc;
ulong linear;
rc = linearize(ctxt, addr, size, false, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
return read_emulated(ctxt, linear, data, size);
}
static int segmented_write(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
const void *data,
unsigned size)
{
int rc;
ulong linear;
rc = linearize(ctxt, addr, size, true, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
return ctxt->ops->write_emulated(ctxt, linear, data, size,
&ctxt->exception);
}
static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
const void *orig_data, const void *data,
unsigned size)
{
int rc;
ulong linear;
rc = linearize(ctxt, addr, size, true, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
size, &ctxt->exception);
}
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
unsigned int size, unsigned short port,
void *dest)
{
struct read_cache *rc = &ctxt->io_read;
if (rc->pos == rc->end) { /* refill pio read ahead */
unsigned int in_page, n;
unsigned int count = ctxt->rep_prefix ?
address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
if (n == 0)
n = 1;
rc->pos = rc->end = 0;
if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
return 0;
rc->end = n * size;
}
if (ctxt->rep_prefix && (ctxt->d & String) &&
!(ctxt->eflags & X86_EFLAGS_DF)) {
ctxt->dst.data = rc->data + rc->pos;
ctxt->dst.type = OP_MEM_STR;
ctxt->dst.count = (rc->end - rc->pos) / size;
rc->pos = rc->end;
} else {
memcpy(dest, rc->data + rc->pos, size);
rc->pos += size;
}
return 1;
}
static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
u16 index, struct desc_struct *desc)
{
struct desc_ptr dt;
ulong addr;
ctxt->ops->get_idt(ctxt, &dt);
if (dt.size < index * 8 + 7)
return emulate_gp(ctxt, index << 3 | 0x2);
addr = dt.address + index * 8;
return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
&ctxt->exception);
}
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
u16 selector, struct desc_ptr *dt)
{
const struct x86_emulate_ops *ops = ctxt->ops;
u32 base3 = 0;
if (selector & 1 << 2) {
struct desc_struct desc;
u16 sel;
memset (dt, 0, sizeof *dt);
if (!ops->get_segment(ctxt, &sel, &desc, &base3,
VCPU_SREG_LDTR))
return;
dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
} else
ops->get_gdt(ctxt, dt);
}
static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
u16 selector, ulong *desc_addr_p)
{
struct desc_ptr dt;
u16 index = selector >> 3;
ulong addr;
get_descriptor_table_ptr(ctxt, selector, &dt);
if (dt.size < index * 8 + 7)
return emulate_gp(ctxt, selector & 0xfffc);
addr = dt.address + index * 8;
#ifdef CONFIG_X86_64
if (addr >> 32 != 0) {
u64 efer = 0;
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (!(efer & EFER_LMA))
addr &= (u32)-1;
}
#endif
*desc_addr_p = addr;
return X86EMUL_CONTINUE;
}
/* only valid for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, struct desc_struct *desc,
ulong *desc_addr_p)
{
int rc;
rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
if (rc != X86EMUL_CONTINUE)
return rc;
return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
&ctxt->exception);
}
/* only valid for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, struct desc_struct *desc)
{
int rc;
ulong addr;
rc = get_descriptor_ptr(ctxt, selector, &addr);
if (rc != X86EMUL_CONTINUE)
return rc;
return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
&ctxt->exception);
}
/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg, u8 cpl,
enum x86_transfer_type transfer,
struct desc_struct *desc)
{
struct desc_struct seg_desc, old_desc;
u8 dpl, rpl;
unsigned err_vec = GP_VECTOR;
u32 err_code = 0;
bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
ulong desc_addr;
int ret;
u16 dummy;
u32 base3 = 0;
memset(&seg_desc, 0, sizeof seg_desc);
if (ctxt->mode == X86EMUL_MODE_REAL) {
/* set real mode segment descriptor (keep limit etc. for
* unreal mode) */
ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
set_desc_base(&seg_desc, selector << 4);
goto load;
} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
/* VM86 needs a clean new segment descriptor */
set_desc_base(&seg_desc, selector << 4);
set_desc_limit(&seg_desc, 0xffff);
seg_desc.type = 3;
seg_desc.p = 1;
seg_desc.s = 1;
seg_desc.dpl = 3;
goto load;
}
rpl = selector & 3;
/* NULL selector is not valid for TR, CS and SS (except for long mode) */
if ((seg == VCPU_SREG_CS
|| (seg == VCPU_SREG_SS
&& (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
|| seg == VCPU_SREG_TR)
&& null_selector)
goto exception;
/* TR should be in GDT only */
if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
goto exception;
if (null_selector) /* for NULL selector skip all following checks */
goto load;
ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
if (ret != X86EMUL_CONTINUE)
return ret;
err_code = selector & 0xfffc;
err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
GP_VECTOR;
/* can't load system descriptor into segment selector */
if (seg <= VCPU_SREG_GS && !seg_desc.s) {
if (transfer == X86_TRANSFER_CALL_JMP)
return X86EMUL_UNHANDLEABLE;
goto exception;
}
if (!seg_desc.p) {
err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
goto exception;
}
dpl = seg_desc.dpl;
switch (seg) {
case VCPU_SREG_SS:
/*
* segment is not a writable data segment, or segment
* selector's RPL != CPL, or descriptor's DPL != CPL
*/
if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
goto exception;
break;
case VCPU_SREG_CS:
if (!(seg_desc.type & 8))
goto exception;
if (seg_desc.type & 4) {
/* conforming */
if (dpl > cpl)
goto exception;
} else {
/* nonconforming */
if (rpl > cpl || dpl != cpl)
goto exception;
}
/* in long-mode d/b must be clear if l is set */
if (seg_desc.d && seg_desc.l) {
u64 efer = 0;
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (efer & EFER_LMA)
goto exception;
}
/* CS(RPL) <- CPL */
selector = (selector & 0xfffc) | cpl;
break;
case VCPU_SREG_TR:
if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
goto exception;
old_desc = seg_desc;
seg_desc.type |= 2; /* busy */
ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
sizeof(seg_desc), &ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
break;
case VCPU_SREG_LDTR:
if (seg_desc.s || seg_desc.type != 2)
goto exception;
break;
default: /* DS, ES, FS, or GS */
/*
* segment is not a data or readable code segment or
* ((segment is a data or nonconforming code segment)
* and (both RPL and CPL > DPL))
*/
if ((seg_desc.type & 0xa) == 0x8 ||
(((seg_desc.type & 0xc) != 0xc) &&
(rpl > dpl && cpl > dpl)))
goto exception;
break;
}
if (seg_desc.s) {
/* mark segment as accessed */
if (!(seg_desc.type & 1)) {
seg_desc.type |= 1;
ret = write_segment_descriptor(ctxt, selector,
&seg_desc);
if (ret != X86EMUL_CONTINUE)
return ret;
}
} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
sizeof(base3), &ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
if (is_noncanonical_address(get_desc_base(&seg_desc) |
((u64)base3 << 32)))
return emulate_gp(ctxt, 0);
}
load:
ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
if (desc)
*desc = seg_desc;
return X86EMUL_CONTINUE;
exception:
return emulate_exception(ctxt, err_vec, err_code, true);
}
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg)
{
u8 cpl = ctxt->ops->cpl(ctxt);
return __load_segment_descriptor(ctxt, selector, seg, cpl,
X86_TRANSFER_NONE, NULL);
}
static void write_register_operand(struct operand *op)
{
return assign_register(op->addr.reg, op->val, op->bytes);
}
static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
switch (op->type) {
case OP_REG:
write_register_operand(op);
break;
case OP_MEM:
if (ctxt->lock_prefix)
return segmented_cmpxchg(ctxt,
op->addr.mem,
&op->orig_val,
&op->val,
op->bytes);
else
return segmented_write(ctxt,
op->addr.mem,
&op->val,
op->bytes);
break;
case OP_MEM_STR:
return segmented_write(ctxt,
op->addr.mem,
op->data,
op->bytes * op->count);
break;
case OP_XMM:
write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
break;
case OP_MM:
write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
break;
case OP_NONE:
/* no writeback */
break;
default:
break;
}
return X86EMUL_CONTINUE;
}
static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
struct segmented_address addr;
rsp_increment(ctxt, -bytes);
addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
addr.seg = VCPU_SREG_SS;
return segmented_write(ctxt, addr, data, bytes);
}
static int em_push(struct x86_emulate_ctxt *ctxt)
{
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}
static int emulate_pop(struct x86_emulate_ctxt *ctxt,
void *dest, int len)
{
int rc;
struct segmented_address addr;
addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
addr.seg = VCPU_SREG_SS;
rc = segmented_read(ctxt, addr, dest, len);
if (rc != X86EMUL_CONTINUE)
return rc;
rsp_increment(ctxt, len);
return rc;
}
static int em_pop(struct x86_emulate_ctxt *ctxt)
{
return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
void *dest, int len)
{
int rc;
unsigned long val, change_mask;
int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
int cpl = ctxt->ops->cpl(ctxt);
rc = emulate_pop(ctxt, &val, len);
if (rc != X86EMUL_CONTINUE)
return rc;
change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
X86_EFLAGS_AC | X86_EFLAGS_ID;
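/*
 * Which flags POPF may actually change depends on privilege: only
 * CPL 0 may touch IOPL, IF is writable only when CPL <= IOPL, vm86
 * with IOPL < 3 faults with #GP, and real mode may change both.
 */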
switch(ctxt->mode) {
case X86EMUL_MODE_PROT64:
case X86EMUL_MODE_PROT32:
case X86EMUL_MODE_PROT16:
if (cpl == 0)
change_mask |= X86_EFLAGS_IOPL;
if (cpl <= iopl)
change_mask |= X86_EFLAGS_IF;
break;
case X86EMUL_MODE_VM86:
if (iopl < 3)
return emulate_gp(ctxt, 0);
change_mask |= X86_EFLAGS_IF;
break;
default: /* real mode */
change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
break;
}
*(unsigned long *)dest =
(ctxt->eflags & ~change_mask) | (val & change_mask);
return rc;
}
static int em_popf(struct x86_emulate_ctxt *ctxt)
{
ctxt->dst.type = OP_REG;
ctxt->dst.addr.reg = &ctxt->eflags;
ctxt->dst.bytes = ctxt->op_bytes;
return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
static int em_enter(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned frame_size = ctxt->src.val;
unsigned nesting_level = ctxt->src2.val & 31;
ulong rbp;
if (nesting_level)
return X86EMUL_UNHANDLEABLE;
rbp = reg_read(ctxt, VCPU_REGS_RBP);
rc = push(ctxt, &rbp, stack_size(ctxt));
if (rc != X86EMUL_CONTINUE)
return rc;
assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
stack_mask(ctxt));
assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
stack_mask(ctxt));
return X86EMUL_CONTINUE;
}
static int em_leave(struct x86_emulate_ctxt *ctxt)
{
assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
stack_mask(ctxt));
return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}
static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
int seg = ctxt->src2.val;
ctxt->src.val = get_segment_selector(ctxt, seg);
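/*
 * A 32-bit push of a segment register moves RSP by 4 but (on recent
 * CPUs) writes only the low 2 bytes; model that by pre-adjusting RSP
 * and pushing a 2-byte value, leaving the upper half untouched.
 */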
if (ctxt->op_bytes == 4) {
rsp_increment(ctxt, -2);
ctxt->op_bytes = 2;
}
return em_push(ctxt);
}
static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
int seg = ctxt->src2.val;
unsigned long selector;
int rc;
rc = emulate_pop(ctxt, &selector, 2);
if (rc != X86EMUL_CONTINUE)
return rc;
if (seg == VCPU_SREG_SS)
ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
if (ctxt->op_bytes > 2)
rsp_increment(ctxt, ctxt->op_bytes - 2);
rc = load_segment_descriptor(ctxt, (u16)selector, seg);
return rc;
}
static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
int rc = X86EMUL_CONTINUE;
int reg = VCPU_REGS_RAX;
while (reg <= VCPU_REGS_RDI) {
ctxt->src.val = (reg == VCPU_REGS_RSP) ? old_esp : reg_read(ctxt, reg);
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
++reg;
}
return rc;
}
static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
return em_push(ctxt);
}
static int em_popa(struct x86_emulate_ctxt *ctxt)
{
int rc = X86EMUL_CONTINUE;
int reg = VCPU_REGS_RDI;
u32 val;
while (reg >= VCPU_REGS_RAX) {
if (reg == VCPU_REGS_RSP) {
rsp_increment(ctxt, ctxt->op_bytes);
--reg;
}
rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
break;
assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
--reg;
}
return rc;
}
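/*
 * Real-mode interrupt dispatch: push FLAGS, CS and IP, then fetch the new
 * CS:IP from the interrupt vector table, where each vector is four bytes
 * (IP at idt.base + irq * 4, CS two bytes above it).
 */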
static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
const struct x86_emulate_ops *ops = ctxt->ops;
int rc;
struct desc_ptr dt;
gva_t cs_addr;
gva_t eip_addr;
u16 cs, eip;
/* TODO: Add limit checks */
ctxt->src.val = ctxt->eflags;
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->src.val = ctxt->_eip;
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
ops->get_idt(ctxt, &dt);
eip_addr = dt.address + (irq << 2);
cs_addr = dt.address + (irq << 2) + 2;
rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->_eip = eip;
return rc;
}
int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
int rc;
invalidate_registers(ctxt);
rc = __emulate_int_real(ctxt, irq);
if (rc == X86EMUL_CONTINUE)
writeback_registers(ctxt);
return rc;
}
static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
switch (ctxt->mode) {
case X86EMUL_MODE_REAL:
return __emulate_int_real(ctxt, irq);
case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
case X86EMUL_MODE_PROT32:
case X86EMUL_MODE_PROT64:
default:
/* Protected-mode interrupts are not implemented yet */
return X86EMUL_UNHANDLEABLE;
}
}
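/*
 * Real-mode IRET pops IP, CS and FLAGS, in that order; only the bits in
 * 'mask' may be taken from the popped value, while the VM86-related bits
 * are preserved from the current EFLAGS.
 */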
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
int rc = X86EMUL_CONTINUE;
unsigned long temp_eip = 0;
unsigned long temp_eflags = 0;
unsigned long cs = 0;
unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
X86_EFLAGS_AC | X86_EFLAGS_ID |
X86_EFLAGS_FIXED;
unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
X86_EFLAGS_VIP;
/* TODO: Add stack limit check */
rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
if (temp_eip & ~0xffff)
return emulate_gp(ctxt, 0);
rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->_eip = temp_eip;
if (ctxt->op_bytes == 4)
ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
else if (ctxt->op_bytes == 2) {
ctxt->eflags &= ~0xffff;
ctxt->eflags |= temp_eflags;
}
ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
ctxt->eflags |= X86_EFLAGS_FIXED;
ctxt->ops->set_nmi_mask(ctxt, false);
return rc;
}
static int em_iret(struct x86_emulate_ctxt *ctxt)
{
switch (ctxt->mode) {
case X86EMUL_MODE_REAL:
return emulate_iret_real(ctxt);
case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
case X86EMUL_MODE_PROT32:
case X86EMUL_MODE_PROT64:
default:
/* iret from protected mode is not implemented yet */
return X86EMUL_UNHANDLEABLE;
}
}
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned short sel, old_sel;
struct desc_struct old_desc, new_desc;
const struct x86_emulate_ops *ops = ctxt->ops;
u8 cpl = ctxt->ops->cpl(ctxt);
/* Assignment of RIP may only fail in 64-bit mode */
if (ctxt->mode == X86EMUL_MODE_PROT64)
ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
VCPU_SREG_CS);
memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
X86_TRANSFER_CALL_JMP,
&new_desc);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
if (rc != X86EMUL_CONTINUE) {
WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
/* assigning eip failed; restore the old cs */
ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
return rc;
}
return rc;
}
static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
return assign_eip_near(ctxt, ctxt->src.val);
}
static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{
int rc;
long int old_eip;
old_eip = ctxt->_eip;
rc = assign_eip_near(ctxt, ctxt->src.val);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->src.val = old_eip;
rc = em_push(ctxt);
return rc;
}
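/*
 * CMPXCHG8B: compare EDX:EAX with the 64-bit destination; on mismatch,
 * load the old value into EDX:EAX and clear ZF, otherwise write ECX:EBX
 * to the destination and set ZF.
 */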
static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
u64 old = ctxt->dst.orig_val64;
if (ctxt->dst.bytes == 16)
return X86EMUL_UNHANDLEABLE;
if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
ctxt->eflags &= ~X86_EFLAGS_ZF;
} else {
ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
(u32) reg_read(ctxt, VCPU_REGS_RBX);
ctxt->eflags |= X86_EFLAGS_ZF;
}
return X86EMUL_CONTINUE;
}
static int em_ret(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned long eip;
rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
return assign_eip_near(ctxt, eip);
}
static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned long eip, cs;
u16 old_cs;
int cpl = ctxt->ops->cpl(ctxt);
struct desc_struct old_desc, new_desc;
const struct x86_emulate_ops *ops = ctxt->ops;
if (ctxt->mode == X86EMUL_MODE_PROT64)
ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
VCPU_SREG_CS);
rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
/* Outer-privilege level return is not implemented */
if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
return X86EMUL_UNHANDLEABLE;
rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
X86_TRANSFER_RET,
&new_desc);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, eip, &new_desc);
if (rc != X86EMUL_CONTINUE) {
WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
}
return rc;
}
static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
int rc;
rc = em_ret_far(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
rsp_increment(ctxt, ctxt->src.val);
return X86EMUL_CONTINUE;
}
static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
/* Save real source value, then compare EAX against destination. */
ctxt->dst.orig_val = ctxt->dst.val;
ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
ctxt->src.orig_val = ctxt->src.val;
ctxt->src.val = ctxt->dst.orig_val;
fastop(ctxt, em_cmp);
if (ctxt->eflags & X86_EFLAGS_ZF) {
/* Success: write back to memory; no update of EAX */
ctxt->src.type = OP_NONE;
ctxt->dst.val = ctxt->src.orig_val;
} else {
/* Failure: write the value we saw to EAX. */
ctxt->src.type = OP_REG;
ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
ctxt->src.val = ctxt->dst.orig_val;
/* Create write-cycle to dest by writing the same value */
ctxt->dst.val = ctxt->dst.orig_val;
}
return X86EMUL_CONTINUE;
}
static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
int seg = ctxt->src2.val;
unsigned short sel;
int rc;
memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
rc = load_segment_descriptor(ctxt, sel, seg);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->dst.val = ctxt->src.val;
return rc;
}
static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
{
u32 eax, ebx, ecx, edx;
eax = 0x80000001;
ecx = 0;
ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
return edx & bit(X86_FEATURE_LM);
}
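/*
 * Note the hidden control flow: on a failed read, GET_SMSTATE returns
 * X86EMUL_UNHANDLEABLE from the *calling* function, so it must only be
 * used inside helpers that return an emulation status.
 */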
#define GET_SMSTATE(type, smbase, offset) \
({ \
type __val; \
int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val, \
sizeof(__val)); \
if (r != X86EMUL_CONTINUE) \
return X86EMUL_UNHANDLEABLE; \
__val; \
})
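/*
 * Unpack segment attributes from their SMRAM representation; the bit
 * positions (8-23) mirror the second dword of a segment descriptor
 * (type, S, DPL, P, AVL, L, D and G).
 */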
static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
{
desc->g = (flags >> 23) & 1;
desc->d = (flags >> 22) & 1;
desc->l = (flags >> 21) & 1;
desc->avl = (flags >> 20) & 1;
desc->p = (flags >> 15) & 1;
desc->dpl = (flags >> 13) & 3;
desc->s = (flags >> 12) & 1;
desc->type = (flags >> 8) & 15;
}
static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
{
struct desc_struct desc;
int offset;
u16 selector;
selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);
if (n < 3)
offset = 0x7f84 + n * 12;
else
offset = 0x7f2c + (n - 3) * 12;
set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8));
set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4));
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
return X86EMUL_CONTINUE;
}
static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
{
struct desc_struct desc;
int offset;
u16 selector;
u32 base3;
offset = 0x7e00 + n * 16;
selector = GET_SMSTATE(u16, smbase, offset);
rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4));
set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8));
base3 = GET_SMSTATE(u32, smbase, offset + 12);
ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
return X86EMUL_CONTINUE;
}
static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
u64 cr0, u64 cr4)
{
int bad;
/*
* First enable PAE; long mode needs it before CR0.PG = 1 is set.
* Then enable protected mode. However, PCID cannot be enabled
* if EFER.LMA=0, so set it separately.
*/
bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
if (bad)
return X86EMUL_UNHANDLEABLE;
bad = ctxt->ops->set_cr(ctxt, 0, cr0);
if (bad)
return X86EMUL_UNHANDLEABLE;
if (cr4 & X86_CR4_PCIDE) {
bad = ctxt->ops->set_cr(ctxt, 4, cr4);
if (bad)
return X86EMUL_UNHANDLEABLE;
}
return X86EMUL_CONTINUE;
}
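/*
 * The hex constants below are offsets into the 32-bit SMRAM state-save
 * area; the smbase argument already points at the save area (the caller,
 * em_rsm, passes smbase + 0x8000).
 */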
static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
{
struct desc_struct desc;
struct desc_ptr dt;
u16 selector;
u32 val, cr0, cr4;
int i;
cr0 = GET_SMSTATE(u32, smbase, 0x7ffc);
ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8));
ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
ctxt->_eip = GET_SMSTATE(u32, smbase, 0x7ff0);
for (i = 0; i < 8; i++)
*reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);
val = GET_SMSTATE(u32, smbase, 0x7fcc);
ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
val = GET_SMSTATE(u32, smbase, 0x7fc8);
ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
selector = GET_SMSTATE(u32, smbase, 0x7fc4);
set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f64));
set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f60));
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f5c));
ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
selector = GET_SMSTATE(u32, smbase, 0x7fc0);
set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f80));
set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f7c));
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f78));
ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
dt.address = GET_SMSTATE(u32, smbase, 0x7f74);
dt.size = GET_SMSTATE(u32, smbase, 0x7f70);
ctxt->ops->set_gdt(ctxt, &dt);
dt.address = GET_SMSTATE(u32, smbase, 0x7f58);
dt.size = GET_SMSTATE(u32, smbase, 0x7f54);
ctxt->ops->set_idt(ctxt, &dt);
for (i = 0; i < 6; i++) {
int r = rsm_load_seg_32(ctxt, smbase, i);
if (r != X86EMUL_CONTINUE)
return r;
}
cr4 = GET_SMSTATE(u32, smbase, 0x7f14);
ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
return rsm_enter_protected_mode(ctxt, cr0, cr4);
}
static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
{
struct desc_struct desc;
struct desc_ptr dt;
u64 val, cr0, cr4;
u32 base3;
u16 selector;
int i, r;
for (i = 0; i < 16; i++)
*reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
ctxt->_eip = GET_SMSTATE(u64, smbase, 0x7f78);
ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;
val = GET_SMSTATE(u32, smbase, 0x7f68);
ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
val = GET_SMSTATE(u32, smbase, 0x7f60);
ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
cr0 = GET_SMSTATE(u64, smbase, 0x7f58);
ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u64, smbase, 0x7f50));
cr4 = GET_SMSTATE(u64, smbase, 0x7f48);
ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
val = GET_SMSTATE(u64, smbase, 0x7ed0);
ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
selector = GET_SMSTATE(u32, smbase, 0x7e90);
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e92) << 8);
set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e94));
set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e98));
base3 = GET_SMSTATE(u32, smbase, 0x7e9c);
ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
dt.size = GET_SMSTATE(u32, smbase, 0x7e84);
dt.address = GET_SMSTATE(u64, smbase, 0x7e88);
ctxt->ops->set_idt(ctxt, &dt);
selector = GET_SMSTATE(u32, smbase, 0x7e70);
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e72) << 8);
set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e74));
set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e78));
base3 = GET_SMSTATE(u32, smbase, 0x7e7c);
ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
dt.size = GET_SMSTATE(u32, smbase, 0x7e64);
dt.address = GET_SMSTATE(u64, smbase, 0x7e68);
ctxt->ops->set_gdt(ctxt, &dt);
r = rsm_enter_protected_mode(ctxt, cr0, cr4);
if (r != X86EMUL_CONTINUE)
return r;
for (i = 0; i < 6; i++) {
r = rsm_load_seg_64(ctxt, smbase, i);
if (r != X86EMUL_CONTINUE)
return r;
}
return X86EMUL_CONTINUE;
}
static int em_rsm(struct x86_emulate_ctxt *ctxt)
{
unsigned long cr0, cr4, efer;
u64 smbase;
int ret;
if ((ctxt->emul_flags & X86EMUL_SMM_MASK) == 0)
return emulate_ud(ctxt);
/*
* Get back to real mode, to prepare a safe state in which to load
* CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
* supports long mode.
*/
cr4 = ctxt->ops->get_cr(ctxt, 4);
if (emulator_has_longmode(ctxt)) {
struct desc_struct cs_desc;
/* Zero CR4.PCIDE before CR0.PG. */
if (cr4 & X86_CR4_PCIDE) {
ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
cr4 &= ~X86_CR4_PCIDE;
}
/* A 32-bit code segment is required to clear EFER.LMA. */
memset(&cs_desc, 0, sizeof(cs_desc));
cs_desc.type = 0xb;
cs_desc.s = cs_desc.g = cs_desc.p = 1;
ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
}
/* For the 64-bit case, this will clear EFER.LMA. */
cr0 = ctxt->ops->get_cr(ctxt, 0);
if (cr0 & X86_CR0_PE)
ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
/* Now clear CR4.PAE (which must be done before clearing EFER.LME). */
if (cr4 & X86_CR4_PAE)
ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
/* And finally go back to 32-bit mode. */
efer = 0;
ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
smbase = ctxt->ops->get_smbase(ctxt);
if (emulator_has_longmode(ctxt))
ret = rsm_load_state_64(ctxt, smbase + 0x8000);
else
ret = rsm_load_state_32(ctxt, smbase + 0x8000);
if (ret != X86EMUL_CONTINUE) {
/* FIXME: should triple fault */
return X86EMUL_UNHANDLEABLE;
}
if ((ctxt->emul_flags & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
ctxt->ops->set_nmi_mask(ctxt, false);
ctxt->emul_flags &= ~X86EMUL_SMM_INSIDE_NMI_MASK;
ctxt->emul_flags &= ~X86EMUL_SMM_MASK;
return X86EMUL_CONTINUE;
}
static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
struct desc_struct *cs, struct desc_struct *ss)
{
cs->l = 0; /* will be adjusted later */
set_desc_base(cs, 0); /* flat segment */
cs->g = 1; /* 4kb granularity */
set_desc_limit(cs, 0xfffff); /* 4GB limit */
cs->type = 0x0b; /* Read, Execute, Accessed */
cs->s = 1;
cs->dpl = 0; /* will be adjusted later */
cs->p = 1;
cs->d = 1;
cs->avl = 0;
set_desc_base(ss, 0); /* flat segment */
set_desc_limit(ss, 0xfffff); /* 4GB limit */
ss->g = 1; /* 4kb granularity */
ss->s = 1;
ss->type = 0x03; /* Read/Write, Accessed */
ss->d = 1; /* 32bit stack segment */
ss->dpl = 0;
ss->p = 1;
ss->l = 0;
ss->avl = 0;
}
static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
u32 eax, ebx, ecx, edx;
eax = ecx = 0;
ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}
static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
const struct x86_emulate_ops *ops = ctxt->ops;
u32 eax, ebx, ecx, edx;
/*
* syscall should always be enabled in long mode, so only apply the
* vendor-specific (cpuid) checks when other modes are active...
*/
if (ctxt->mode == X86EMUL_MODE_PROT64)
return true;
eax = 0x00000000;
ecx = 0x00000000;
ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
/*
* Intel ("GenuineIntel")
* Note: Intel CPUs only support "syscall" in 64-bit long mode.
* A 64-bit guest running a 32-bit compat application will #UD!
* While this behaviour could be fixed (by emulating the AMD
* response), AMD CPUs cannot be made to behave like Intel ones.
*/
if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
return false;
/* AMD ("AuthenticAMD") */
if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
return true;
/* AMD ("AMDisbetter!") */
if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
return true;
/* default: (not Intel, not AMD), apply Intel's stricter rules... */
return false;
}
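/*
 * SYSCALL takes its CS/SS from bits 47:32 of MSR_STAR: the code segment
 * is that selector with the RPL bits cleared, and the stack segment is
 * the selector + 8, which is what the cs_sel/ss_sel computation below does.
 */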
static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct cs, ss;
u64 msr_data;
u16 cs_sel, ss_sel;
u64 efer = 0;
/* syscall is not available in real mode */
if (ctxt->mode == X86EMUL_MODE_REAL ||
ctxt->mode == X86EMUL_MODE_VM86)
return emulate_ud(ctxt);
if (!(em_syscall_is_enabled(ctxt)))
return emulate_ud(ctxt);
ops->get_msr(ctxt, MSR_EFER, &efer);
setup_syscalls_segments(ctxt, &cs, &ss);
if (!(efer & EFER_SCE))
return emulate_ud(ctxt);
ops->get_msr(ctxt, MSR_STAR, &msr_data);
msr_data >>= 32;
cs_sel = (u16)(msr_data & 0xfffc);
ss_sel = (u16)(msr_data + 8);
if (efer & EFER_LMA) {
cs.d = 0;
cs.l = 1;
}
ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
ops->get_msr(ctxt,
ctxt->mode == X86EMUL_MODE_PROT64 ?
MSR_LSTAR : MSR_CSTAR, &msr_data);
ctxt->_eip = msr_data;
ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
ctxt->eflags &= ~msr_data;
ctxt->eflags |= X86_EFLAGS_FIXED;
#endif
} else {
/* legacy mode */
ops->get_msr(ctxt, MSR_STAR, &msr_data);
ctxt->_eip = (u32)msr_data;
ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
}
return X86EMUL_CONTINUE;
}
static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct cs, ss;
u64 msr_data;
u16 cs_sel, ss_sel;
u64 efer = 0;
ops->get_msr(ctxt, MSR_EFER, &efer);
/* inject #GP if in real mode */
if (ctxt->mode == X86EMUL_MODE_REAL)
return emulate_gp(ctxt, 0);
/*
* Not recognized on AMD in compat mode (but is recognized in legacy
* mode).
*/
if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
&& !vendor_intel(ctxt))
return emulate_ud(ctxt);
/* sysenter/sysexit have not been tested in 64bit mode. */
if (ctxt->mode == X86EMUL_MODE_PROT64)
return X86EMUL_UNHANDLEABLE;
setup_syscalls_segments(ctxt, &cs, &ss);
ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
if ((msr_data & 0xfffc) == 0x0)
return emulate_gp(ctxt, 0);
ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
ss_sel = cs_sel + 8;
if (efer & EFER_LMA) {
cs.d = 0;
cs.l = 1;
}
ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
(u32)msr_data;
return X86EMUL_CONTINUE;
}
static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct cs, ss;
u64 msr_data, rcx, rdx;
int usermode;
u16 cs_sel = 0, ss_sel = 0;
/* inject #GP if in real mode or Virtual 8086 mode */
if (ctxt->mode == X86EMUL_MODE_REAL ||
ctxt->mode == X86EMUL_MODE_VM86)
return emulate_gp(ctxt, 0);
setup_syscalls_segments(ctxt, &cs, &ss);
if ((ctxt->rex_prefix & 0x8) != 0x0)
usermode = X86EMUL_MODE_PROT64;
else
usermode = X86EMUL_MODE_PROT32;
rcx = reg_read(ctxt, VCPU_REGS_RCX);
rdx = reg_read(ctxt, VCPU_REGS_RDX);
cs.dpl = 3;
ss.dpl = 3;
ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
switch (usermode) {
case X86EMUL_MODE_PROT32:
cs_sel = (u16)(msr_data + 16);
if ((msr_data & 0xfffc) == 0x0)
return emulate_gp(ctxt, 0);
ss_sel = (u16)(msr_data + 24);
rcx = (u32)rcx;
rdx = (u32)rdx;
break;
case X86EMUL_MODE_PROT64:
cs_sel = (u16)(msr_data + 32);
if (msr_data == 0x0)
return emulate_gp(ctxt, 0);
ss_sel = cs_sel + 8;
cs.d = 0;
cs.l = 1;
if (is_noncanonical_address(rcx) ||
is_noncanonical_address(rdx))
return emulate_gp(ctxt, 0);
break;
}
cs_sel |= SEGMENT_RPL_MASK;
ss_sel |= SEGMENT_RPL_MASK;
ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
ctxt->_eip = rdx;
*reg_write(ctxt, VCPU_REGS_RSP) = rcx;
return X86EMUL_CONTINUE;
}
static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{
int iopl;
if (ctxt->mode == X86EMUL_MODE_REAL)
return false;
if (ctxt->mode == X86EMUL_MODE_VM86)
return true;
iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
return ctxt->ops->cpl(ctxt) > iopl;
}
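/*
 * Consult the I/O permission bitmap in the TSS: the bitmap offset lives
 * at TSS+102; two bytes covering the port are read, and access is denied
 * if any bit for the port range is set.
 */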
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
u16 port, u16 len)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct tr_seg;
u32 base3;
int r;
u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
unsigned mask = (1 << len) - 1;
unsigned long base;
ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
if (!tr_seg.p)
return false;
if (desc_limit_scaled(&tr_seg) < 103)
return false;
base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
base |= ((u64)base3) << 32;
#endif
r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
if (r != X86EMUL_CONTINUE)
return false;
if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
return false;
r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
if (r != X86EMUL_CONTINUE)
return false;
if ((perm >> bit_idx) & mask)
return false;
return true;
}
static bool emulator_io_permitted(struct x86_emulate_ctxt *ctxt,
u16 port, u16 len)
{
if (ctxt->perm_ok)
return true;
if (emulator_bad_iopl(ctxt))
if (!emulator_io_port_access_allowed(ctxt, port, len))
return false;
ctxt->perm_ok = true;
return true;
}
static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
{
/*
* Intel CPUs mask the counter and pointers in a quite strange
* manner when ECX is zero, due to REP-string optimizations.
*/
#ifdef CONFIG_X86_64
if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
return;
*reg_write(ctxt, VCPU_REGS_RCX) = 0;
switch (ctxt->b) {
case 0xa4: /* movsb */
case 0xa5: /* movsd/w */
*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
/* fall through */
case 0xaa: /* stosb */
case 0xab: /* stosd/w */
*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
}
#endif
}
static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
struct tss_segment_16 *tss)
{
tss->ip = ctxt->_eip;
tss->flag = ctxt->eflags;
tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
tss->si = reg_read(ctxt, VCPU_REGS_RSI);
tss->di = reg_read(ctxt, VCPU_REGS_RDI);
tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}
static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
struct tss_segment_16 *tss)
{
int ret;
u8 cpl;
ctxt->_eip = tss->ip;
ctxt->eflags = tss->flag | 2;
*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
/*
* SDM says that segment selectors are loaded before segment
* descriptors
*/
set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
cpl = tss->cs & 3;
/*
* Now load the segment descriptors. If a fault happens at this
* stage, it is handled in the context of the new task.
*/
ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
return X86EMUL_CONTINUE;
}
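/*
 * 16-bit task switch: save the outgoing state into the old TSS, read the
 * incoming TSS, chain the back link when nesting, then load the new state.
 */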
static int task_switch_16(struct x86_emulate_ctxt *ctxt,
u16 tss_selector, u16 old_tss_sel,
ulong old_tss_base, struct desc_struct *new_desc)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct tss_segment_16 tss_seg;
int ret;
u32 new_tss_base = get_desc_base(new_desc);
ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
save_state_to_tss16(ctxt, &tss_seg);
ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
if (old_tss_sel != 0xffff) {
tss_seg.prev_task_link = old_tss_sel;
ret = ops->write_std(ctxt, new_tss_base,
&tss_seg.prev_task_link,
sizeof tss_seg.prev_task_link,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
}
return load_state_from_tss16(ctxt, &tss_seg);
}
static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
struct tss_segment_32 *tss)
{
/* CR3 and the LDT selector are intentionally not saved */
tss->eip = ctxt->_eip;
tss->eflags = ctxt->eflags;
tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
}
static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
struct tss_segment_32 *tss)
{
int ret;
u8 cpl;
if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
return emulate_gp(ctxt, 0);
ctxt->_eip = tss->eip;
ctxt->eflags = tss->eflags | 2;
/* General purpose registers */
*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
/*
* SDM says that segment selectors are loaded before segment
* descriptors. This is important because CPL checks will
* use CS.RPL.
*/
set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
/*
* If we're switching between Protected Mode and VM86, we need to make
* sure to update the mode before loading the segment descriptors so
* that the selectors are interpreted correctly.
*/
if (ctxt->eflags & X86_EFLAGS_VM) {
ctxt->mode = X86EMUL_MODE_VM86;
cpl = 3;
} else {
ctxt->mode = X86EMUL_MODE_PROT32;
cpl = tss->cs & 3;
}
/*
* Now load the segment descriptors. If a fault happens at this
* stage, it is handled in the context of the new task.
*/
ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
cpl, X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
return ret;
}
static int task_switch_32(struct x86_emulate_ctxt *ctxt,
u16 tss_selector, u16 old_tss_sel,
ulong old_tss_base, struct desc_struct *new_desc)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct tss_segment_32 tss_seg;
int ret;
u32 new_tss_base = get_desc_base(new_desc);
u32 eip_offset = offsetof(struct tss_segment_32, eip);
u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
save_state_to_tss32(ctxt, &tss_seg);
/* Only GP registers and segment selectors are saved */
ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
ldt_sel_offset - eip_offset, &ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
if (old_tss_sel != 0xffff) {
tss_seg.prev_task_link = old_tss_sel;
ret = ops->write_std(ctxt, new_tss_base,
&tss_seg.prev_task_link,
sizeof tss_seg.prev_task_link,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
}
return load_state_from_tss32(ctxt, &tss_seg);
}
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
u16 tss_selector, int idt_index, int reason,
bool has_error_code, u32 error_code)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct curr_tss_desc, next_tss_desc;
int ret;
u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
ulong old_tss_base =
ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
u32 desc_limit;
ulong desc_addr, dr7;
/* FIXME: old_tss_base == ~0 ? */
ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
if (ret != X86EMUL_CONTINUE)
return ret;
/* FIXME: check that next_tss_desc is tss */
/*
* Check privileges. The three cases are task switch caused by...
*
* 1. jmp/call/int to task gate: Check against DPL of the task gate
* 2. Exception/IRQ/iret: No check is performed
* 3. jmp/call to TSS/task-gate: No check is performed since the
* hardware checks it before exiting.
*/
if (reason == TASK_SWITCH_GATE) {
if (idt_index != -1) {
/* Software interrupts */
struct desc_struct task_gate_desc;
int dpl;
ret = read_interrupt_descriptor(ctxt, idt_index,
&task_gate_desc);
if (ret != X86EMUL_CONTINUE)
return ret;
dpl = task_gate_desc.dpl;
if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
return emulate_gp(ctxt, (idt_index << 3) | 0x2);
}
}
desc_limit = desc_limit_scaled(&next_tss_desc);
if (!next_tss_desc.p ||
((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
desc_limit < 0x2b)) {
return emulate_ts(ctxt, tss_selector & 0xfffc);
}
if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
}
if (reason == TASK_SWITCH_IRET)
ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
/*
 * Set the back link to the previous task only if the NT bit is set
 * in eflags; note that old_tss_sel is not used after this point.
 */
if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
old_tss_sel = 0xffff;
if (next_tss_desc.type & 8)
ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
old_tss_base, &next_tss_desc);
else
ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
old_tss_base, &next_tss_desc);
if (ret != X86EMUL_CONTINUE)
return ret;
if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
if (reason != TASK_SWITCH_IRET) {
next_tss_desc.type |= (1 << 1); /* set busy flag */
write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
}
ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
if (has_error_code) {
ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
ctxt->lock_prefix = 0;
ctxt->src.val = (unsigned long) error_code;
ret = em_push(ctxt);
}
ops->get_dr(ctxt, 7, &dr7);
ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
return ret;
}
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
u16 tss_selector, int idt_index, int reason,
bool has_error_code, u32 error_code)
{
int rc;
invalidate_registers(ctxt);
ctxt->_eip = ctxt->eip;
ctxt->dst.type = OP_NONE;
rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
has_error_code, error_code);
if (rc == X86EMUL_CONTINUE) {
ctxt->eip = ctxt->_eip;
writeback_registers(ctxt);
}
return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
struct operand *op)
{
int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
register_address_increment(ctxt, reg, df * op->bytes);
op->addr.mem.ea = register_address(ctxt, reg);
}
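/*
 * DAS: decimal-adjust AL after a packed-BCD subtraction. Each nibble is
 * corrected by subtracting 6 (low) or 0x60 (high), updating AF and CF.
 */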
static int em_das(struct x86_emulate_ctxt *ctxt)
{
u8 al, old_al;
bool af, cf, old_cf;
cf = ctxt->eflags & X86_EFLAGS_CF;
al = ctxt->dst.val;
old_al = al;
old_cf = cf;
cf = false;
af = ctxt->eflags & X86_EFLAGS_AF;
if ((al & 0x0f) > 9 || af) {
al -= 6;
cf = old_cf | (al >= 250);
af = true;
} else {
af = false;
}
if (old_al > 0x99 || old_cf) {
al -= 0x60;
cf = true;
}
ctxt->dst.val = al;
/* Set PF, ZF, SF */
ctxt->src.type = OP_IMM;
ctxt->src.val = 0;
ctxt->src.bytes = 1;
fastop(ctxt, em_or);
ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
if (cf)
ctxt->eflags |= X86_EFLAGS_CF;
if (af)
ctxt->eflags |= X86_EFLAGS_AF;
return X86EMUL_CONTINUE;
}
static int em_aam(struct x86_emulate_ctxt *ctxt)
{
u8 al, ah;
if (ctxt->src.val == 0)
return emulate_de(ctxt);
al = ctxt->dst.val & 0xff;
ah = al / ctxt->src.val;
al %= ctxt->src.val;
ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
/* Set PF, ZF, SF */
ctxt->src.type = OP_IMM;
ctxt->src.val = 0;
ctxt->src.bytes = 1;
fastop(ctxt, em_or);
return X86EMUL_CONTINUE;
}
static int em_aad(struct x86_emulate_ctxt *ctxt)
{
u8 al = ctxt->dst.val & 0xff;
u8 ah = (ctxt->dst.val >> 8) & 0xff;
al = (al + (ah * ctxt->src.val)) & 0xff;
ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
/* Set PF, ZF, SF */
ctxt->src.type = OP_IMM;
ctxt->src.val = 0;
ctxt->src.bytes = 1;
fastop(ctxt, em_or);
return X86EMUL_CONTINUE;
}
static int em_call(struct x86_emulate_ctxt *ctxt)
{
int rc;
long rel = ctxt->src.val;
ctxt->src.val = (unsigned long)ctxt->_eip;
rc = jmp_rel(ctxt, rel);
if (rc != X86EMUL_CONTINUE)
return rc;
return em_push(ctxt);
}
static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
u16 sel, old_cs;
ulong old_eip;
int rc;
struct desc_struct old_desc, new_desc;
const struct x86_emulate_ops *ops = ctxt->ops;
int cpl = ctxt->ops->cpl(ctxt);
enum x86emul_mode prev_mode = ctxt->mode;
old_eip = ctxt->_eip;
ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
X86_TRANSFER_CALL_JMP, &new_desc);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
if (rc != X86EMUL_CONTINUE)
goto fail;
ctxt->src.val = old_cs;
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
goto fail;
ctxt->src.val = old_eip;
rc = em_push(ctxt);
/*
 * If we failed, we tainted the memory, but at the very least we
 * should restore cs.
 */
if (rc != X86EMUL_CONTINUE) {
pr_warn_once("faulting far call emulation tainted memory\n");
goto fail;
}
return rc;
fail:
ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
ctxt->mode = prev_mode;
return rc;
}
static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned long eip;
rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_near(ctxt, eip);
if (rc != X86EMUL_CONTINUE)
return rc;
rsp_increment(ctxt, ctxt->src.val);
return X86EMUL_CONTINUE;
}
static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
/* Write back the register source. */
ctxt->src.val = ctxt->dst.val;
write_register_operand(&ctxt->src);
/* Write back the memory destination with implicit LOCK prefix. */
ctxt->dst.val = ctxt->src.orig_val;
ctxt->lock_prefix = 1;
return X86EMUL_CONTINUE;
}
static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
ctxt->dst.val = ctxt->src2.val;
return fastop(ctxt, em_imul);
}
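/*
 * CWD/CDQ/CQO: replicate the accumulator's sign bit into RDX; the
 * expression below evaluates to all ones when the sign bit is set and
 * to zero otherwise.
 */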
static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
ctxt->dst.type = OP_REG;
ctxt->dst.bytes = ctxt->src.bytes;
ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
return X86EMUL_CONTINUE;
}
static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
u64 tsc = 0;
ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
return X86EMUL_CONTINUE;
}
static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
{
u64 pmc;
if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
return emulate_gp(ctxt, 0);
*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
return X86EMUL_CONTINUE;
}
static int em_mov(struct x86_emulate_ctxt *ctxt)
{
memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
return X86EMUL_CONTINUE;
}
#define FFL(x) bit(X86_FEATURE_##x)
static int em_movbe(struct x86_emulate_ctxt *ctxt)
{
u32 ebx, ecx, edx, eax = 1;
u16 tmp;
/*
* Check MOVBE is set in the guest-visible CPUID leaf.
*/
ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
if (!(ecx & FFL(MOVBE)))
return emulate_ud(ctxt);
switch (ctxt->op_bytes) {
case 2:
/*
* From MOVBE definition: "...When the operand size is 16 bits,
* the upper word of the destination register remains unchanged
* ..."
*
* Casting either ->valptr or ->val to u16 breaks strict-aliasing
* rules, so we have to do the operation almost by hand.
*/
tmp = (u16)ctxt->src.val;
ctxt->dst.val &= ~0xffffUL;
ctxt->dst.val |= (unsigned long)swab16(tmp);
break;
case 4:
ctxt->dst.val = swab32((u32)ctxt->src.val);
break;
case 8:
ctxt->dst.val = swab64(ctxt->src.val);
break;
default:
BUG();
}
return X86EMUL_CONTINUE;
}
static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
return emulate_gp(ctxt, 0);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_dr_write(struct x86_emulate_ctxt *ctxt)
{
unsigned long val;
if (ctxt->mode == X86EMUL_MODE_PROT64)
val = ctxt->src.val & ~0ULL;
else
val = ctxt->src.val & ~0U;
/* #UD condition is already handled. */
if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
return emulate_gp(ctxt, 0);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
{
u64 msr_data;
msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
}
static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
{
u64 msr_data;
if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
return emulate_gp(ctxt, 0);
*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
return X86EMUL_CONTINUE;
}
static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
{
if (ctxt->modrm_reg > VCPU_SREG_GS)
return emulate_ud(ctxt);
ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
ctxt->dst.bytes = 2;
return X86EMUL_CONTINUE;
}
static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
{
u16 sel = ctxt->src.val;
if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
return emulate_ud(ctxt);
if (ctxt->modrm_reg == VCPU_SREG_SS)
ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}
static int em_lldt(struct x86_emulate_ctxt *ctxt)
{
u16 sel = ctxt->src.val;
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
}
static int em_ltr(struct x86_emulate_ctxt *ctxt)
{
u16 sel = ctxt->src.val;
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
}
static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
int rc;
ulong linear;
rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
if (rc == X86EMUL_CONTINUE)
ctxt->ops->invlpg(ctxt, linear);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_clts(struct x86_emulate_ctxt *ctxt)
{
ulong cr0;
cr0 = ctxt->ops->get_cr(ctxt, 0);
cr0 &= ~X86_CR0_TS;
ctxt->ops->set_cr(ctxt, 0, cr0);
return X86EMUL_CONTINUE;
}
static int em_hypercall(struct x86_emulate_ctxt *ctxt)
{
int rc = ctxt->ops->fix_hypercall(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
/* Let the processor re-execute the fixed hypercall */
ctxt->_eip = ctxt->eip;
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
void (*get)(struct x86_emulate_ctxt *ctxt,
struct desc_ptr *ptr))
{
struct desc_ptr desc_ptr;
if (ctxt->mode == X86EMUL_MODE_PROT64)
ctxt->op_bytes = 8;
get(ctxt, &desc_ptr);
if (ctxt->op_bytes == 2) {
ctxt->op_bytes = 4;
desc_ptr.address &= 0x00ffffff;
}
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return segmented_write(ctxt, ctxt->dst.addr.mem,
&desc_ptr, 2 + ctxt->op_bytes);
}
static int em_sgdt(struct x86_emulate_ctxt *ctxt)
{
return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
}
static int em_sidt(struct x86_emulate_ctxt *ctxt)
{
return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
}
static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
{
struct desc_ptr desc_ptr;
int rc;
if (ctxt->mode == X86EMUL_MODE_PROT64)
ctxt->op_bytes = 8;
rc = read_descriptor(ctxt, ctxt->src.addr.mem,
&desc_ptr.size, &desc_ptr.address,
ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
if (ctxt->mode == X86EMUL_MODE_PROT64 &&
is_noncanonical_address(desc_ptr.address))
return emulate_gp(ctxt, 0);
if (lgdt)
ctxt->ops->set_gdt(ctxt, &desc_ptr);
else
ctxt->ops->set_idt(ctxt, &desc_ptr);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{
return em_lgdt_lidt(ctxt, true);
}
static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
return em_lgdt_lidt(ctxt, false);
}
static int em_smsw(struct x86_emulate_ctxt *ctxt)
{
if (ctxt->dst.type == OP_MEM)
ctxt->dst.bytes = 2;
ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
return X86EMUL_CONTINUE;
}
static int em_lmsw(struct x86_emulate_ctxt *ctxt)
{
ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
| (ctxt->src.val & 0x0f));
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_loop(struct x86_emulate_ctxt *ctxt)
{
int rc = X86EMUL_CONTINUE;
register_address_increment(ctxt, VCPU_REGS_RCX, -1);
if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
(ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
rc = jmp_rel(ctxt, ctxt->src.val);
return rc;
}
static int em_jcxz(struct x86_emulate_ctxt *ctxt)
{
int rc = X86EMUL_CONTINUE;
if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
rc = jmp_rel(ctxt, ctxt->src.val);
return rc;
}
static int em_in(struct x86_emulate_ctxt *ctxt)
{
if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
&ctxt->dst.val))
return X86EMUL_IO_NEEDED;
return X86EMUL_CONTINUE;
}
static int em_out(struct x86_emulate_ctxt *ctxt)
{
ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
&ctxt->src.val, 1);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_cli(struct x86_emulate_ctxt *ctxt)
{
if (emulator_bad_iopl(ctxt))
return emulate_gp(ctxt, 0);
ctxt->eflags &= ~X86_EFLAGS_IF;
return X86EMUL_CONTINUE;
}
static int em_sti(struct x86_emulate_ctxt *ctxt)
{
if (emulator_bad_iopl(ctxt))
return emulate_gp(ctxt, 0);
ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
ctxt->eflags |= X86_EFLAGS_IF;
return X86EMUL_CONTINUE;
}
static int em_cpuid(struct x86_emulate_ctxt *ctxt)
{
u32 eax, ebx, ecx, edx;
eax = reg_read(ctxt, VCPU_REGS_RAX);
ecx = reg_read(ctxt, VCPU_REGS_RCX);
ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
*reg_write(ctxt, VCPU_REGS_RAX) = eax;
*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
*reg_write(ctxt, VCPU_REGS_RDX) = edx;
return X86EMUL_CONTINUE;
}
static int em_sahf(struct x86_emulate_ctxt *ctxt)
{
u32 flags;
flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
X86_EFLAGS_SF;
flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
ctxt->eflags &= ~0xffUL;
ctxt->eflags |= flags | X86_EFLAGS_FIXED;
return X86EMUL_CONTINUE;
}
static int em_lahf(struct x86_emulate_ctxt *ctxt)
{
*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
return X86EMUL_CONTINUE;
}
static int em_bswap(struct x86_emulate_ctxt *ctxt)
{
switch (ctxt->op_bytes) {
#ifdef CONFIG_X86_64
case 8:
asm("bswap %0" : "+r"(ctxt->dst.val));
break;
#endif
default:
asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
break;
}
return X86EMUL_CONTINUE;
}
static int em_clflush(struct x86_emulate_ctxt *ctxt)
{
/* emulate clflush as a no-op, regardless of cpuid */
return X86EMUL_CONTINUE;
}
static int em_movsxd(struct x86_emulate_ctxt *ctxt)
{
ctxt->dst.val = (s32) ctxt->src.val;
return X86EMUL_CONTINUE;
}
static bool valid_cr(int nr)
{
switch (nr) {
case 0:
case 2 ... 4:
case 8:
return true;
default:
return false;
}
}
static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
if (!valid_cr(ctxt->modrm_reg))
return emulate_ud(ctxt);
return X86EMUL_CONTINUE;
}
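/*
 * MOV to CRn: reject values that set architecturally reserved bits, with
 * additional mode-dependent consistency checks for CR0, CR3 and CR4 below.
 */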
static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
u64 new_val = ctxt->src.val64;
int cr = ctxt->modrm_reg;
u64 efer = 0;
static u64 cr_reserved_bits[] = {
0xffffffff00000000ULL,
0, 0, 0, /* CR3 checked later */
CR4_RESERVED_BITS,
0, 0, 0,
CR8_RESERVED_BITS,
};
if (!valid_cr(cr))
return emulate_ud(ctxt);
if (new_val & cr_reserved_bits[cr])
return emulate_gp(ctxt, 0);
switch (cr) {
case 0: {
u64 cr4;
if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
return emulate_gp(ctxt, 0);
cr4 = ctxt->ops->get_cr(ctxt, 4);
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
!(cr4 & X86_CR4_PAE))
return emulate_gp(ctxt, 0);
break;
}
case 3: {
u64 rsvd = 0;
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (efer & EFER_LMA)
rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
if (new_val & rsvd)
return emulate_gp(ctxt, 0);
break;
}
case 4: {
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
return emulate_gp(ctxt, 0);
break;
}
}
return X86EMUL_CONTINUE;
}
static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
unsigned long dr7;
ctxt->ops->get_dr(ctxt, 7, &dr7);
/* Check if DR7.Global_Enable is set */
return dr7 & (1 << 13);
}
static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
int dr = ctxt->modrm_reg;
u64 cr4;
if (dr > 7)
return emulate_ud(ctxt);
cr4 = ctxt->ops->get_cr(ctxt, 4);
if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
return emulate_ud(ctxt);
if (check_dr7_gd(ctxt)) {
ulong dr6;
ctxt->ops->get_dr(ctxt, 6, &dr6);
dr6 &= ~15;
dr6 |= DR6_BD | DR6_RTM;
ctxt->ops->set_dr(ctxt, 6, dr6);
return emulate_db(ctxt);
}
return X86EMUL_CONTINUE;
}
static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
u64 new_val = ctxt->src.val64;
int dr = ctxt->modrm_reg;
if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
return emulate_gp(ctxt, 0);
return check_dr_read(ctxt);
}
static int check_svme(struct x86_emulate_ctxt *ctxt)
{
u64 efer;
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (!(efer & EFER_SVME))
return emulate_ud(ctxt);
return X86EMUL_CONTINUE;
}
static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
/* Valid physical address? */
if (rax & 0xffff000000000000ULL)
return emulate_gp(ctxt, 0);
return check_svme(ctxt);
}
static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
return emulate_ud(ctxt);
return X86EMUL_CONTINUE;
}
static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
ctxt->ops->check_pmc(ctxt, rcx))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
}
static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
if (!emulator_io_permitted(ctxt, ctxt->src.val, ctxt->dst.bytes))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
}
static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
ctxt->src.bytes = min(ctxt->src.bytes, 4u);
if (!emulator_io_permitted(ctxt, ctxt->dst.val, ctxt->src.bytes))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
}
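/*
 * Shorthand for the decode tables below: D declares an opcode by flags
 * alone, I adds an execute callback, F a fastop, DI/II attach an
 * intercept, the *P variants add a permission check, and N marks an
 * opcode as not implemented.
 */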
#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
.intercept = x86_intercept_##_i, .check_perm = (_p) }
#define N D(NotImpl)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
#define II(_f, _e, _i) \
{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
.intercept = x86_intercept_##_i, .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
#define D2bv(_f) D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
#define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
static const struct opcode group7_rm0[] = {
N,
I(SrcNone | Priv | EmulateOnUD, em_hypercall),
N, N, N, N, N, N,
};
static const struct opcode group7_rm1[] = {
DI(SrcNone | Priv, monitor),
DI(SrcNone | Priv, mwait),
N, N, N, N, N, N,
};
static const struct opcode group7_rm3[] = {
DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
DIP(SrcNone | Prot | Priv, stgi, check_svme),
DIP(SrcNone | Prot | Priv, clgi, check_svme),
DIP(SrcNone | Prot | Priv, skinit, check_svme),
DIP(SrcNone | Prot | Priv, invlpga, check_svme),
};
static const struct opcode group7_rm7[] = {
N,
DIP(SrcNone, rdtscp, check_rdtsc),
N, N, N, N, N, N,
};
static const struct opcode group1[] = {
F(Lock, em_add),
F(Lock | PageTable, em_or),
F(Lock, em_adc),
F(Lock, em_sbb),
F(Lock | PageTable, em_and),
F(Lock, em_sub),
F(Lock, em_xor),
F(NoWrite, em_cmp),
};
static const struct opcode group1A[] = {
I(DstMem | SrcNone | Mov | Stack | IncSP, em_pop), N, N, N, N, N, N, N,
};
static const struct opcode group2[] = {
F(DstMem | ModRM, em_rol),
F(DstMem | ModRM, em_ror),
F(DstMem | ModRM, em_rcl),
F(DstMem | ModRM, em_rcr),
F(DstMem | ModRM, em_shl),
F(DstMem | ModRM, em_shr),
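/* /6 is the undocumented SAL encoding, which behaves exactly like SHL */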
F(DstMem | ModRM, em_shl),
F(DstMem | ModRM, em_sar),
};
static const struct opcode group3[] = {
F(DstMem | SrcImm | NoWrite, em_test),
F(DstMem | SrcImm | NoWrite, em_test),
F(DstMem | SrcNone | Lock, em_not),
F(DstMem | SrcNone | Lock, em_neg),
F(DstXacc | Src2Mem, em_mul_ex),
F(DstXacc | Src2Mem, em_imul_ex),
F(DstXacc | Src2Mem, em_div_ex),
F(DstXacc | Src2Mem, em_idiv_ex),
};
static const struct opcode group4[] = {
F(ByteOp | DstMem | SrcNone | Lock, em_inc),
F(ByteOp | DstMem | SrcNone | Lock, em_dec),
N, N, N, N, N, N,
};
static const struct opcode group5[] = {
F(DstMem | SrcNone | Lock, em_inc),
F(DstMem | SrcNone | Lock, em_dec),
I(SrcMem | NearBranch, em_call_near_abs),
I(SrcMemFAddr | ImplicitOps, em_call_far),
I(SrcMem | NearBranch, em_jmp_abs),
I(SrcMemFAddr | ImplicitOps, em_jmp_far),
I(SrcMem | Stack, em_push), D(Undefined),
};
static const struct opcode group6[] = {
DI(Prot | DstMem, sldt),
DI(Prot | DstMem, str),
II(Prot | Priv | SrcMem16, em_lldt, lldt),
II(Prot | Priv | SrcMem16, em_ltr, ltr),
N, N, N, N,
};
static const struct group_dual group7 = { {
II(Mov | DstMem, em_sgdt, sgdt),
II(Mov | DstMem, em_sidt, sidt),
II(SrcMem | Priv, em_lgdt, lgdt),
II(SrcMem | Priv, em_lidt, lidt),
II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
}, {
EXT(0, group7_rm0),
EXT(0, group7_rm1),
N, EXT(0, group7_rm3),
II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
EXT(0, group7_rm7),
} };
static const struct opcode group8[] = {
N, N, N, N,
F(DstMem | SrcImmByte | NoWrite, em_bt),
F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
F(DstMem | SrcImmByte | Lock, em_btr),
F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
};
static const struct group_dual group9 = { {
N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
N, N, N, N, N, N, N, N,
} };
static const struct opcode group11[] = {
I(DstMem | SrcImm | Mov | PageTable, em_mov),
X7(D(Undefined)),
};
static const struct gprefix pfx_0f_ae_7 = {
I(SrcMem | ByteOp, em_clflush), N, N, N,
};
static const struct group_dual group15 = { {
N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
}, {
N, N, N, N, N, N, N, N,
} };
static const struct gprefix pfx_0f_6f_0f_7f = {
I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
};
static const struct instr_dual instr_dual_0f_2b = {
I(0, em_mov), N
};
static const struct gprefix pfx_0f_2b = {
ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
};
static const struct gprefix pfx_0f_28_0f_29 = {
I(Aligned, em_mov), I(Aligned, em_mov), N, N,
};
static const struct gprefix pfx_0f_e7 = {
N, I(Sse, em_mov), N, N,
};
static const struct escape escape_d9 = { {
N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
}, {
/* 0xC0 - 0xC7 */
N, N, N, N, N, N, N, N,
/* 0xC8 - 0xCF */
N, N, N, N, N, N, N, N,
/* 0xD0 - 0xD7 */
N, N, N, N, N, N, N, N,
/* 0xD8 - 0xDF */
N, N, N, N, N, N, N, N,
/* 0xE0 - 0xE7 */
N, N, N, N, N, N, N, N,
/* 0xE8 - 0xEF */
N, N, N, N, N, N, N, N,
/* 0xF0 - 0xF7 */
N, N, N, N, N, N, N, N,
/* 0xF8 - 0xFF */
N, N, N, N, N, N, N, N,
} };
static const struct escape escape_db = { {
N, N, N, N, N, N, N, N,
}, {
/* 0xC0 - 0xC7 */
N, N, N, N, N, N, N, N,
/* 0xC8 - 0xCF */
N, N, N, N, N, N, N, N,
/* 0xD0 - 0xD7 */
N, N, N, N, N, N, N, N,
/* 0xD8 - 0xDF */
N, N, N, N, N, N, N, N,
/* 0xE0 - 0xE7 */
N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
/* 0xE8 - 0xEF */
N, N, N, N, N, N, N, N,
/* 0xF0 - 0xF7 */
N, N, N, N, N, N, N, N,
/* 0xF8 - 0xFF */
N, N, N, N, N, N, N, N,
} };
static const struct escape escape_dd = { {
N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
}, {
/* 0xC0 - 0xC7 */
N, N, N, N, N, N, N, N,
/* 0xC8 - 0xCF */
N, N, N, N, N, N, N, N,
/* 0xD0 - 0xD7 */
N, N, N, N, N, N, N, N,
/* 0xD8 - 0xDF */
N, N, N, N, N, N, N, N,
/* 0xE0 - 0xE7 */
N, N, N, N, N, N, N, N,
/* 0xE8 - 0xEF */
N, N, N, N, N, N, N, N,
/* 0xF0 - 0xF7 */
N, N, N, N, N, N, N, N,
/* 0xF8 - 0xFF */
N, N, N, N, N, N, N, N,
} };
static const struct instr_dual instr_dual_0f_c3 = {
I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
};
static const struct mode_dual mode_dual_63 = {
N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
};
static const struct opcode opcode_table[256] = {
/* 0x00 - 0x07 */
F6ALU(Lock, em_add),
I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
/* 0x08 - 0x0F */
F6ALU(Lock | PageTable, em_or),
I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
N,
/* 0x10 - 0x17 */
F6ALU(Lock, em_adc),
I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
/* 0x18 - 0x1F */
F6ALU(Lock, em_sbb),
I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
/* 0x20 - 0x27 */
F6ALU(Lock | PageTable, em_and), N, N,
/* 0x28 - 0x2F */
F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
/* 0x30 - 0x37 */
F6ALU(Lock, em_xor), N, N,
/* 0x38 - 0x3F */
F6ALU(NoWrite, em_cmp), N, N,
/* 0x40 - 0x4F */
X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
/* 0x50 - 0x57 */
X8(I(SrcReg | Stack, em_push)),
/* 0x58 - 0x5F */
X8(I(DstReg | Stack, em_pop)),
/* 0x60 - 0x67 */
I(ImplicitOps | Stack | No64, em_pusha),
I(ImplicitOps | Stack | No64, em_popa),
N, MD(ModRM, &mode_dual_63),
N, N, N, N,
/* 0x68 - 0x6F */
I(SrcImm | Mov | Stack, em_push),
I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
I(SrcImmByte | Mov | Stack, em_push),
I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
/* 0x70 - 0x7F */
X16(D(SrcImmByte | NearBranch)),
/* 0x80 - 0x87 */
G(ByteOp | DstMem | SrcImm, group1),
G(DstMem | SrcImm, group1),
G(ByteOp | DstMem | SrcImm | No64, group1),
G(DstMem | SrcImmByte, group1),
F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
/* 0x88 - 0x8F */
I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
D(ModRM | SrcMem | NoAccess | DstReg),
I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
G(0, group1A),
/* 0x90 - 0x97 */
DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
/* 0x98 - 0x9F */
D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
I(SrcImmFAddr | No64, em_call_far), N,
II(ImplicitOps | Stack, em_pushf, pushf),
II(ImplicitOps | Stack, em_popf, popf),
I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
/* 0xA0 - 0xA7 */
I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
I2bv(SrcSI | DstDI | Mov | String, em_mov),
F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
/* 0xA8 - 0xAF */
F2bv(DstAcc | SrcImm | NoWrite, em_test),
I2bv(SrcAcc | DstDI | Mov | String, em_mov),
I2bv(SrcSI | DstAcc | Mov | String, em_mov),
F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
/* 0xB0 - 0xB7 */
X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
/* 0xB8 - 0xBF */
X8(I(DstReg | SrcImm64 | Mov, em_mov)),
/* 0xC0 - 0xC7 */
G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
I(ImplicitOps | NearBranch, em_ret),
I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
G(ByteOp, group11), G(0, group11),
/* 0xC8 - 0xCF */
I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
I(ImplicitOps | SrcImmU16, em_ret_far_imm),
I(ImplicitOps, em_ret_far),
D(ImplicitOps), DI(SrcImmByte, intn),
D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
/* 0xD0 - 0xD7 */
G(Src2One | ByteOp, group2), G(Src2One, group2),
G(Src2CL | ByteOp, group2), G(Src2CL, group2),
I(DstAcc | SrcImmUByte | No64, em_aam),
I(DstAcc | SrcImmUByte | No64, em_aad),
F(DstAcc | ByteOp | No64, em_salc),
I(DstAcc | SrcXLat | ByteOp, em_mov),
/* 0xD8 - 0xDF */
N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
/* 0xE0 - 0xE7 */
X3(I(SrcImmByte | NearBranch, em_loop)),
I(SrcImmByte | NearBranch, em_jcxz),
I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
/* 0xE8 - 0xEF */
I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
I(SrcImmFAddr | No64, em_jmp_far),
D(SrcImmByte | ImplicitOps | NearBranch),
I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
/* 0xF0 - 0xF7 */
N, DI(ImplicitOps, icebp), N, N,
DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
G(ByteOp, group3), G(0, group3),
/* 0xF8 - 0xFF */
D(ImplicitOps), D(ImplicitOps),
I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};
static const struct opcode twobyte_table[256] = {
/* 0x00 - 0x0F */
G(0, group6), GD(0, &group7), N, N,
N, I(ImplicitOps | EmulateOnUD, em_syscall),
II(ImplicitOps | Priv, em_clts, clts), N,
DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
/* 0x10 - 0x1F */
N, N, N, N, N, N, N, N,
D(ImplicitOps | ModRM | SrcMem | NoAccess),
N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
/* 0x20 - 0x2F */
DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
check_cr_write),
IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
check_dr_write),
N, N, N, N,
GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
N, N, N, N,
/* 0x30 - 0x3F */
II(ImplicitOps | Priv, em_wrmsr, wrmsr),
IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
II(ImplicitOps | Priv, em_rdmsr, rdmsr),
IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
I(ImplicitOps | EmulateOnUD, em_sysenter),
I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
N, N,
N, N, N, N, N, N, N, N,
/* 0x40 - 0x4F */
X16(D(DstReg | SrcMem | ModRM)),
/* 0x50 - 0x5F */
N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
/* 0x60 - 0x6F */
N, N, N, N,
N, N, N, N,
N, N, N, N,
N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
/* 0x70 - 0x7F */
N, N, N, N,
N, N, N, N,
N, N, N, N,
N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
/* 0x80 - 0x8F */
X16(D(SrcImm | NearBranch)),
/* 0x90 - 0x9F */
X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
/* 0xA0 - 0xA7 */
I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
II(ImplicitOps, em_cpuid, cpuid),
F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
/* 0xA8 - 0xAF */
I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
/* 0xB0 - 0xB7 */
I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
/* 0xB8 - 0xBF */
N, N,
G(BitOp, group8),
F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
I(DstReg | SrcMem | ModRM, em_bsf_c),
I(DstReg | SrcMem | ModRM, em_bsr_c),
D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
/* 0xC0 - 0xC7 */
F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
N, ID(0, &instr_dual_0f_c3),
N, N, N, GD(0, &group9),
/* 0xC8 - 0xCF */
X8(I(DstReg, em_bswap)),
/* 0xD0 - 0xDF */
N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
/* 0xE0 - 0xEF */
N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
N, N, N, N, N, N, N, N,
/* 0xF0 - 0xFF */
N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};
static const struct instr_dual instr_dual_0f_38_f0 = {
I(DstReg | SrcMem | Mov, em_movbe), N
};
static const struct instr_dual instr_dual_0f_38_f1 = {
I(DstMem | SrcReg | Mov, em_movbe), N
};
static const struct gprefix three_byte_0f_38_f0 = {
ID(0, &instr_dual_0f_38_f0), N, N, N
};
static const struct gprefix three_byte_0f_38_f1 = {
ID(0, &instr_dual_0f_38_f1), N, N, N
};
/*
* Insns below are selected by the prefix, which is indexed by the third
* opcode byte.
*/
static const struct opcode opcode_map_0f_38[256] = {
/* 0x00 - 0x7f */
X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
/* 0x80 - 0xef */
X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
/* 0xf0 - 0xf1 */
GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
/* 0xf2 - 0xff */
N, N, X4(N), X8(N)
};
#undef D
#undef N
#undef G
#undef GD
#undef I
#undef GP
#undef EXT
#undef MD
#undef ID
#undef D2bv
#undef D2bvIP
#undef I2bv
#undef I2bvIP
#undef I6ALU
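/*
 * Returns the immediate size implied by the effective operand size; in
 * 64-bit mode most instructions still take a 4-byte, sign-extended
 * immediate, hence the 8 -> 4 clamp.
 */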
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
unsigned size;
size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
if (size == 8)
size = 4;
return size;
}
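/*
 * Fetches an immediate operand of @size bytes from the instruction
 * stream; the fetch sign-extends by default, so the value is masked
 * back down when zero extension was requested.
 */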
static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
unsigned size, bool sign_extension)
{
int rc = X86EMUL_CONTINUE;
op->type = OP_IMM;
op->bytes = size;
op->addr.mem.ea = ctxt->_eip;
/* NB. Immediates are sign-extended as necessary. */
switch (op->bytes) {
case 1:
op->val = insn_fetch(s8, ctxt);
break;
case 2:
op->val = insn_fetch(s16, ctxt);
break;
case 4:
op->val = insn_fetch(s32, ctxt);
break;
case 8:
op->val = insn_fetch(s64, ctxt);
break;
}
if (!sign_extension) {
switch (op->bytes) {
case 1:
op->val &= 0xff;
break;
case 2:
op->val &= 0xffff;
break;
case 4:
op->val &= 0xffffffff;
break;
}
}
done:
return rc;
}
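/*
 * Decodes one operand according to an Op* selector taken from the
 * opcode flags. Memory-style operands share the mem_common tail, which
 * also records ctxt->memopp for the rip-relative fixup done at the end
 * of x86_decode_insn().
 */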
static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
unsigned d)
{
int rc = X86EMUL_CONTINUE;
switch (d) {
case OpReg:
decode_register_operand(ctxt, op);
break;
case OpImmUByte:
rc = decode_imm(ctxt, op, 1, false);
break;
case OpMem:
ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
mem_common:
*op = ctxt->memop;
ctxt->memopp = op;
if (ctxt->d & BitOp)
fetch_bit_operand(ctxt);
op->orig_val = op->val;
break;
case OpMem64:
ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
goto mem_common;
case OpAcc:
op->type = OP_REG;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
fetch_register_operand(op);
op->orig_val = op->val;
break;
case OpAccLo:
op->type = OP_REG;
op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
fetch_register_operand(op);
op->orig_val = op->val;
break;
case OpAccHi:
if (ctxt->d & ByteOp) {
op->type = OP_NONE;
break;
}
op->type = OP_REG;
op->bytes = ctxt->op_bytes;
op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
fetch_register_operand(op);
op->orig_val = op->val;
break;
case OpDI:
op->type = OP_MEM;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.mem.ea =
register_address(ctxt, VCPU_REGS_RDI);
op->addr.mem.seg = VCPU_SREG_ES;
op->val = 0;
op->count = 1;
break;
case OpDX:
op->type = OP_REG;
op->bytes = 2;
op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
fetch_register_operand(op);
break;
case OpCL:
op->type = OP_IMM;
op->bytes = 1;
op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
break;
case OpImmByte:
rc = decode_imm(ctxt, op, 1, true);
break;
case OpOne:
op->type = OP_IMM;
op->bytes = 1;
op->val = 1;
break;
case OpImm:
rc = decode_imm(ctxt, op, imm_size(ctxt), true);
break;
case OpImm64:
rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
break;
case OpMem8:
ctxt->memop.bytes = 1;
if (ctxt->memop.type == OP_REG) {
ctxt->memop.addr.reg = decode_register(ctxt,
ctxt->modrm_rm, true);
fetch_register_operand(&ctxt->memop);
}
goto mem_common;
case OpMem16:
ctxt->memop.bytes = 2;
goto mem_common;
case OpMem32:
ctxt->memop.bytes = 4;
goto mem_common;
case OpImmU16:
rc = decode_imm(ctxt, op, 2, false);
break;
case OpImmU:
rc = decode_imm(ctxt, op, imm_size(ctxt), false);
break;
case OpSI:
op->type = OP_MEM;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.mem.ea =
register_address(ctxt, VCPU_REGS_RSI);
op->addr.mem.seg = ctxt->seg_override;
op->val = 0;
op->count = 1;
break;
case OpXLat:
op->type = OP_MEM;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.mem.ea =
address_mask(ctxt,
reg_read(ctxt, VCPU_REGS_RBX) +
(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
op->addr.mem.seg = ctxt->seg_override;
op->val = 0;
break;
case OpImmFAddr:
op->type = OP_IMM;
op->addr.mem.ea = ctxt->_eip;
op->bytes = ctxt->op_bytes + 2;
insn_fetch_arr(op->valptr, op->bytes, ctxt);
break;
case OpMemFAddr:
ctxt->memop.bytes = ctxt->op_bytes + 2;
goto mem_common;
case OpES:
op->type = OP_IMM;
op->val = VCPU_SREG_ES;
break;
case OpCS:
op->type = OP_IMM;
op->val = VCPU_SREG_CS;
break;
case OpSS:
op->type = OP_IMM;
op->val = VCPU_SREG_SS;
break;
case OpDS:
op->type = OP_IMM;
op->val = VCPU_SREG_DS;
break;
case OpFS:
op->type = OP_IMM;
op->val = VCPU_SREG_FS;
break;
case OpGS:
op->type = OP_IMM;
op->val = VCPU_SREG_GS;
break;
case OpImplicit:
/* Special instructions do their own operand decoding. */
default:
op->type = OP_NONE; /* Disable writeback. */
break;
}
done:
return rc;
}
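/*
 * Main decode entry point: consumes legacy and REX prefixes, the
 * opcode byte(s) and ModRM/SIB, resolves group/prefix/escape-selected
 * opcodes, then decodes the source, second source and destination
 * operands.
 */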
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
int rc = X86EMUL_CONTINUE;
int mode = ctxt->mode;
int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
bool op_prefix = false;
bool has_seg_override = false;
struct opcode opcode;
ctxt->memop.type = OP_NONE;
ctxt->memopp = NULL;
ctxt->_eip = ctxt->eip;
ctxt->fetch.ptr = ctxt->fetch.data;
ctxt->fetch.end = ctxt->fetch.data + insn_len;
ctxt->opcode_len = 1;
if (insn_len > 0)
memcpy(ctxt->fetch.data, insn, insn_len);
else {
rc = __do_insn_fetch_bytes(ctxt, 1);
if (rc != X86EMUL_CONTINUE)
return rc;
}
switch (mode) {
case X86EMUL_MODE_REAL:
case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
def_op_bytes = def_ad_bytes = 2;
break;
case X86EMUL_MODE_PROT32:
def_op_bytes = def_ad_bytes = 4;
break;
#ifdef CONFIG_X86_64
case X86EMUL_MODE_PROT64:
def_op_bytes = 4;
def_ad_bytes = 8;
break;
#endif
default:
return EMULATION_FAILED;
}
ctxt->op_bytes = def_op_bytes;
ctxt->ad_bytes = def_ad_bytes;
/* Legacy prefixes. */
for (;;) {
switch (ctxt->b = insn_fetch(u8, ctxt)) {
case 0x66: /* operand-size override */
op_prefix = true;
/* switch between 2/4 bytes */
ctxt->op_bytes = def_op_bytes ^ 6;
break;
case 0x67: /* address-size override */
if (mode == X86EMUL_MODE_PROT64)
/* switch between 4/8 bytes */
ctxt->ad_bytes = def_ad_bytes ^ 12;
else
/* switch between 2/4 bytes */
ctxt->ad_bytes = def_ad_bytes ^ 6;
break;
case 0x26: /* ES override */
case 0x2e: /* CS override */
case 0x36: /* SS override */
case 0x3e: /* DS override */
has_seg_override = true;
ctxt->seg_override = (ctxt->b >> 3) & 3;
break;
case 0x64: /* FS override */
case 0x65: /* GS override */
has_seg_override = true;
ctxt->seg_override = ctxt->b & 7;
break;
case 0x40 ... 0x4f: /* REX */
if (mode != X86EMUL_MODE_PROT64)
goto done_prefixes;
ctxt->rex_prefix = ctxt->b;
continue;
case 0xf0: /* LOCK */
ctxt->lock_prefix = 1;
break;
case 0xf2: /* REPNE/REPNZ */
case 0xf3: /* REP/REPE/REPZ */
ctxt->rep_prefix = ctxt->b;
break;
default:
goto done_prefixes;
}
/* Any legacy prefix after a REX prefix nullifies its effect. */
ctxt->rex_prefix = 0;
}
done_prefixes:
/* REX prefix. */
if (ctxt->rex_prefix & 8)
ctxt->op_bytes = 8; /* REX.W */
/* Opcode byte(s). */
opcode = opcode_table[ctxt->b];
/* Two-byte opcode? */
if (ctxt->b == 0x0f) {
ctxt->opcode_len = 2;
ctxt->b = insn_fetch(u8, ctxt);
opcode = twobyte_table[ctxt->b];
/* 0F_38 opcode map */
if (ctxt->b == 0x38) {
ctxt->opcode_len = 3;
ctxt->b = insn_fetch(u8, ctxt);
opcode = opcode_map_0f_38[ctxt->b];
}
}
ctxt->d = opcode.flags;
if (ctxt->d & ModRM)
ctxt->modrm = insn_fetch(u8, ctxt);
/* vex-prefix instructions are not implemented */
if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
(mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
ctxt->d = NotImpl;
}
while (ctxt->d & GroupMask) {
switch (ctxt->d & GroupMask) {
case Group:
goffset = (ctxt->modrm >> 3) & 7;
opcode = opcode.u.group[goffset];
break;
case GroupDual:
goffset = (ctxt->modrm >> 3) & 7;
if ((ctxt->modrm >> 6) == 3)
opcode = opcode.u.gdual->mod3[goffset];
else
opcode = opcode.u.gdual->mod012[goffset];
break;
case RMExt:
goffset = ctxt->modrm & 7;
opcode = opcode.u.group[goffset];
break;
case Prefix:
if (ctxt->rep_prefix && op_prefix)
return EMULATION_FAILED;
simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
switch (simd_prefix) {
case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
}
break;
case Escape:
if (ctxt->modrm > 0xbf)
opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
else
opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
break;
case InstrDual:
if ((ctxt->modrm >> 6) == 3)
opcode = opcode.u.idual->mod3;
else
opcode = opcode.u.idual->mod012;
break;
case ModeDual:
if (ctxt->mode == X86EMUL_MODE_PROT64)
opcode = opcode.u.mdual->mode64;
else
opcode = opcode.u.mdual->mode32;
break;
default:
return EMULATION_FAILED;
}
ctxt->d &= ~(u64)GroupMask;
ctxt->d |= opcode.flags;
}
/* Unrecognised? */
if (ctxt->d == 0)
return EMULATION_FAILED;
ctxt->execute = opcode.u.execute;
if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
return EMULATION_FAILED;
if (unlikely(ctxt->d &
(NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
No16))) {
/*
* These are copied unconditionally here, and checked unconditionally
* in x86_emulate_insn.
*/
ctxt->check_perm = opcode.check_perm;
ctxt->intercept = opcode.intercept;
if (ctxt->d & NotImpl)
return EMULATION_FAILED;
if (mode == X86EMUL_MODE_PROT64) {
if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
ctxt->op_bytes = 8;
else if (ctxt->d & NearBranch)
ctxt->op_bytes = 8;
}
if (ctxt->d & Op3264) {
if (mode == X86EMUL_MODE_PROT64)
ctxt->op_bytes = 8;
else
ctxt->op_bytes = 4;
}
if ((ctxt->d & No16) && ctxt->op_bytes == 2)
ctxt->op_bytes = 4;
if (ctxt->d & Sse)
ctxt->op_bytes = 16;
else if (ctxt->d & Mmx)
ctxt->op_bytes = 8;
}
/* ModRM and SIB bytes. */
if (ctxt->d & ModRM) {
rc = decode_modrm(ctxt, &ctxt->memop);
if (!has_seg_override) {
has_seg_override = true;
ctxt->seg_override = ctxt->modrm_seg;
}
} else if (ctxt->d & MemAbs)
rc = decode_abs(ctxt, &ctxt->memop);
if (rc != X86EMUL_CONTINUE)
goto done;
if (!has_seg_override)
ctxt->seg_override = VCPU_SREG_DS;
ctxt->memop.addr.mem.seg = ctxt->seg_override;
/*
* Decode and fetch the source operand: register, memory
* or immediate.
*/
rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
if (rc != X86EMUL_CONTINUE)
goto done;
/*
* Decode and fetch the second source operand: register, memory
* or immediate.
*/
rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
if (rc != X86EMUL_CONTINUE)
goto done;
/* Decode and fetch the destination operand: register or memory. */
rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
if (ctxt->rip_relative && likely(ctxt->memopp))
ctxt->memopp->addr.mem.ea = address_mask(ctxt,
ctxt->memopp->addr.mem.ea + ctxt->_eip);
done:
return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}
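/* Reports whether the decoded instruction is flagged as one that may
 * write guest page tables. */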
bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
return ctxt->d & PageTable;
}
static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
/* The second termination condition only applies to REPE
* and REPNE. If the repeat string operation prefix is
* REPE/REPZ or REPNE/REPNZ, test the corresponding
* termination condition:
* - if REPE/REPZ and ZF = 0 then done
* - if REPNE/REPNZ and ZF = 1 then done
*/
if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
(ctxt->b == 0xae) || (ctxt->b == 0xaf))
&& (((ctxt->rep_prefix == REPE_PREFIX) &&
((ctxt->eflags & X86_EFLAGS_ZF) == 0))
|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
return true;
return false;
}
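/*
 * Executes fwait so that any pending x87 exception is raised now and
 * caught via the fixup entry; a caught fault is reflected back to the
 * guest as #MF.
 */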
static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
bool fault = false;
ctxt->ops->get_fpu(ctxt);
asm volatile("1: fwait \n\t"
"2: \n\t"
".pushsection .fixup,\"ax\" \n\t"
"3: \n\t"
"movb $1, %[fault] \n\t"
"jmp 2b \n\t"
".popsection \n\t"
_ASM_EXTABLE(1b, 3b)
: [fault]"+qm"(fault));
ctxt->ops->put_fpu(ctxt);
if (unlikely(fault))
return emulate_exception(ctxt, MF_VECTOR, 0, false);
return X86EMUL_CONTINUE;
}
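/* Loads the MMX register backing an OP_MM operand, if there is one. */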
static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
struct operand *op)
{
if (op->type == OP_MM)
read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}
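/*
 * Dispatches to a fastop stub: guest arithmetic flags are installed,
 * the stub matching the destination size is called (stubs are spaced
 * FASTOP_SIZE bytes apart), and the resulting flags are captured. A
 * stub signals a fault by clearing fop, which is turned into #DE here.
 */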
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
register void *__sp asm(_ASM_SP);
ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
if (!(ctxt->d & ByteOp))
fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
: "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
[fastop]"+S"(fop), "+r"(__sp)
: "c"(ctxt->src2.val));
ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
if (!fop) /* exception is returned in fop variable */
return emulate_de(ctxt);
return X86EMUL_CONTINUE;
}
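/*
 * Zeroes the per-instruction decode state: every field from
 * rip_relative up to (but not including) modrm, plus the I/O and
 * memory read caches.
 */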
void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
memset(&ctxt->rip_relative, 0,
(void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
ctxt->io_read.pos = 0;
ctxt->io_read.end = 0;
ctxt->mem_read.end = 0;
}
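/*
 * Executes an instruction previously decoded by x86_decode_insn():
 * privilege/intercept checks, operand fetch, dispatch via ->execute or
 * the opcode switches below, then writeback and string-op repeat
 * handling.
 */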
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
const struct x86_emulate_ops *ops = ctxt->ops;
int rc = X86EMUL_CONTINUE;
int saved_dst_type = ctxt->dst.type;
ctxt->mem_read.pos = 0;
/* LOCK prefix is allowed only with some instructions */
if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
rc = emulate_ud(ctxt);
goto done;
}
if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
rc = emulate_ud(ctxt);
goto done;
}
if (unlikely(ctxt->d &
(No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
(ctxt->d & Undefined)) {
rc = emulate_ud(ctxt);
goto done;
}
if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
|| ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
rc = emulate_ud(ctxt);
goto done;
}
if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
rc = emulate_nm(ctxt);
goto done;
}
if (ctxt->d & Mmx) {
rc = flush_pending_x87_faults(ctxt);
if (rc != X86EMUL_CONTINUE)
goto done;
/*
* Now that we know the fpu is exception safe, we can fetch
* operands from it.
*/
fetch_possible_mmx_operand(ctxt, &ctxt->src);
fetch_possible_mmx_operand(ctxt, &ctxt->src2);
if (!(ctxt->d & Mov))
fetch_possible_mmx_operand(ctxt, &ctxt->dst);
}
if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_PRE_EXCEPT);
if (rc != X86EMUL_CONTINUE)
goto done;
}
/* Instruction can only be executed in protected mode */
if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
rc = emulate_ud(ctxt);
goto done;
}
/* Privileged instruction can be executed only in CPL=0 */
if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
if (ctxt->d & PrivUD)
rc = emulate_ud(ctxt);
else
rc = emulate_gp(ctxt, 0);
goto done;
}
/* Do instruction specific permission checks */
if (ctxt->d & CheckPerm) {
rc = ctxt->check_perm(ctxt);
if (rc != X86EMUL_CONTINUE)
goto done;
}
if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_POST_EXCEPT);
if (rc != X86EMUL_CONTINUE)
goto done;
}
if (ctxt->rep_prefix && (ctxt->d & String)) {
/* All REP prefixes have the same first termination condition */
if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
string_registers_quirk(ctxt);
ctxt->eip = ctxt->_eip;
ctxt->eflags &= ~X86_EFLAGS_RF;
goto done;
}
}
}
if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
rc = segmented_read(ctxt, ctxt->src.addr.mem,
ctxt->src.valptr, ctxt->src.bytes);
if (rc != X86EMUL_CONTINUE)
goto done;
ctxt->src.orig_val64 = ctxt->src.val64;
}
if (ctxt->src2.type == OP_MEM) {
rc = segmented_read(ctxt, ctxt->src2.addr.mem,
&ctxt->src2.val, ctxt->src2.bytes);
if (rc != X86EMUL_CONTINUE)
goto done;
}
if ((ctxt->d & DstMask) == ImplicitOps)
goto special_insn;
if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
/* optimisation - avoid slow emulated read if Mov */
rc = segmented_read(ctxt, ctxt->dst.addr.mem,
&ctxt->dst.val, ctxt->dst.bytes);
if (rc != X86EMUL_CONTINUE) {
if (!(ctxt->d & NoWrite) &&
rc == X86EMUL_PROPAGATE_FAULT &&
ctxt->exception.vector == PF_VECTOR)
ctxt->exception.error_code |= PFERR_WRITE_MASK;
goto done;
}
}
/* Copy full 64-bit value for CMPXCHG8B. */
ctxt->dst.orig_val64 = ctxt->dst.val64;
special_insn:
if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_POST_MEMACCESS);
if (rc != X86EMUL_CONTINUE)
goto done;
}
if (ctxt->rep_prefix && (ctxt->d & String))
ctxt->eflags |= X86_EFLAGS_RF;
else
ctxt->eflags &= ~X86_EFLAGS_RF;
if (ctxt->execute) {
if (ctxt->d & Fastop) {
void (*fop)(struct fastop *) = (void *)ctxt->execute;
rc = fastop(ctxt, fop);
if (rc != X86EMUL_CONTINUE)
goto done;
goto writeback;
}
rc = ctxt->execute(ctxt);
if (rc != X86EMUL_CONTINUE)
goto done;
goto writeback;
}
if (ctxt->opcode_len == 2)
goto twobyte_insn;
else if (ctxt->opcode_len == 3)
goto threebyte_insn;
switch (ctxt->b) {
case 0x70 ... 0x7f: /* jcc (short) */
if (test_cc(ctxt->b, ctxt->eflags))
rc = jmp_rel(ctxt, ctxt->src.val);
break;
case 0x8d: /* lea r16/r32, m */
ctxt->dst.val = ctxt->src.addr.mem.ea;
break;
case 0x90 ... 0x97: /* nop / xchg reg, rax */
if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
ctxt->dst.type = OP_NONE;
else
rc = em_xchg(ctxt);
break;
case 0x98: /* cbw/cwde/cdqe */
switch (ctxt->op_bytes) {
case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
}
break;
case 0xcc: /* int3 */
rc = emulate_int(ctxt, 3);
break;
case 0xcd: /* int n */
rc = emulate_int(ctxt, ctxt->src.val);
break;
case 0xce: /* into */
if (ctxt->eflags & X86_EFLAGS_OF)
rc = emulate_int(ctxt, 4);
break;
case 0xe9: /* jmp rel */
case 0xeb: /* jmp rel short */
rc = jmp_rel(ctxt, ctxt->src.val);
ctxt->dst.type = OP_NONE; /* Disable writeback. */
break;
case 0xf4: /* hlt */
ctxt->ops->halt(ctxt);
break;
case 0xf5: /* cmc */
/* complement carry flag from eflags reg */
ctxt->eflags ^= X86_EFLAGS_CF;
break;
case 0xf8: /* clc */
ctxt->eflags &= ~X86_EFLAGS_CF;
break;
case 0xf9: /* stc */
ctxt->eflags |= X86_EFLAGS_CF;
break;
case 0xfc: /* cld */
ctxt->eflags &= ~X86_EFLAGS_DF;
break;
case 0xfd: /* std */
ctxt->eflags |= X86_EFLAGS_DF;
break;
default:
goto cannot_emulate;
}
if (rc != X86EMUL_CONTINUE)
goto done;
writeback:
if (ctxt->d & SrcWrite) {
BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
rc = writeback(ctxt, &ctxt->src);
if (rc != X86EMUL_CONTINUE)
goto done;
}
if (!(ctxt->d & NoWrite)) {
rc = writeback(ctxt, &ctxt->dst);
if (rc != X86EMUL_CONTINUE)
goto done;
}
/*
* Restore dst type in case the decoding will be reused
* (happens for string instructions).
*/
ctxt->dst.type = saved_dst_type;
if ((ctxt->d & SrcMask) == SrcSI)
string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
if ((ctxt->d & DstMask) == DstDI)
string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
if (ctxt->rep_prefix && (ctxt->d & String)) {
unsigned int count;
struct read_cache *r = &ctxt->io_read;
if ((ctxt->d & SrcMask) == SrcSI)
count = ctxt->src.count;
else
count = ctxt->dst.count;
register_address_increment(ctxt, VCPU_REGS_RCX, -count);
if (!string_insn_completed(ctxt)) {
/*
* Re-enter guest when pio read ahead buffer is empty
* or, if it is not used, after every 1024 iterations.
*/
if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
(r->end == 0 || r->end != r->pos)) {
/*
* Reset read cache. Usually happens before
* decode, but since the instruction is restarted
* we have to do it here.
*/
ctxt->mem_read.end = 0;
writeback_registers(ctxt);
return EMULATION_RESTART;
}
goto done; /* skip rip writeback */
}
ctxt->eflags &= ~X86_EFLAGS_RF;
}
ctxt->eip = ctxt->_eip;
done:
if (rc == X86EMUL_PROPAGATE_FAULT) {
WARN_ON(ctxt->exception.vector > 0x1f);
ctxt->have_exception = true;
}
if (rc == X86EMUL_INTERCEPTED)
return EMULATION_INTERCEPTED;
if (rc == X86EMUL_CONTINUE)
writeback_registers(ctxt);
return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
twobyte_insn:
switch (ctxt->b) {
case 0x09: /* wbinvd */
(ctxt->ops->wbinvd)(ctxt);
break;
case 0x08: /* invd */
case 0x0d: /* GrpP (prefetch) */
case 0x18: /* Grp16 (prefetch/nop) */
case 0x1f: /* nop */
break;
case 0x20: /* mov cr, reg */
ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
break;
case 0x21: /* mov from dr to reg */
ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
break;
case 0x40 ... 0x4f: /* cmov */
if (test_cc(ctxt->b, ctxt->eflags))
ctxt->dst.val = ctxt->src.val;
else if (ctxt->op_bytes != 4)
ctxt->dst.type = OP_NONE; /* no writeback */
break;
case 0x80 ... 0x8f: /* jnz rel, etc */
if (test_cc(ctxt->b, ctxt->eflags))
rc = jmp_rel(ctxt, ctxt->src.val);
break;
case 0x90 ... 0x9f: /* setcc r/m8 */
ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
break;
case 0xb6 ... 0xb7: /* movzx */
ctxt->dst.bytes = ctxt->op_bytes;
ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
: (u16) ctxt->src.val;
break;
case 0xbe ... 0xbf: /* movsx */
ctxt->dst.bytes = ctxt->op_bytes;
ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
(s16) ctxt->src.val;
break;
default:
goto cannot_emulate;
}
threebyte_insn:
if (rc != X86EMUL_CONTINUE)
goto done;
goto writeback;
cannot_emulate:
return EMULATION_FAILED;
}
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
invalidate_registers(ctxt);
}
void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
writeback_registers(ctxt);
}
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_5500_0 |
crossvul-cpp_data_good_1596_2 | /*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011 Gunnar Beutner
* Copyright (c) 2012 Cyril Plisko. All rights reserved.
*/
#include <stdio.h>
#include <strings.h>
#include <fcntl.h>
#include <sys/wait.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <libzfs.h>
#include <libshare.h>
#include "libshare_impl.h"
static boolean_t nfs_available(void);
static sa_fstype_t *nfs_fstype;
/*
* nfs_exportfs_temp_fd refers to a temporary copy of the output
* from exportfs -v.
*/
static int nfs_exportfs_temp_fd = -1;
typedef int (*nfs_host_callback_t)(const char *sharepath, const char *host,
const char *security, const char *access, void *cookie);
typedef struct nfs_host_cookie_s {
nfs_host_callback_t callback;
const char *sharepath;
void *cookie;
const char *security;
} nfs_host_cookie_t;
/*
* Helper function for foreach_nfs_host(). This function checks whether the
* current share option is a host specification and invokes a callback
* function with information about the host.
*/
static int
foreach_nfs_host_cb(const char *opt, const char *value, void *pcookie)
{
int rc;
const char *access;
char *host_dup, *host, *next;
nfs_host_cookie_t *udata = (nfs_host_cookie_t *)pcookie;
#ifdef DEBUG
fprintf(stderr, "foreach_nfs_host_cb: key=%s, value=%s\n", opt, value);
#endif
if (strcmp(opt, "sec") == 0)
udata->security = value;
if (strcmp(opt, "rw") == 0 || strcmp(opt, "ro") == 0) {
if (value == NULL)
value = "*";
access = opt;
host_dup = strdup(value);
if (host_dup == NULL)
return (SA_NO_MEMORY);
host = host_dup;
do {
next = strchr(host, ':');
if (next != NULL) {
*next = '\0';
next++;
}
rc = udata->callback(udata->sharepath, host,
udata->security, access, udata->cookie);
if (rc != SA_OK) {
free(host_dup);
return (rc);
}
host = next;
} while (host != NULL);
free(host_dup);
}
return (SA_OK);
}
/*
* Invokes a callback function for all NFS hosts that are set for a share.
*/
static int
foreach_nfs_host(sa_share_impl_t impl_share, nfs_host_callback_t callback,
void *cookie)
{
nfs_host_cookie_t udata;
char *shareopts;
udata.callback = callback;
udata.sharepath = impl_share->sharepath;
udata.cookie = cookie;
udata.security = "sys";
shareopts = FSINFO(impl_share, nfs_fstype)->shareopts;
return foreach_nfs_shareopt(shareopts, foreach_nfs_host_cb,
&udata);
}
/*
* Converts a Solaris NFS host specification to its Linux equivalent.
*/
static int
get_linux_hostspec(const char *solaris_hostspec, char **plinux_hostspec)
{
/*
* For now we just support CIDR masks (e.g. @192.168.0.0/16) and host
* wildcards (e.g. *.example.org).
*/
if (solaris_hostspec[0] == '@') {
/*
* Solaris host specifier, e.g. @192.168.0.0/16; we just need
* to skip the @ in this case
*/
*plinux_hostspec = strdup(solaris_hostspec + 1);
} else {
*plinux_hostspec = strdup(solaris_hostspec);
}
if (*plinux_hostspec == NULL) {
return (SA_NO_MEMORY);
}
return (SA_OK);
}
/*
* Used internally by nfs_enable_share to enable sharing for a single host.
*/
static int
nfs_enable_share_one(const char *sharepath, const char *host,
const char *security, const char *access, void *pcookie)
{
int rc;
char *linuxhost, *hostpath, *opts;
const char *linux_opts = (const char *)pcookie;
char *argv[6];
/* exportfs -i -o sec=XX,rX,<opts> <host>:<sharepath> */
rc = get_linux_hostspec(host, &linuxhost);
if (rc < 0)
exit(1);
hostpath = malloc(strlen(linuxhost) + 1 + strlen(sharepath) + 1);
if (hostpath == NULL) {
free(linuxhost);
exit(1);
}
sprintf(hostpath, "%s:%s", linuxhost, sharepath);
free(linuxhost);
if (linux_opts == NULL)
linux_opts = "";
opts = malloc(4 + strlen(security) + 4 + strlen(linux_opts) + 1);
if (opts == NULL)
exit(1);
sprintf(opts, "sec=%s,%s,%s", security, access, linux_opts);
#ifdef DEBUG
fprintf(stderr, "sharing %s with opts %s\n", hostpath, opts);
#endif
argv[0] = "/usr/sbin/exportfs";
argv[1] = "-i";
argv[2] = "-o";
argv[3] = opts;
argv[4] = hostpath;
argv[5] = NULL;
rc = libzfs_run_process(argv[0], argv, 0);
free(hostpath);
free(opts);
if (rc < 0)
return (SA_SYSTEM_ERR);
else
return (SA_OK);
}
/*
* Adds a Linux share option to an array of NFS options.
*/
static int
add_linux_shareopt(char **plinux_opts, const char *key, const char *value)
{
size_t len = 0;
char *new_linux_opts;
if (*plinux_opts != NULL)
len = strlen(*plinux_opts);
new_linux_opts = realloc(*plinux_opts, len + 1 + strlen(key) +
(value ? 1 + strlen(value) : 0) + 1);
if (new_linux_opts == NULL)
return (SA_NO_MEMORY);
new_linux_opts[len] = '\0';
if (len > 0)
strcat(new_linux_opts, ",");
strcat(new_linux_opts, key);
if (value != NULL) {
strcat(new_linux_opts, "=");
strcat(new_linux_opts, value);
}
*plinux_opts = new_linux_opts;
return (SA_OK);
}
/*
* Validates and converts a single Solaris share option to its Linux
* equivalent.
*/
static int
get_linux_shareopts_cb(const char *key, const char *value, void *cookie)
{
char **plinux_opts = (char **)cookie;
/* host-specific options; these are taken care of elsewhere */
if (strcmp(key, "ro") == 0 || strcmp(key, "rw") == 0 ||
strcmp(key, "sec") == 0)
return (SA_OK);
if (strcmp(key, "anon") == 0)
key = "anonuid";
if (strcmp(key, "root_mapping") == 0) {
(void) add_linux_shareopt(plinux_opts, "root_squash", NULL);
key = "anonuid";
}
if (strcmp(key, "nosub") == 0)
key = "subtree_check";
if (strcmp(key, "insecure") != 0 && strcmp(key, "secure") != 0 &&
strcmp(key, "async") != 0 && strcmp(key, "sync") != 0 &&
strcmp(key, "no_wdelay") != 0 && strcmp(key, "wdelay") != 0 &&
strcmp(key, "nohide") != 0 && strcmp(key, "hide") != 0 &&
strcmp(key, "crossmnt") != 0 &&
strcmp(key, "no_subtree_check") != 0 &&
strcmp(key, "subtree_check") != 0 &&
strcmp(key, "insecure_locks") != 0 &&
strcmp(key, "secure_locks") != 0 &&
strcmp(key, "no_auth_nlm") != 0 && strcmp(key, "auth_nlm") != 0 &&
strcmp(key, "no_acl") != 0 && strcmp(key, "mountpoint") != 0 &&
strcmp(key, "mp") != 0 && strcmp(key, "fsuid") != 0 &&
strcmp(key, "refer") != 0 && strcmp(key, "replicas") != 0 &&
strcmp(key, "root_squash") != 0 &&
strcmp(key, "no_root_squash") != 0 &&
strcmp(key, "all_squash") != 0 &&
strcmp(key, "no_all_squash") != 0 && strcmp(key, "fsid") != 0 &&
strcmp(key, "anonuid") != 0 && strcmp(key, "anongid") != 0) {
return (SA_SYNTAX_ERR);
}
(void) add_linux_shareopt(plinux_opts, key, value);
return (SA_OK);
}
/*
* Takes a string containing Solaris share options (e.g. "sync,no_acl") and
* converts them to a NULL-terminated array of Linux NFS options.
*/
static int
get_linux_shareopts(const char *shareopts, char **plinux_opts)
{
int rc;
assert(plinux_opts != NULL);
*plinux_opts = NULL;
/* default options for Solaris shares */
(void) add_linux_shareopt(plinux_opts, "no_subtree_check", NULL);
(void) add_linux_shareopt(plinux_opts, "no_root_squash", NULL);
(void) add_linux_shareopt(plinux_opts, "mountpoint", NULL);
rc = foreach_shareopt(shareopts, get_linux_shareopts_cb,
plinux_opts);
if (rc != SA_OK) {
free(*plinux_opts);
*plinux_opts = NULL;
}
return (rc);
}
/*
* Enables NFS sharing for the specified share.
*/
static int
nfs_enable_share(sa_share_impl_t impl_share)
{
char *shareopts, *linux_opts;
int rc;
if (!nfs_available()) {
return (SA_SYSTEM_ERR);
}
shareopts = FSINFO(impl_share, nfs_fstype)->shareopts;
if (shareopts == NULL)
return (SA_OK);
rc = get_linux_shareopts(shareopts, &linux_opts);
if (rc != SA_OK)
return (rc);
rc = foreach_nfs_host(impl_share, nfs_enable_share_one, linux_opts);
free(linux_opts);
return (rc);
}
/*
* Used internally by nfs_disable_share to disable sharing for a single host.
*/
static int
nfs_disable_share_one(const char *sharepath, const char *host,
const char *security, const char *access, void *cookie)
{
int rc;
char *linuxhost, *hostpath;
char *argv[4];
rc = get_linux_hostspec(host, &linuxhost);
if (rc < 0)
exit(1);
hostpath = malloc(strlen(linuxhost) + 1 + strlen(sharepath) + 1);
if (hostpath == NULL) {
free(linuxhost);
exit(1);
}
sprintf(hostpath, "%s:%s", linuxhost, sharepath);
free(linuxhost);
#ifdef DEBUG
fprintf(stderr, "unsharing %s\n", hostpath);
#endif
argv[0] = "/usr/sbin/exportfs";
argv[1] = "-u";
argv[2] = hostpath;
argv[3] = NULL;
rc = libzfs_run_process(argv[0], argv, 0);
free(hostpath);
if (rc < 0)
return (SA_SYSTEM_ERR);
else
return (SA_OK);
}
/*
* Disables NFS sharing for the specified share.
*/
static int
nfs_disable_share(sa_share_impl_t impl_share)
{
if (!nfs_available()) {
/*
* The share can't possibly be active, so nothing
* needs to be done to disable it.
*/
return (SA_OK);
}
return (foreach_nfs_host(impl_share, nfs_disable_share_one, NULL));
}
/*
* Checks whether the specified NFS share options are syntactically correct.
*/
static int
nfs_validate_shareopts(const char *shareopts)
{
char *linux_opts;
int rc;
rc = get_linux_shareopts(shareopts, &linux_opts);
if (rc != SA_OK)
return (rc);
free(linux_opts);
return (SA_OK);
}
/*
* Checks whether a share is currently active.
*/
static boolean_t
nfs_is_share_active(sa_share_impl_t impl_share)
{
char line[512];
char *tab, *cur;
FILE *nfs_exportfs_temp_fp;
if (!nfs_available())
return (B_FALSE);
nfs_exportfs_temp_fp = fdopen(dup(nfs_exportfs_temp_fd), "r");
if (nfs_exportfs_temp_fp == NULL ||
fseek(nfs_exportfs_temp_fp, 0, SEEK_SET) < 0) {
fclose(nfs_exportfs_temp_fp);
return (B_FALSE);
}
while (fgets(line, sizeof (line), nfs_exportfs_temp_fp) != NULL) {
/*
* exportfs uses separate lines for the share path
* and the export options when the share path is longer
* than a certain number of characters; this ignores
* the option lines
*/
if (line[0] == '\t')
continue;
tab = strchr(line, '\t');
if (tab != NULL) {
*tab = '\0';
cur = tab - 1;
} else {
/*
* there's no tab character, which means the
* NFS options are on a separate line; we just
* need to remove the new-line character
* at the end of the line
*/
cur = line + strlen(line) - 1;
}
/* remove trailing spaces and new-line characters */
while (cur >= line && (*cur == ' ' || *cur == '\n'))
*cur-- = '\0';
if (strcmp(line, impl_share->sharepath) == 0) {
fclose(nfs_exportfs_temp_fp);
return (B_TRUE);
}
}
fclose(nfs_exportfs_temp_fp);
return (B_FALSE);
}
/*
* Called to update a share's options. A share's options might be out of
* date if the share was loaded from disk (i.e. /etc/dfs/sharetab) and the
* "sharenfs" dataset property has changed in the meantime. This function
* also takes care of re-enabling the share if necessary.
*/
static int
nfs_update_shareopts(sa_share_impl_t impl_share, const char *resource,
const char *shareopts)
{
char *shareopts_dup;
boolean_t needs_reshare = B_FALSE;
char *old_shareopts;
FSINFO(impl_share, nfs_fstype)->active =
nfs_is_share_active(impl_share);
old_shareopts = FSINFO(impl_share, nfs_fstype)->shareopts;
if (strcmp(shareopts, "on") == 0)
shareopts = "rw";
if (FSINFO(impl_share, nfs_fstype)->active && old_shareopts != NULL &&
strcmp(old_shareopts, shareopts) != 0) {
needs_reshare = B_TRUE;
nfs_disable_share(impl_share);
}
shareopts_dup = strdup(shareopts);
if (shareopts_dup == NULL)
return (SA_NO_MEMORY);
if (old_shareopts != NULL)
free(old_shareopts);
FSINFO(impl_share, nfs_fstype)->shareopts = shareopts_dup;
if (needs_reshare)
nfs_enable_share(impl_share);
return (SA_OK);
}
/*
* Clears a share's NFS options. Used by libshare to
* clean up shares that are about to be free()'d.
*/
static void
nfs_clear_shareopts(sa_share_impl_t impl_share)
{
free(FSINFO(impl_share, nfs_fstype)->shareopts);
FSINFO(impl_share, nfs_fstype)->shareopts = NULL;
}
static const sa_share_ops_t nfs_shareops = {
.enable_share = nfs_enable_share,
.disable_share = nfs_disable_share,
.validate_shareopts = nfs_validate_shareopts,
.update_shareopts = nfs_update_shareopts,
.clear_shareopts = nfs_clear_shareopts,
};
/*
* nfs_check_exportfs() checks that the exportfs command runs
* and also maintains a temporary copy of the output from
* exportfs -v.
* To update this temporary copy simply call this function again.
*
* TODO : Use /var/lib/nfs/etab instead of our private copy.
* But must implement locking to prevent concurrent access.
*
* TODO : The temporary file descriptor is never closed since
* there is no libshare_nfs_fini() function.
*/
static int
nfs_check_exportfs(void)
{
pid_t pid;
int rc, status;
static char nfs_exportfs_tempfile[] = "/tmp/exportfs.XXXXXX";
/*
* Close any existing temporary copies of output from exportfs.
* We have already called unlink() so the file will be deleted.
*/
if (nfs_exportfs_temp_fd >= 0)
close(nfs_exportfs_temp_fd);
nfs_exportfs_temp_fd = mkstemp(nfs_exportfs_tempfile);
if (nfs_exportfs_temp_fd < 0)
return (SA_SYSTEM_ERR);
unlink(nfs_exportfs_tempfile);
fcntl(nfs_exportfs_temp_fd, F_SETFD, FD_CLOEXEC);
pid = fork();
if (pid < 0) {
(void) close(nfs_exportfs_temp_fd);
nfs_exportfs_temp_fd = -1;
return (SA_SYSTEM_ERR);
}
if (pid > 0) {
while ((rc = waitpid(pid, &status, 0)) <= 0 && errno == EINTR);
if (rc <= 0) {
(void) close(nfs_exportfs_temp_fd);
nfs_exportfs_temp_fd = -1;
return (SA_SYSTEM_ERR);
}
if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
(void) close(nfs_exportfs_temp_fd);
nfs_exportfs_temp_fd = -1;
return (SA_CONFIG_ERR);
}
return (SA_OK);
}
/* child */
/* exportfs -v */
if (dup2(nfs_exportfs_temp_fd, STDOUT_FILENO) < 0)
exit(1);
rc = execlp("/usr/sbin/exportfs", "exportfs", "-v", NULL);
if (rc < 0) {
exit(1);
}
exit(0);
}
/*
* Provides a convenient wrapper for determining NFS availability.
*/
static boolean_t
nfs_available(void)
{
if (nfs_exportfs_temp_fd == -1)
(void) nfs_check_exportfs();
return ((nfs_exportfs_temp_fd != -1) ? B_TRUE : B_FALSE);
}
/*
* Initializes the NFS functionality of libshare.
*/
void
libshare_nfs_init(void)
{
nfs_fstype = register_fstype("nfs", &nfs_shareops);
}
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_1596_2 |
crossvul-cpp_data_bad_5098_0 | /*
* Copyright (c) 2014, Ericsson AB
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "core.h"
#include "bearer.h"
#include "link.h"
#include "name_table.h"
#include "socket.h"
#include "node.h"
#include "net.h"
#include <net/genetlink.h>
#include <linux/tipc_config.h>
/* The legacy API had an artificial message length limit called
* ULTRA_STRING_MAX_LEN.
*/
#define ULTRA_STRING_MAX_LEN 32768
#define TIPC_SKB_MAX TLV_SPACE(ULTRA_STRING_MAX_LEN)
#define REPLY_TRUNCATED "<truncated>\n"
struct tipc_nl_compat_msg {
u16 cmd;
int rep_type;
int rep_size;
int req_type;
struct net *net;
struct sk_buff *rep;
struct tlv_desc *req;
struct sock *dst_sk;
};
struct tipc_nl_compat_cmd_dump {
int (*header)(struct tipc_nl_compat_msg *);
int (*dumpit)(struct sk_buff *, struct netlink_callback *);
int (*format)(struct tipc_nl_compat_msg *msg, struct nlattr **attrs);
};
struct tipc_nl_compat_cmd_doit {
int (*doit)(struct sk_buff *skb, struct genl_info *info);
int (*transcode)(struct tipc_nl_compat_cmd_doit *cmd,
struct sk_buff *skb, struct tipc_nl_compat_msg *msg);
};
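/*
 * Returns how many bytes may still be appended to @skb without
 * exceeding the legacy TIPC_SKB_MAX reply limit.
 */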
static int tipc_skb_tailroom(struct sk_buff *skb)
{
int tailroom;
int limit;
tailroom = skb_tailroom(skb);
limit = TIPC_SKB_MAX - skb->len;
if (tailroom < limit)
return tailroom;
return limit;
}
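/* Appends one TLV of @type/@len to @skb, copying @data when given. */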
static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
{
struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb);
if (tipc_skb_tailroom(skb) < TLV_SPACE(len))
return -EMSGSIZE;
skb_put(skb, TLV_SPACE(len));
tlv->tlv_type = htons(type);
tlv->tlv_len = htons(TLV_LENGTH(len));
if (len && data)
memcpy(TLV_DATA(tlv), data, len);
return 0;
}
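/*
 * Starts an empty TLV of @type at the head of @skb; its payload is
 * appended later, e.g. via tipc_tlv_sprintf().
 */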
static void tipc_tlv_init(struct sk_buff *skb, u16 type)
{
struct tlv_desc *tlv = (struct tlv_desc *)skb->data;
TLV_SET_LEN(tlv, 0);
TLV_SET_TYPE(tlv, type);
skb_put(skb, sizeof(struct tlv_desc));
}
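/*
 * printf-style append into the TLV at the head of @skb, growing both
 * the TLV length and the skb tail; returns the number of bytes added.
 */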
static int tipc_tlv_sprintf(struct sk_buff *skb, const char *fmt, ...)
{
int n;
u16 len;
u32 rem;
char *buf;
struct tlv_desc *tlv;
va_list args;
rem = tipc_skb_tailroom(skb);
tlv = (struct tlv_desc *)skb->data;
len = TLV_GET_LEN(tlv);
buf = TLV_DATA(tlv) + len;
va_start(args, fmt);
n = vscnprintf(buf, rem, fmt, args);
va_end(args);
TLV_SET_LEN(tlv, n + len);
skb_put(skb, n);
return n;
}
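/* Allocates an skb sized for a TLV reply with @size bytes of payload. */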
static struct sk_buff *tipc_tlv_alloc(int size)
{
int hdr_len;
struct sk_buff *buf;
size = TLV_SPACE(size);
hdr_len = nlmsg_total_size(GENL_HDRLEN + TIPC_GENL_HDRLEN);
buf = alloc_skb(hdr_len + size, GFP_KERNEL);
if (!buf)
return NULL;
skb_reserve(buf, hdr_len);
return buf;
}
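/* Builds a legacy error-string TLV reply carrying @str. */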
static struct sk_buff *tipc_get_err_tlv(char *str)
{
int str_len = strlen(str) + 1;
struct sk_buff *buf;
buf = tipc_tlv_alloc(TLV_SPACE(str_len));
if (buf)
tipc_add_tlv(buf, TIPC_TLV_ERROR_STRING, str, str_len);
return buf;
}
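/*
 * Repeatedly invokes the netlink dump callback and converts every
 * returned message into legacy TLV format via cmd->format, until the
 * dump is exhausted or the reply buffer fills up; in the latter case
 * the reply is marked truncated, as the legacy API did.
 */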
static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
struct tipc_nl_compat_msg *msg,
struct sk_buff *arg)
{
int len = 0;
int err;
struct sk_buff *buf;
struct nlmsghdr *nlmsg;
struct netlink_callback cb;
memset(&cb, 0, sizeof(cb));
cb.nlh = (struct nlmsghdr *)arg->data;
cb.skb = arg;
buf = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
buf->sk = msg->dst_sk;
do {
int rem;
len = (*cmd->dumpit)(buf, &cb);
nlmsg_for_each_msg(nlmsg, nlmsg_hdr(buf), len, rem) {
struct nlattr **attrs;
err = tipc_nlmsg_parse(nlmsg, &attrs);
if (err)
goto err_out;
err = (*cmd->format)(msg, attrs);
if (err)
goto err_out;
if (tipc_skb_tailroom(msg->rep) <= 1) {
err = -EMSGSIZE;
goto err_out;
}
}
skb_reset_tail_pointer(buf);
buf->len = 0;
} while (len);
err = 0;
err_out:
kfree_skb(buf);
if (err == -EMSGSIZE) {
/* The legacy API only considered messages filling
* "ULTRA_STRING_MAX_LEN" to be truncated.
*/
if ((TIPC_SKB_MAX - msg->rep->len) <= 1) {
char *tail = skb_tail_pointer(msg->rep);
if (*tail != '\0')
sprintf(tail - sizeof(REPLY_TRUNCATED) - 1,
REPLY_TRUNCATED);
}
return 0;
}
return err;
}
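/*
 * Allocates the legacy reply TLV, writes any fixed header, and drives
 * __tipc_nl_compat_dumpit() with a scratch skb as the dump argument.
 */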
static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
struct tipc_nl_compat_msg *msg)
{
int err;
struct sk_buff *arg;
if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type))
return -EINVAL;
msg->rep = tipc_tlv_alloc(msg->rep_size);
if (!msg->rep)
return -ENOMEM;
if (msg->rep_type)
tipc_tlv_init(msg->rep, msg->rep_type);
if (cmd->header)
(*cmd->header)(msg);
arg = nlmsg_new(0, GFP_KERNEL);
if (!arg) {
kfree_skb(msg->rep);
return -ENOMEM;
}
err = __tipc_nl_compat_dumpit(cmd, msg, arg);
if (err)
kfree_skb(msg->rep);
kfree_skb(arg);
return err;
}
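/*
 * Transcodes a legacy request into netlink attributes via
 * cmd->transcode, re-parses them into an attribute table, and hands
 * the result to the regular genl doit handler.
 */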
static int __tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
struct tipc_nl_compat_msg *msg)
{
int err;
struct sk_buff *doit_buf;
struct sk_buff *trans_buf;
struct nlattr **attrbuf;
struct genl_info info;
trans_buf = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!trans_buf)
return -ENOMEM;
err = (*cmd->transcode)(cmd, trans_buf, msg);
if (err)
goto trans_out;
attrbuf = kmalloc((tipc_genl_family.maxattr + 1) *
sizeof(struct nlattr *), GFP_KERNEL);
if (!attrbuf) {
err = -ENOMEM;
goto trans_out;
}
err = nla_parse(attrbuf, tipc_genl_family.maxattr,
(const struct nlattr *)trans_buf->data,
trans_buf->len, NULL);
if (err)
goto parse_out;
doit_buf = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!doit_buf) {
err = -ENOMEM;
goto parse_out;
}
doit_buf->sk = msg->dst_sk;
memset(&info, 0, sizeof(info));
info.attrs = attrbuf;
err = (*cmd->doit)(doit_buf, &info);
kfree_skb(doit_buf);
parse_out:
kfree(attrbuf);
trans_out:
kfree_skb(trans_buf);
return err;
}
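/*
 * Runs a transcoded doit command; the legacy API treats an empty
 * reply as success, so one is allocated on the way out.
 */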
static int tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
struct tipc_nl_compat_msg *msg)
{
int err;
if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type))
return -EINVAL;
err = __tipc_nl_compat_doit(cmd, msg);
if (err)
return err;
/* The legacy API considered an empty message a success message */
msg->rep = tipc_tlv_alloc(0);
if (!msg->rep)
return -ENOMEM;
return 0;
}
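/*
 * Formats one bearer's name from parsed netlink attributes into the
 * legacy TLV reply.
 */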
static int tipc_nl_compat_bearer_dump(struct tipc_nl_compat_msg *msg,
struct nlattr **attrs)
{
struct nlattr *bearer[TIPC_NLA_BEARER_MAX + 1];
int err;
if (!attrs[TIPC_NLA_BEARER])
return -EINVAL;
err = nla_parse_nested(bearer, TIPC_NLA_BEARER_MAX,
attrs[TIPC_NLA_BEARER], NULL);
if (err)
return err;
return tipc_add_tlv(msg->rep, TIPC_TLV_BEARER_NAME,
nla_data(bearer[TIPC_NLA_BEARER_NAME]),
nla_len(bearer[TIPC_NLA_BEARER_NAME]));
}
static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
struct sk_buff *skb,
struct tipc_nl_compat_msg *msg)
{
struct nlattr *prop;
struct nlattr *bearer;
struct tipc_bearer_config *b;
b = (struct tipc_bearer_config *)TLV_DATA(msg->req);
bearer = nla_nest_start(skb, TIPC_NLA_BEARER);
if (!bearer)
return -EMSGSIZE;
if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name))
return -EMSGSIZE;
if (nla_put_u32(skb, TIPC_NLA_BEARER_DOMAIN, ntohl(b->disc_domain)))
return -EMSGSIZE;
if (ntohl(b->priority) <= TIPC_MAX_LINK_PRI) {
prop = nla_nest_start(skb, TIPC_NLA_BEARER_PROP);
if (!prop)
return -EMSGSIZE;
if (nla_put_u32(skb, TIPC_NLA_PROP_PRIO, ntohl(b->priority)))
return -EMSGSIZE;
nla_nest_end(skb, prop);
}
nla_nest_end(skb, bearer);
return 0;
}
static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
struct sk_buff *skb,
struct tipc_nl_compat_msg *msg)
{
char *name;
struct nlattr *bearer;
name = (char *)TLV_DATA(msg->req);
bearer = nla_nest_start(skb, TIPC_NLA_BEARER);
if (!bearer)
return -EMSGSIZE;
if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name))
return -EMSGSIZE;
nla_nest_end(skb, bearer);
return 0;
}
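/* Integer percentage of @count in @total, rounded to the nearest unit. */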
static inline u32 perc(u32 count, u32 total)
{
return (count * 100 + (total / 2)) / total;
}
static void __fill_bc_link_stat(struct tipc_nl_compat_msg *msg,
struct nlattr *prop[], struct nlattr *stats[])
{
tipc_tlv_sprintf(msg->rep, " Window:%u packets\n",
nla_get_u32(prop[TIPC_NLA_PROP_WIN]));
tipc_tlv_sprintf(msg->rep,
" RX packets:%u fragments:%u/%u bundles:%u/%u\n",
nla_get_u32(stats[TIPC_NLA_STATS_RX_INFO]),
nla_get_u32(stats[TIPC_NLA_STATS_RX_FRAGMENTS]),
nla_get_u32(stats[TIPC_NLA_STATS_RX_FRAGMENTED]),
nla_get_u32(stats[TIPC_NLA_STATS_RX_BUNDLES]),
nla_get_u32(stats[TIPC_NLA_STATS_RX_BUNDLED]));
tipc_tlv_sprintf(msg->rep,
" TX packets:%u fragments:%u/%u bundles:%u/%u\n",
nla_get_u32(stats[TIPC_NLA_STATS_TX_INFO]),
nla_get_u32(stats[TIPC_NLA_STATS_TX_FRAGMENTS]),
nla_get_u32(stats[TIPC_NLA_STATS_TX_FRAGMENTED]),
nla_get_u32(stats[TIPC_NLA_STATS_TX_BUNDLES]),
nla_get_u32(stats[TIPC_NLA_STATS_TX_BUNDLED]));
tipc_tlv_sprintf(msg->rep, " RX naks:%u defs:%u dups:%u\n",
nla_get_u32(stats[TIPC_NLA_STATS_RX_NACKS]),
nla_get_u32(stats[TIPC_NLA_STATS_RX_DEFERRED]),
nla_get_u32(stats[TIPC_NLA_STATS_DUPLICATES]));
tipc_tlv_sprintf(msg->rep, " TX naks:%u acks:%u dups:%u\n",
nla_get_u32(stats[TIPC_NLA_STATS_TX_NACKS]),
nla_get_u32(stats[TIPC_NLA_STATS_TX_ACKS]),
nla_get_u32(stats[TIPC_NLA_STATS_RETRANSMITTED]));
tipc_tlv_sprintf(msg->rep,
" Congestion link:%u Send queue max:%u avg:%u",
nla_get_u32(stats[TIPC_NLA_STATS_LINK_CONGS]),
nla_get_u32(stats[TIPC_NLA_STATS_MAX_QUEUE]),
nla_get_u32(stats[TIPC_NLA_STATS_AVG_QUEUE]));
}
static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
struct nlattr **attrs)
{
char *name;
struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
int err;
if (!attrs[TIPC_NLA_LINK])
return -EINVAL;
err = nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK],
NULL);
if (err)
return err;
if (!link[TIPC_NLA_LINK_PROP])
return -EINVAL;
err = nla_parse_nested(prop, TIPC_NLA_PROP_MAX,
link[TIPC_NLA_LINK_PROP], NULL);
if (err)
return err;
if (!link[TIPC_NLA_LINK_STATS])
return -EINVAL;
err = nla_parse_nested(stats, TIPC_NLA_STATS_MAX,
link[TIPC_NLA_LINK_STATS], NULL);
if (err)
return err;
name = (char *)TLV_DATA(msg->req);
if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
return 0;
tipc_tlv_sprintf(msg->rep, "\nLink <%s>\n",
nla_data(link[TIPC_NLA_LINK_NAME]));
if (link[TIPC_NLA_LINK_BROADCAST]) {
__fill_bc_link_stat(msg, prop, stats);
return 0;
}
if (link[TIPC_NLA_LINK_ACTIVE])
tipc_tlv_sprintf(msg->rep, " ACTIVE");
else if (link[TIPC_NLA_LINK_UP])
tipc_tlv_sprintf(msg->rep, " STANDBY");
else
tipc_tlv_sprintf(msg->rep, " DEFUNCT");
tipc_tlv_sprintf(msg->rep, " MTU:%u Priority:%u",
nla_get_u32(link[TIPC_NLA_LINK_MTU]),
nla_get_u32(prop[TIPC_NLA_PROP_PRIO]));
tipc_tlv_sprintf(msg->rep, " Tolerance:%u ms Window:%u packets\n",
nla_get_u32(prop[TIPC_NLA_PROP_TOL]),
nla_get_u32(prop[TIPC_NLA_PROP_WIN]));
tipc_tlv_sprintf(msg->rep,
" RX packets:%u fragments:%u/%u bundles:%u/%u\n",
nla_get_u32(link[TIPC_NLA_LINK_RX]) -
nla_get_u32(stats[TIPC_NLA_STATS_RX_INFO]),
nla_get_u32(stats[TIPC_NLA_STATS_RX_FRAGMENTS]),
nla_get_u32(stats[TIPC_NLA_STATS_RX_FRAGMENTED]),
nla_get_u32(stats[TIPC_NLA_STATS_RX_BUNDLES]),
nla_get_u32(stats[TIPC_NLA_STATS_RX_BUNDLED]));
tipc_tlv_sprintf(msg->rep,
" TX packets:%u fragments:%u/%u bundles:%u/%u\n",
nla_get_u32(link[TIPC_NLA_LINK_TX]) -
nla_get_u32(stats[TIPC_NLA_STATS_TX_INFO]),
nla_get_u32(stats[TIPC_NLA_STATS_TX_FRAGMENTS]),
nla_get_u32(stats[TIPC_NLA_STATS_TX_FRAGMENTED]),
nla_get_u32(stats[TIPC_NLA_STATS_TX_BUNDLES]),
nla_get_u32(stats[TIPC_NLA_STATS_TX_BUNDLED]));
tipc_tlv_sprintf(msg->rep,
" TX profile sample:%u packets average:%u octets\n",
nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_CNT]),
nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_TOT]) /
nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT]));
tipc_tlv_sprintf(msg->rep,
" 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% ",
perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P0]),
nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])),
perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P1]),
nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])),
perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P2]),
nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])),
perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P3]),
nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])));
tipc_tlv_sprintf(msg->rep, "-16384:%u%% -32768:%u%% -66000:%u%%\n",
perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P4]),
nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])),
perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P5]),
nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])),
perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P6]),
nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])));
tipc_tlv_sprintf(msg->rep,
" RX states:%u probes:%u naks:%u defs:%u dups:%u\n",
nla_get_u32(stats[TIPC_NLA_STATS_RX_STATES]),
nla_get_u32(stats[TIPC_NLA_STATS_RX_PROBES]),
nla_get_u32(stats[TIPC_NLA_STATS_RX_NACKS]),
nla_get_u32(stats[TIPC_NLA_STATS_RX_DEFERRED]),
nla_get_u32(stats[TIPC_NLA_STATS_DUPLICATES]));
tipc_tlv_sprintf(msg->rep,
" TX states:%u probes:%u naks:%u acks:%u dups:%u\n",
nla_get_u32(stats[TIPC_NLA_STATS_TX_STATES]),
nla_get_u32(stats[TIPC_NLA_STATS_TX_PROBES]),
nla_get_u32(stats[TIPC_NLA_STATS_TX_NACKS]),
nla_get_u32(stats[TIPC_NLA_STATS_TX_ACKS]),
nla_get_u32(stats[TIPC_NLA_STATS_RETRANSMITTED]));
tipc_tlv_sprintf(msg->rep,
" Congestion link:%u Send queue max:%u avg:%u",
nla_get_u32(stats[TIPC_NLA_STATS_LINK_CONGS]),
nla_get_u32(stats[TIPC_NLA_STATS_MAX_QUEUE]),
nla_get_u32(stats[TIPC_NLA_STATS_AVG_QUEUE]));
return 0;
}
static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
struct nlattr **attrs)
{
struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
struct tipc_link_info link_info;
int err;
if (!attrs[TIPC_NLA_LINK])
return -EINVAL;
err = nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK],
NULL);
if (err)
return err;
link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
strcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]));
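	/* Note: link_info is not zeroed first and strcpy() leaves the bytes
	 * of link_info.str past the terminating NUL uninitialized, so the
	 * tipc_add_tlv() call below copies stack data out with the reply. */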
return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO,
&link_info, sizeof(link_info));
}
static int __tipc_add_link_prop(struct sk_buff *skb,
struct tipc_nl_compat_msg *msg,
struct tipc_link_config *lc)
{
switch (msg->cmd) {
case TIPC_CMD_SET_LINK_PRI:
return nla_put_u32(skb, TIPC_NLA_PROP_PRIO, ntohl(lc->value));
case TIPC_CMD_SET_LINK_TOL:
return nla_put_u32(skb, TIPC_NLA_PROP_TOL, ntohl(lc->value));
case TIPC_CMD_SET_LINK_WINDOW:
return nla_put_u32(skb, TIPC_NLA_PROP_WIN, ntohl(lc->value));
}
return -EINVAL;
}
static int tipc_nl_compat_media_set(struct sk_buff *skb,
struct tipc_nl_compat_msg *msg)
{
struct nlattr *prop;
struct nlattr *media;
struct tipc_link_config *lc;
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
media = nla_nest_start(skb, TIPC_NLA_MEDIA);
if (!media)
return -EMSGSIZE;
if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name))
return -EMSGSIZE;
prop = nla_nest_start(skb, TIPC_NLA_MEDIA_PROP);
if (!prop)
return -EMSGSIZE;
__tipc_add_link_prop(skb, msg, lc);
nla_nest_end(skb, prop);
nla_nest_end(skb, media);
return 0;
}
static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
struct tipc_nl_compat_msg *msg)
{
struct nlattr *prop;
struct nlattr *bearer;
struct tipc_link_config *lc;
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
bearer = nla_nest_start(skb, TIPC_NLA_BEARER);
if (!bearer)
return -EMSGSIZE;
if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name))
return -EMSGSIZE;
prop = nla_nest_start(skb, TIPC_NLA_BEARER_PROP);
if (!prop)
return -EMSGSIZE;
__tipc_add_link_prop(skb, msg, lc);
nla_nest_end(skb, prop);
nla_nest_end(skb, bearer);
return 0;
}
static int __tipc_nl_compat_link_set(struct sk_buff *skb,
struct tipc_nl_compat_msg *msg)
{
struct nlattr *prop;
struct nlattr *link;
struct tipc_link_config *lc;
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
link = nla_nest_start(skb, TIPC_NLA_LINK);
if (!link)
return -EMSGSIZE;
if (nla_put_string(skb, TIPC_NLA_LINK_NAME, lc->name))
return -EMSGSIZE;
prop = nla_nest_start(skb, TIPC_NLA_LINK_PROP);
if (!prop)
return -EMSGSIZE;
__tipc_add_link_prop(skb, msg, lc);
nla_nest_end(skb, prop);
nla_nest_end(skb, link);
return 0;
}
static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
struct sk_buff *skb,
struct tipc_nl_compat_msg *msg)
{
struct tipc_link_config *lc;
struct tipc_bearer *bearer;
struct tipc_media *media;
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
media = tipc_media_find(lc->name);
if (media) {
cmd->doit = &tipc_nl_media_set;
return tipc_nl_compat_media_set(skb, msg);
}
bearer = tipc_bearer_find(msg->net, lc->name);
if (bearer) {
cmd->doit = &tipc_nl_bearer_set;
return tipc_nl_compat_bearer_set(skb, msg);
}
return __tipc_nl_compat_link_set(skb, msg);
}
static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
struct sk_buff *skb,
struct tipc_nl_compat_msg *msg)
{
char *name;
struct nlattr *link;
name = (char *)TLV_DATA(msg->req);
link = nla_nest_start(skb, TIPC_NLA_LINK);
if (!link)
return -EMSGSIZE;
if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name))
return -EMSGSIZE;
nla_nest_end(skb, link);
return 0;
}
static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg)
{
int i;
u32 depth;
struct tipc_name_table_query *ntq;
static const char * const header[] = {
"Type ",
"Lower Upper ",
"Port Identity ",
"Publication Scope"
};
ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
depth = ntohl(ntq->depth);
if (depth > 4)
depth = 4;
for (i = 0; i < depth; i++)
tipc_tlv_sprintf(msg->rep, header[i]);
tipc_tlv_sprintf(msg->rep, "\n");
return 0;
}
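/*
 * Example: a request with ntq->depth == htonl(3) prints the first three
 * column groups, "Type ", "Lower Upper " and "Port Identity "; any depth
 * greater than 4 is clamped so at most all four headers are printed.
 */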
static int tipc_nl_compat_name_table_dump(struct tipc_nl_compat_msg *msg,
struct nlattr **attrs)
{
char port_str[27];
struct tipc_name_table_query *ntq;
struct nlattr *nt[TIPC_NLA_NAME_TABLE_MAX + 1];
struct nlattr *publ[TIPC_NLA_PUBL_MAX + 1];
u32 node, depth, type, lowbound, upbound;
static const char * const scope_str[] = {"", " zone", " cluster",
" node"};
int err;
if (!attrs[TIPC_NLA_NAME_TABLE])
return -EINVAL;
err = nla_parse_nested(nt, TIPC_NLA_NAME_TABLE_MAX,
attrs[TIPC_NLA_NAME_TABLE], NULL);
if (err)
return err;
if (!nt[TIPC_NLA_NAME_TABLE_PUBL])
return -EINVAL;
err = nla_parse_nested(publ, TIPC_NLA_PUBL_MAX,
nt[TIPC_NLA_NAME_TABLE_PUBL], NULL);
if (err)
return err;
ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
depth = ntohl(ntq->depth);
type = ntohl(ntq->type);
lowbound = ntohl(ntq->lowbound);
upbound = ntohl(ntq->upbound);
if (!(depth & TIPC_NTQ_ALLTYPES) &&
(type != nla_get_u32(publ[TIPC_NLA_PUBL_TYPE])))
return 0;
if (lowbound && (lowbound > nla_get_u32(publ[TIPC_NLA_PUBL_UPPER])))
return 0;
if (upbound && (upbound < nla_get_u32(publ[TIPC_NLA_PUBL_LOWER])))
return 0;
tipc_tlv_sprintf(msg->rep, "%-10u ",
nla_get_u32(publ[TIPC_NLA_PUBL_TYPE]));
if (depth == 1)
goto out;
tipc_tlv_sprintf(msg->rep, "%-10u %-10u ",
nla_get_u32(publ[TIPC_NLA_PUBL_LOWER]),
nla_get_u32(publ[TIPC_NLA_PUBL_UPPER]));
if (depth == 2)
goto out;
node = nla_get_u32(publ[TIPC_NLA_PUBL_NODE]);
sprintf(port_str, "<%u.%u.%u:%u>", tipc_zone(node), tipc_cluster(node),
tipc_node(node), nla_get_u32(publ[TIPC_NLA_PUBL_REF]));
tipc_tlv_sprintf(msg->rep, "%-26s ", port_str);
if (depth == 3)
goto out;
tipc_tlv_sprintf(msg->rep, "%-10u %s",
nla_get_u32(publ[TIPC_NLA_PUBL_KEY]),
scope_str[nla_get_u32(publ[TIPC_NLA_PUBL_SCOPE])]);
out:
tipc_tlv_sprintf(msg->rep, "\n");
return 0;
}
static int __tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg,
struct nlattr **attrs)
{
u32 type, lower, upper;
struct nlattr *publ[TIPC_NLA_PUBL_MAX + 1];
int err;
if (!attrs[TIPC_NLA_PUBL])
return -EINVAL;
err = nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, attrs[TIPC_NLA_PUBL],
NULL);
if (err)
return err;
type = nla_get_u32(publ[TIPC_NLA_PUBL_TYPE]);
lower = nla_get_u32(publ[TIPC_NLA_PUBL_LOWER]);
upper = nla_get_u32(publ[TIPC_NLA_PUBL_UPPER]);
if (lower == upper)
tipc_tlv_sprintf(msg->rep, " {%u,%u}", type, lower);
else
tipc_tlv_sprintf(msg->rep, " {%u,%u,%u}", type, lower, upper);
return 0;
}
static int tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg, u32 sock)
{
int err;
void *hdr;
struct nlattr *nest;
struct sk_buff *args;
struct tipc_nl_compat_cmd_dump dump;
args = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (!args)
return -ENOMEM;
hdr = genlmsg_put(args, 0, 0, &tipc_genl_family, NLM_F_MULTI,
TIPC_NL_PUBL_GET);
nest = nla_nest_start(args, TIPC_NLA_SOCK);
if (!nest) {
kfree_skb(args);
return -EMSGSIZE;
}
if (nla_put_u32(args, TIPC_NLA_SOCK_REF, sock)) {
kfree_skb(args);
return -EMSGSIZE;
}
nla_nest_end(args, nest);
genlmsg_end(args, hdr);
dump.dumpit = tipc_nl_publ_dump;
dump.format = __tipc_nl_compat_publ_dump;
err = __tipc_nl_compat_dumpit(&dump, msg, args);
kfree_skb(args);
return err;
}
static int tipc_nl_compat_sk_dump(struct tipc_nl_compat_msg *msg,
struct nlattr **attrs)
{
int err;
u32 sock_ref;
struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
if (!attrs[TIPC_NLA_SOCK])
return -EINVAL;
err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, attrs[TIPC_NLA_SOCK],
NULL);
if (err)
return err;
sock_ref = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
tipc_tlv_sprintf(msg->rep, "%u:", sock_ref);
if (sock[TIPC_NLA_SOCK_CON]) {
u32 node;
struct nlattr *con[TIPC_NLA_CON_MAX + 1];
nla_parse_nested(con, TIPC_NLA_CON_MAX, sock[TIPC_NLA_SOCK_CON],
NULL);
node = nla_get_u32(con[TIPC_NLA_CON_NODE]);
tipc_tlv_sprintf(msg->rep, " connected to <%u.%u.%u:%u>",
tipc_zone(node),
tipc_cluster(node),
tipc_node(node),
nla_get_u32(con[TIPC_NLA_CON_SOCK]));
if (con[TIPC_NLA_CON_FLAG])
tipc_tlv_sprintf(msg->rep, " via {%u,%u}\n",
nla_get_u32(con[TIPC_NLA_CON_TYPE]),
nla_get_u32(con[TIPC_NLA_CON_INST]));
else
tipc_tlv_sprintf(msg->rep, "\n");
} else if (sock[TIPC_NLA_SOCK_HAS_PUBL]) {
tipc_tlv_sprintf(msg->rep, " bound to");
err = tipc_nl_compat_publ_dump(msg, sock_ref);
if (err)
return err;
}
tipc_tlv_sprintf(msg->rep, "\n");
return 0;
}
static int tipc_nl_compat_media_dump(struct tipc_nl_compat_msg *msg,
struct nlattr **attrs)
{
struct nlattr *media[TIPC_NLA_MEDIA_MAX + 1];
int err;
if (!attrs[TIPC_NLA_MEDIA])
return -EINVAL;
err = nla_parse_nested(media, TIPC_NLA_MEDIA_MAX, attrs[TIPC_NLA_MEDIA],
NULL);
if (err)
return err;
return tipc_add_tlv(msg->rep, TIPC_TLV_MEDIA_NAME,
nla_data(media[TIPC_NLA_MEDIA_NAME]),
nla_len(media[TIPC_NLA_MEDIA_NAME]));
}
static int tipc_nl_compat_node_dump(struct tipc_nl_compat_msg *msg,
struct nlattr **attrs)
{
struct tipc_node_info node_info;
struct nlattr *node[TIPC_NLA_NODE_MAX + 1];
int err;
if (!attrs[TIPC_NLA_NODE])
return -EINVAL;
err = nla_parse_nested(node, TIPC_NLA_NODE_MAX, attrs[TIPC_NLA_NODE],
NULL);
if (err)
return err;
node_info.addr = htonl(nla_get_u32(node[TIPC_NLA_NODE_ADDR]));
node_info.up = htonl(nla_get_flag(node[TIPC_NLA_NODE_UP]));
return tipc_add_tlv(msg->rep, TIPC_TLV_NODE_INFO, &node_info,
sizeof(node_info));
}
static int tipc_nl_compat_net_set(struct tipc_nl_compat_cmd_doit *cmd,
struct sk_buff *skb,
struct tipc_nl_compat_msg *msg)
{
u32 val;
struct nlattr *net;
val = ntohl(*(__be32 *)TLV_DATA(msg->req));
net = nla_nest_start(skb, TIPC_NLA_NET);
if (!net)
return -EMSGSIZE;
if (msg->cmd == TIPC_CMD_SET_NODE_ADDR) {
if (nla_put_u32(skb, TIPC_NLA_NET_ADDR, val))
return -EMSGSIZE;
} else if (msg->cmd == TIPC_CMD_SET_NETID) {
if (nla_put_u32(skb, TIPC_NLA_NET_ID, val))
return -EMSGSIZE;
}
nla_nest_end(skb, net);
return 0;
}
static int tipc_nl_compat_net_dump(struct tipc_nl_compat_msg *msg,
struct nlattr **attrs)
{
__be32 id;
struct nlattr *net[TIPC_NLA_NET_MAX + 1];
int err;
if (!attrs[TIPC_NLA_NET])
return -EINVAL;
err = nla_parse_nested(net, TIPC_NLA_NET_MAX, attrs[TIPC_NLA_NET],
NULL);
if (err)
return err;
id = htonl(nla_get_u32(net[TIPC_NLA_NET_ID]));
return tipc_add_tlv(msg->rep, TIPC_TLV_UNSIGNED, &id, sizeof(id));
}
static int tipc_cmd_show_stats_compat(struct tipc_nl_compat_msg *msg)
{
msg->rep = tipc_tlv_alloc(ULTRA_STRING_MAX_LEN);
if (!msg->rep)
return -ENOMEM;
tipc_tlv_init(msg->rep, TIPC_TLV_ULTRA_STRING);
tipc_tlv_sprintf(msg->rep, "TIPC version " TIPC_MOD_VER "\n");
return 0;
}
static int tipc_nl_compat_handle(struct tipc_nl_compat_msg *msg)
{
struct tipc_nl_compat_cmd_dump dump;
struct tipc_nl_compat_cmd_doit doit;
memset(&dump, 0, sizeof(dump));
memset(&doit, 0, sizeof(doit));
switch (msg->cmd) {
case TIPC_CMD_NOOP:
msg->rep = tipc_tlv_alloc(0);
if (!msg->rep)
return -ENOMEM;
return 0;
case TIPC_CMD_GET_BEARER_NAMES:
msg->rep_size = MAX_BEARERS * TLV_SPACE(TIPC_MAX_BEARER_NAME);
dump.dumpit = tipc_nl_bearer_dump;
dump.format = tipc_nl_compat_bearer_dump;
return tipc_nl_compat_dumpit(&dump, msg);
case TIPC_CMD_ENABLE_BEARER:
msg->req_type = TIPC_TLV_BEARER_CONFIG;
doit.doit = tipc_nl_bearer_enable;
doit.transcode = tipc_nl_compat_bearer_enable;
return tipc_nl_compat_doit(&doit, msg);
case TIPC_CMD_DISABLE_BEARER:
msg->req_type = TIPC_TLV_BEARER_NAME;
doit.doit = tipc_nl_bearer_disable;
doit.transcode = tipc_nl_compat_bearer_disable;
return tipc_nl_compat_doit(&doit, msg);
case TIPC_CMD_SHOW_LINK_STATS:
msg->req_type = TIPC_TLV_LINK_NAME;
msg->rep_size = ULTRA_STRING_MAX_LEN;
msg->rep_type = TIPC_TLV_ULTRA_STRING;
dump.dumpit = tipc_nl_node_dump_link;
dump.format = tipc_nl_compat_link_stat_dump;
return tipc_nl_compat_dumpit(&dump, msg);
case TIPC_CMD_GET_LINKS:
msg->req_type = TIPC_TLV_NET_ADDR;
msg->rep_size = ULTRA_STRING_MAX_LEN;
dump.dumpit = tipc_nl_node_dump_link;
dump.format = tipc_nl_compat_link_dump;
return tipc_nl_compat_dumpit(&dump, msg);
case TIPC_CMD_SET_LINK_TOL:
case TIPC_CMD_SET_LINK_PRI:
case TIPC_CMD_SET_LINK_WINDOW:
msg->req_type = TIPC_TLV_LINK_CONFIG;
doit.doit = tipc_nl_node_set_link;
doit.transcode = tipc_nl_compat_link_set;
return tipc_nl_compat_doit(&doit, msg);
case TIPC_CMD_RESET_LINK_STATS:
msg->req_type = TIPC_TLV_LINK_NAME;
doit.doit = tipc_nl_node_reset_link_stats;
doit.transcode = tipc_nl_compat_link_reset_stats;
return tipc_nl_compat_doit(&doit, msg);
case TIPC_CMD_SHOW_NAME_TABLE:
msg->req_type = TIPC_TLV_NAME_TBL_QUERY;
msg->rep_size = ULTRA_STRING_MAX_LEN;
msg->rep_type = TIPC_TLV_ULTRA_STRING;
dump.header = tipc_nl_compat_name_table_dump_header;
dump.dumpit = tipc_nl_name_table_dump;
dump.format = tipc_nl_compat_name_table_dump;
return tipc_nl_compat_dumpit(&dump, msg);
case TIPC_CMD_SHOW_PORTS:
msg->rep_size = ULTRA_STRING_MAX_LEN;
msg->rep_type = TIPC_TLV_ULTRA_STRING;
dump.dumpit = tipc_nl_sk_dump;
dump.format = tipc_nl_compat_sk_dump;
return tipc_nl_compat_dumpit(&dump, msg);
case TIPC_CMD_GET_MEDIA_NAMES:
msg->rep_size = MAX_MEDIA * TLV_SPACE(TIPC_MAX_MEDIA_NAME);
dump.dumpit = tipc_nl_media_dump;
dump.format = tipc_nl_compat_media_dump;
return tipc_nl_compat_dumpit(&dump, msg);
case TIPC_CMD_GET_NODES:
msg->rep_size = ULTRA_STRING_MAX_LEN;
dump.dumpit = tipc_nl_node_dump;
dump.format = tipc_nl_compat_node_dump;
return tipc_nl_compat_dumpit(&dump, msg);
case TIPC_CMD_SET_NODE_ADDR:
msg->req_type = TIPC_TLV_NET_ADDR;
doit.doit = tipc_nl_net_set;
doit.transcode = tipc_nl_compat_net_set;
return tipc_nl_compat_doit(&doit, msg);
case TIPC_CMD_SET_NETID:
msg->req_type = TIPC_TLV_UNSIGNED;
doit.doit = tipc_nl_net_set;
doit.transcode = tipc_nl_compat_net_set;
return tipc_nl_compat_doit(&doit, msg);
case TIPC_CMD_GET_NETID:
msg->rep_size = sizeof(u32);
dump.dumpit = tipc_nl_net_dump;
dump.format = tipc_nl_compat_net_dump;
return tipc_nl_compat_dumpit(&dump, msg);
case TIPC_CMD_SHOW_STATS:
return tipc_cmd_show_stats_compat(msg);
}
return -EOPNOTSUPP;
}
static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
{
int err;
int len;
struct tipc_nl_compat_msg msg;
struct nlmsghdr *req_nlh;
struct nlmsghdr *rep_nlh;
struct tipc_genlmsghdr *req_userhdr = info->userhdr;
memset(&msg, 0, sizeof(msg));
req_nlh = (struct nlmsghdr *)skb->data;
msg.req = nlmsg_data(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN;
msg.cmd = req_userhdr->cmd;
msg.net = genl_info_net(info);
msg.dst_sk = skb->sk;
if ((msg.cmd & 0xC000) && (!netlink_net_capable(skb, CAP_NET_ADMIN))) {
msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_NET_ADMIN);
err = -EACCES;
goto send;
}
len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
if (len && !TLV_OK(msg.req, len)) {
msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
err = -EOPNOTSUPP;
goto send;
}
err = tipc_nl_compat_handle(&msg);
if ((err == -EOPNOTSUPP) || (err == -EPERM))
msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
else if (err == -EINVAL)
msg.rep = tipc_get_err_tlv(TIPC_CFG_TLV_ERROR);
send:
if (!msg.rep)
return err;
len = nlmsg_total_size(GENL_HDRLEN + TIPC_GENL_HDRLEN);
skb_push(msg.rep, len);
rep_nlh = nlmsg_hdr(msg.rep);
memcpy(rep_nlh, info->nlhdr, len);
rep_nlh->nlmsg_len = msg.rep->len;
genlmsg_unicast(msg.net, msg.rep, NETLINK_CB(skb).portid);
return err;
}
static struct genl_family tipc_genl_compat_family = {
.id = GENL_ID_GENERATE,
.name = TIPC_GENL_NAME,
.version = TIPC_GENL_VERSION,
.hdrsize = TIPC_GENL_HDRLEN,
.maxattr = 0,
.netnsok = true,
};
static struct genl_ops tipc_genl_compat_ops[] = {
{
.cmd = TIPC_GENL_CMD,
.doit = tipc_nl_compat_recv,
},
};
int tipc_netlink_compat_start(void)
{
int res;
res = genl_register_family_with_ops(&tipc_genl_compat_family,
tipc_genl_compat_ops);
if (res) {
pr_err("Failed to register legacy compat interface\n");
return res;
}
return 0;
}
void tipc_netlink_compat_stop(void)
{
genl_unregister_family(&tipc_genl_compat_family);
}
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_5098_0 |
crossvul-cpp_data_good_2375_1 | /* lib/rpc/svc_auth_gss.c */
/*
Copyright (c) 2000 The Regents of the University of Michigan.
All rights reserved.
Copyright (c) 2000 Dug Song <dugsong@UMICH.EDU>.
All rights reserved, all wrongs reversed.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Id: svc_auth_gss.c,v 1.28 2002/10/15 21:29:36 kwc Exp
*/
#include "k5-platform.h"
#include <gssrpc/rpc.h>
#include <gssrpc/auth_gssapi.h>
#ifdef HAVE_HEIMDAL
#include <gssapi.h>
#define gss_nt_service_name GSS_C_NT_HOSTBASED_SERVICE
#else
#include <gssapi/gssapi.h>
#include <gssapi/gssapi_generic.h>
#endif
#ifdef DEBUG_GSSAPI
int svc_debug_gss = DEBUG_GSSAPI;
#endif
#ifdef SPKM
#ifndef OID_EQ
#define g_OID_equal(o1,o2) \
(((o1)->length == (o2)->length) && \
((o1)->elements != 0) && ((o2)->elements != 0) && \
(memcmp((o1)->elements,(o2)->elements,(int) (o1)->length) == 0))
#define OID_EQ 1
#endif /* OID_EQ */
extern const gss_OID_desc * const gss_mech_spkm3;
#endif /* SPKM */
extern SVCAUTH svc_auth_none;
static auth_gssapi_log_badauth_func log_badauth = NULL;
static caddr_t log_badauth_data = NULL;
static auth_gssapi_log_badauth2_func log_badauth2 = NULL;
static caddr_t log_badauth2_data = NULL;
static auth_gssapi_log_badverf_func log_badverf = NULL;
static caddr_t log_badverf_data = NULL;
static auth_gssapi_log_miscerr_func log_miscerr = NULL;
static caddr_t log_miscerr_data = NULL;
#define LOG_MISCERR(arg) if (log_miscerr) \
(*log_miscerr)(rqst, msg, arg, log_miscerr_data)
static bool_t svcauth_gss_destroy(SVCAUTH *);
static bool_t svcauth_gss_wrap(SVCAUTH *, XDR *, xdrproc_t, caddr_t);
static bool_t svcauth_gss_unwrap(SVCAUTH *, XDR *, xdrproc_t, caddr_t);
static bool_t svcauth_gss_nextverf(struct svc_req *, u_int);
struct svc_auth_ops svc_auth_gss_ops = {
svcauth_gss_wrap,
svcauth_gss_unwrap,
svcauth_gss_destroy
};
struct svc_rpc_gss_data {
bool_t established; /* context established */
gss_ctx_id_t ctx; /* context id */
struct rpc_gss_sec sec; /* security triple */
gss_buffer_desc cname; /* GSS client name */
u_int seq; /* sequence number */
u_int win; /* sequence window */
u_int seqlast; /* last sequence number */
uint32_t seqmask; /* bitmask of seqnums */
gss_name_t client_name; /* unparsed name string */
gss_buffer_desc checksum; /* so we can free it */
};
#define SVCAUTH_PRIVATE(auth) \
(*(struct svc_rpc_gss_data **)&(auth)->svc_ah_private)
/* Global server credentials. */
gss_cred_id_t svcauth_gss_creds;
static gss_name_t svcauth_gss_name = NULL;
bool_t
svcauth_gss_set_svc_name(gss_name_t name)
{
OM_uint32 maj_stat, min_stat;
log_debug("in svcauth_gss_set_svc_name()");
if (svcauth_gss_name != NULL) {
maj_stat = gss_release_name(&min_stat, &svcauth_gss_name);
if (maj_stat != GSS_S_COMPLETE) {
log_status("gss_release_name", maj_stat, min_stat);
return (FALSE);
}
svcauth_gss_name = NULL;
}
	if (name == GSS_C_NO_NAME)
return (TRUE);
maj_stat = gss_duplicate_name(&min_stat, name, &svcauth_gss_name);
if (maj_stat != GSS_S_COMPLETE) {
log_status("gss_duplicate_name", maj_stat, min_stat);
return (FALSE);
}
return (TRUE);
}
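/*
 * Typical call sequence (sketch; the service string is illustrative):
 *
 *	gss_buffer_desc namebuf;
 *	gss_name_t name;
 *	OM_uint32 maj, min;
 *
 *	namebuf.value = "service@host";
 *	namebuf.length = strlen("service@host");
 *	maj = gss_import_name(&min, &namebuf, gss_nt_service_name, &name);
 *	if (maj == GSS_S_COMPLETE && svcauth_gss_set_svc_name(name))
 *		... ready to accept RPCSEC_GSS requests ...
 */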
static bool_t
svcauth_gss_acquire_cred(void)
{
OM_uint32 maj_stat, min_stat;
log_debug("in svcauth_gss_acquire_cred()");
maj_stat = gss_acquire_cred(&min_stat, svcauth_gss_name, 0,
GSS_C_NULL_OID_SET, GSS_C_ACCEPT,
&svcauth_gss_creds, NULL, NULL);
if (maj_stat != GSS_S_COMPLETE) {
log_status("gss_acquire_cred", maj_stat, min_stat);
return (FALSE);
}
return (TRUE);
}
static bool_t
svcauth_gss_release_cred(void)
{
OM_uint32 maj_stat, min_stat;
log_debug("in svcauth_gss_release_cred()");
maj_stat = gss_release_cred(&min_stat, &svcauth_gss_creds);
if (maj_stat != GSS_S_COMPLETE) {
log_status("gss_release_cred", maj_stat, min_stat);
return (FALSE);
}
svcauth_gss_creds = NULL;
return (TRUE);
}
/* Invoke log_badauth callbacks for an authentication failure. */
static void
badauth(OM_uint32 maj, OM_uint32 minor, SVCXPRT *xprt)
{
if (log_badauth != NULL)
(*log_badauth)(maj, minor, &xprt->xp_raddr, log_badauth_data);
if (log_badauth2 != NULL)
(*log_badauth2)(maj, minor, xprt, log_badauth2_data);
}
static bool_t
svcauth_gss_accept_sec_context(struct svc_req *rqst,
struct rpc_gss_init_res *gr)
{
struct svc_rpc_gss_data *gd;
struct rpc_gss_cred *gc;
gss_buffer_desc recv_tok, seqbuf;
gss_OID mech;
OM_uint32 maj_stat = 0, min_stat = 0, ret_flags, seq;
log_debug("in svcauth_gss_accept_context()");
gd = SVCAUTH_PRIVATE(rqst->rq_xprt->xp_auth);
gc = (struct rpc_gss_cred *)rqst->rq_clntcred;
memset(gr, 0, sizeof(*gr));
/* Deserialize arguments. */
memset(&recv_tok, 0, sizeof(recv_tok));
if (!svc_getargs(rqst->rq_xprt, xdr_rpc_gss_init_args,
(caddr_t)&recv_tok))
return (FALSE);
gr->gr_major = gss_accept_sec_context(&gr->gr_minor,
&gd->ctx,
svcauth_gss_creds,
&recv_tok,
GSS_C_NO_CHANNEL_BINDINGS,
&gd->client_name,
&mech,
&gr->gr_token,
&ret_flags,
NULL,
NULL);
svc_freeargs(rqst->rq_xprt, xdr_rpc_gss_init_args, (caddr_t)&recv_tok);
log_status("accept_sec_context", gr->gr_major, gr->gr_minor);
if (gr->gr_major != GSS_S_COMPLETE &&
gr->gr_major != GSS_S_CONTINUE_NEEDED) {
badauth(gr->gr_major, gr->gr_minor, rqst->rq_xprt);
gd->ctx = GSS_C_NO_CONTEXT;
goto errout;
}
gr->gr_ctx.value = "xxxx";
gr->gr_ctx.length = 4;
/* gr->gr_win = 0x00000005; ANDROS: for debugging linux kernel version... */
gr->gr_win = sizeof(gd->seqmask) * 8;
/* Save client info. */
gd->sec.mech = mech;
gd->sec.qop = GSS_C_QOP_DEFAULT;
gd->sec.svc = gc->gc_svc;
gd->seq = gc->gc_seq;
gd->win = gr->gr_win;
if (gr->gr_major == GSS_S_COMPLETE) {
#ifdef SPKM
/* spkm3: no src_name (anonymous) */
if(!g_OID_equal(gss_mech_spkm3, mech)) {
#endif
maj_stat = gss_display_name(&min_stat, gd->client_name,
&gd->cname, &gd->sec.mech);
#ifdef SPKM
}
#endif
if (maj_stat != GSS_S_COMPLETE) {
log_status("display_name", maj_stat, min_stat);
goto errout;
}
#ifdef DEBUG
#ifdef HAVE_HEIMDAL
log_debug("accepted context for %.*s with "
"<mech {}, qop %d, svc %d>",
gd->cname.length, (char *)gd->cname.value,
gd->sec.qop, gd->sec.svc);
#else
{
gss_buffer_desc mechname;
gss_oid_to_str(&min_stat, mech, &mechname);
log_debug("accepted context for %.*s with "
"<mech %.*s, qop %d, svc %d>",
gd->cname.length, (char *)gd->cname.value,
mechname.length, (char *)mechname.value,
gd->sec.qop, gd->sec.svc);
gss_release_buffer(&min_stat, &mechname);
}
#endif
#endif /* DEBUG */
seq = htonl(gr->gr_win);
seqbuf.value = &seq;
seqbuf.length = sizeof(seq);
gss_release_buffer(&min_stat, &gd->checksum);
maj_stat = gss_sign(&min_stat, gd->ctx, GSS_C_QOP_DEFAULT,
&seqbuf, &gd->checksum);
if (maj_stat != GSS_S_COMPLETE) {
goto errout;
}
rqst->rq_xprt->xp_verf.oa_flavor = RPCSEC_GSS;
rqst->rq_xprt->xp_verf.oa_base = gd->checksum.value;
rqst->rq_xprt->xp_verf.oa_length = gd->checksum.length;
}
return (TRUE);
errout:
gss_release_buffer(&min_stat, &gr->gr_token);
return (FALSE);
}
static bool_t
svcauth_gss_validate(struct svc_req *rqst, struct svc_rpc_gss_data *gd, struct rpc_msg *msg)
{
struct opaque_auth *oa;
gss_buffer_desc rpcbuf, checksum;
OM_uint32 maj_stat, min_stat, qop_state;
u_char rpchdr[128];
int32_t *buf;
log_debug("in svcauth_gss_validate()");
memset(rpchdr, 0, sizeof(rpchdr));
/* XXX - Reconstruct RPC header for signing (from xdr_callmsg). */
oa = &msg->rm_call.cb_cred;
if (oa->oa_length > MAX_AUTH_BYTES)
return (FALSE);
/* 8 XDR units from the IXDR macro calls. */
if (sizeof(rpchdr) < (8 * BYTES_PER_XDR_UNIT +
RNDUP(oa->oa_length)))
return (FALSE);
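	/* With the 128-byte scratch buffer and 8 * BYTES_PER_XDR_UNIT
	 * (32 bytes) of fixed header fields, this rejects credentials
	 * longer than 96 bytes after XDR rounding. */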
buf = (int32_t *)(void *)rpchdr;
IXDR_PUT_LONG(buf, msg->rm_xid);
IXDR_PUT_ENUM(buf, msg->rm_direction);
IXDR_PUT_LONG(buf, msg->rm_call.cb_rpcvers);
IXDR_PUT_LONG(buf, msg->rm_call.cb_prog);
IXDR_PUT_LONG(buf, msg->rm_call.cb_vers);
IXDR_PUT_LONG(buf, msg->rm_call.cb_proc);
IXDR_PUT_ENUM(buf, oa->oa_flavor);
IXDR_PUT_LONG(buf, oa->oa_length);
if (oa->oa_length) {
memcpy((caddr_t)buf, oa->oa_base, oa->oa_length);
buf += RNDUP(oa->oa_length) / sizeof(int32_t);
}
rpcbuf.value = rpchdr;
rpcbuf.length = (u_char *)buf - rpchdr;
checksum.value = msg->rm_call.cb_verf.oa_base;
checksum.length = msg->rm_call.cb_verf.oa_length;
maj_stat = gss_verify_mic(&min_stat, gd->ctx, &rpcbuf, &checksum,
&qop_state);
if (maj_stat != GSS_S_COMPLETE) {
log_status("gss_verify_mic", maj_stat, min_stat);
if (log_badverf != NULL)
(*log_badverf)(gd->client_name,
svcauth_gss_name,
rqst, msg, log_badverf_data);
return (FALSE);
}
return (TRUE);
}
static bool_t
svcauth_gss_nextverf(struct svc_req *rqst, u_int num)
{
struct svc_rpc_gss_data *gd;
gss_buffer_desc signbuf;
OM_uint32 maj_stat, min_stat;
log_debug("in svcauth_gss_nextverf()");
if (rqst->rq_xprt->xp_auth == NULL)
return (FALSE);
gd = SVCAUTH_PRIVATE(rqst->rq_xprt->xp_auth);
gss_release_buffer(&min_stat, &gd->checksum);
	signbuf.value = &num;
signbuf.length = sizeof(num);
maj_stat = gss_get_mic(&min_stat, gd->ctx, gd->sec.qop,
&signbuf, &gd->checksum);
if (maj_stat != GSS_S_COMPLETE) {
log_status("gss_get_mic", maj_stat, min_stat);
return (FALSE);
}
rqst->rq_xprt->xp_verf.oa_flavor = RPCSEC_GSS;
rqst->rq_xprt->xp_verf.oa_base = (caddr_t)gd->checksum.value;
rqst->rq_xprt->xp_verf.oa_length = (u_int)gd->checksum.length;
return (TRUE);
}
enum auth_stat
gssrpc__svcauth_gss(struct svc_req *rqst, struct rpc_msg *msg,
bool_t *no_dispatch)
{
enum auth_stat retstat;
XDR xdrs;
SVCAUTH *auth;
struct svc_rpc_gss_data *gd;
struct rpc_gss_cred *gc;
struct rpc_gss_init_res gr;
int call_stat, offset;
OM_uint32 min_stat;
log_debug("in svcauth_gss()");
/* Initialize reply. */
rqst->rq_xprt->xp_verf = gssrpc__null_auth;
/* Allocate and set up server auth handle. */
if (rqst->rq_xprt->xp_auth == NULL ||
rqst->rq_xprt->xp_auth == &svc_auth_none) {
if ((auth = calloc(sizeof(*auth), 1)) == NULL) {
fprintf(stderr, "svcauth_gss: out_of_memory\n");
return (AUTH_FAILED);
}
if ((gd = calloc(sizeof(*gd), 1)) == NULL) {
fprintf(stderr, "svcauth_gss: out_of_memory\n");
return (AUTH_FAILED);
}
auth->svc_ah_ops = &svc_auth_gss_ops;
SVCAUTH_PRIVATE(auth) = gd;
rqst->rq_xprt->xp_auth = auth;
}
else gd = SVCAUTH_PRIVATE(rqst->rq_xprt->xp_auth);
log_debug("xp_auth=%p, gd=%p", rqst->rq_xprt->xp_auth, gd);
/* Deserialize client credentials. */
if (rqst->rq_cred.oa_length <= 0)
return (AUTH_BADCRED);
gc = (struct rpc_gss_cred *)rqst->rq_clntcred;
memset(gc, 0, sizeof(*gc));
log_debug("calling xdrmem_create()");
log_debug("oa_base=%p, oa_length=%u", rqst->rq_cred.oa_base,
rqst->rq_cred.oa_length);
xdrmem_create(&xdrs, rqst->rq_cred.oa_base,
rqst->rq_cred.oa_length, XDR_DECODE);
log_debug("xdrmem_create() returned");
if (!xdr_rpc_gss_cred(&xdrs, gc)) {
log_debug("xdr_rpc_gss_cred() failed");
XDR_DESTROY(&xdrs);
return (AUTH_BADCRED);
}
XDR_DESTROY(&xdrs);
retstat = AUTH_FAILED;
#define ret_freegc(code) do { retstat = code; goto freegc; } while (0)
/* Check version. */
if (gc->gc_v != RPCSEC_GSS_VERSION)
ret_freegc (AUTH_BADCRED);
/* Check RPCSEC_GSS service. */
if (gc->gc_svc != RPCSEC_GSS_SVC_NONE &&
gc->gc_svc != RPCSEC_GSS_SVC_INTEGRITY &&
gc->gc_svc != RPCSEC_GSS_SVC_PRIVACY)
ret_freegc (AUTH_BADCRED);
/* Check sequence number. */
if (gd->established) {
if (gc->gc_seq > MAXSEQ)
ret_freegc (RPCSEC_GSS_CTXPROBLEM);
if ((offset = gd->seqlast - gc->gc_seq) < 0) {
gd->seqlast = gc->gc_seq;
offset = 0 - offset;
gd->seqmask <<= offset;
offset = 0;
} else if ((u_int)offset >= gd->win ||
(gd->seqmask & (1 << offset))) {
*no_dispatch = 1;
ret_freegc (RPCSEC_GSS_CTXPROBLEM);
}
gd->seq = gc->gc_seq;
gd->seqmask |= (1 << offset);
}
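	/* Worked example: with seqlast == 10 (bit 0 of seqmask set), an
	 * arriving gc_seq of 12 gives offset -2, so seqmask is shifted
	 * left two bits and bit 0 is set for 12; seq 10 now occupies
	 * bit 2, and a replayed 10 hits (seqmask & (1 << 2)) and is
	 * rejected, as is anything older than the 32-entry window. */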
if (gd->established) {
rqst->rq_clntname = (char *)gd->client_name;
rqst->rq_svccred = (char *)gd->ctx;
}
/* Handle RPCSEC_GSS control procedure. */
switch (gc->gc_proc) {
case RPCSEC_GSS_INIT:
case RPCSEC_GSS_CONTINUE_INIT:
if (rqst->rq_proc != NULLPROC)
ret_freegc (AUTH_FAILED); /* XXX ? */
if (!svcauth_gss_acquire_cred())
ret_freegc (AUTH_FAILED);
if (!svcauth_gss_accept_sec_context(rqst, &gr))
ret_freegc (AUTH_REJECTEDCRED);
if (!svcauth_gss_nextverf(rqst, htonl(gr.gr_win))) {
gss_release_buffer(&min_stat, &gr.gr_token);
ret_freegc (AUTH_FAILED);
}
*no_dispatch = TRUE;
call_stat = svc_sendreply(rqst->rq_xprt, xdr_rpc_gss_init_res,
(caddr_t)&gr);
gss_release_buffer(&min_stat, &gr.gr_token);
gss_release_buffer(&min_stat, &gd->checksum);
if (!call_stat)
ret_freegc (AUTH_FAILED);
if (gr.gr_major == GSS_S_COMPLETE)
gd->established = TRUE;
break;
case RPCSEC_GSS_DATA:
if (!svcauth_gss_validate(rqst, gd, msg))
ret_freegc (RPCSEC_GSS_CREDPROBLEM);
if (!svcauth_gss_nextverf(rqst, htonl(gc->gc_seq)))
ret_freegc (AUTH_FAILED);
break;
case RPCSEC_GSS_DESTROY:
if (rqst->rq_proc != NULLPROC)
ret_freegc (AUTH_FAILED); /* XXX ? */
if (!svcauth_gss_validate(rqst, gd, msg))
ret_freegc (RPCSEC_GSS_CREDPROBLEM);
if (!svcauth_gss_nextverf(rqst, htonl(gc->gc_seq)))
ret_freegc (AUTH_FAILED);
*no_dispatch = TRUE;
call_stat = svc_sendreply(rqst->rq_xprt,
xdr_void, (caddr_t)NULL);
log_debug("sendreply in destroy: %d", call_stat);
if (!svcauth_gss_release_cred())
ret_freegc (AUTH_FAILED);
SVCAUTH_DESTROY(rqst->rq_xprt->xp_auth);
rqst->rq_xprt->xp_auth = &svc_auth_none;
break;
default:
ret_freegc (AUTH_REJECTEDCRED);
break;
}
retstat = AUTH_OK;
freegc:
xdr_free(xdr_rpc_gss_cred, gc);
log_debug("returning %d from svcauth_gss()", retstat);
return (retstat);
}
static bool_t
svcauth_gss_destroy(SVCAUTH *auth)
{
struct svc_rpc_gss_data *gd;
OM_uint32 min_stat;
log_debug("in svcauth_gss_destroy()");
gd = SVCAUTH_PRIVATE(auth);
gss_delete_sec_context(&min_stat, &gd->ctx, GSS_C_NO_BUFFER);
gss_release_buffer(&min_stat, &gd->cname);
gss_release_buffer(&min_stat, &gd->checksum);
if (gd->client_name)
gss_release_name(&min_stat, &gd->client_name);
mem_free(gd, sizeof(*gd));
mem_free(auth, sizeof(*auth));
return (TRUE);
}
static bool_t
svcauth_gss_wrap(SVCAUTH *auth, XDR *xdrs, xdrproc_t xdr_func, caddr_t xdr_ptr)
{
struct svc_rpc_gss_data *gd;
log_debug("in svcauth_gss_wrap()");
gd = SVCAUTH_PRIVATE(auth);
if (!gd->established || gd->sec.svc == RPCSEC_GSS_SVC_NONE) {
return ((*xdr_func)(xdrs, xdr_ptr));
}
return (xdr_rpc_gss_data(xdrs, xdr_func, xdr_ptr,
gd->ctx, gd->sec.qop,
gd->sec.svc, gd->seq));
}
static bool_t
svcauth_gss_unwrap(SVCAUTH *auth, XDR *xdrs, xdrproc_t xdr_func, caddr_t xdr_ptr)
{
struct svc_rpc_gss_data *gd;
log_debug("in svcauth_gss_unwrap()");
gd = SVCAUTH_PRIVATE(auth);
if (!gd->established || gd->sec.svc == RPCSEC_GSS_SVC_NONE) {
return ((*xdr_func)(xdrs, xdr_ptr));
}
return (xdr_rpc_gss_data(xdrs, xdr_func, xdr_ptr,
gd->ctx, gd->sec.qop,
gd->sec.svc, gd->seq));
}
char *
svcauth_gss_get_principal(SVCAUTH *auth)
{
struct svc_rpc_gss_data *gd;
char *pname;
gd = SVCAUTH_PRIVATE(auth);
if (gd->cname.length == 0 || gd->cname.length >= SIZE_MAX)
return (NULL);
if ((pname = malloc(gd->cname.length + 1)) == NULL)
return (NULL);
memcpy(pname, gd->cname.value, gd->cname.length);
pname[gd->cname.length] = '\0';
return (pname);
}
/*
* Function: svcauth_gss_set_log_badauth_func
*
* Purpose: sets the logging function called when an invalid RPC call
* arrives
*
* See functional specifications.
*/
void svcauth_gss_set_log_badauth_func(
auth_gssapi_log_badauth_func func,
caddr_t data)
{
log_badauth = func;
log_badauth_data = data;
}
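/*
 * Usage sketch (my_log_badauth is a hypothetical callback matching the
 * auth_gssapi_log_badauth_func signature):
 *
 *	svcauth_gss_set_log_badauth_func(my_log_badauth, NULL);
 */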
void
svcauth_gss_set_log_badauth2_func(auth_gssapi_log_badauth2_func func,
caddr_t data)
{
log_badauth2 = func;
log_badauth2_data = data;
}
/*
* Function: svcauth_gss_set_log_badverf_func
*
 * Purpose: sets the logging function called when an RPC call with an
 * invalid verifier arrives
*
* See functional specifications.
*/
void svcauth_gss_set_log_badverf_func(
auth_gssapi_log_badverf_func func,
caddr_t data)
{
log_badverf = func;
log_badverf_data = data;
}
/*
* Function: svcauth_gss_set_log_miscerr_func
*
* Purpose: sets the logging function called when a miscellaneous
* AUTH_GSSAPI error occurs
*
* See functional specifications.
*/
void svcauth_gss_set_log_miscerr_func(
auth_gssapi_log_miscerr_func func,
caddr_t data)
{
log_miscerr = func;
log_miscerr_data = data;
}
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_2375_1 |
crossvul-cpp_data_good_760_3 | /*
 * IPv6 library code, needed by statically linked components when full IPv6
 * support is not configured or is built as a module. These functions are
 * needed by the GSO/GRO implementation.
*/
#include <linux/export.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/addrconf.h>
#include <net/secure_seq.h>
#include <linux/netfilter.h>
static u32 __ipv6_select_ident(struct net *net,
const struct in6_addr *dst,
const struct in6_addr *src)
{
const struct {
struct in6_addr dst;
struct in6_addr src;
} __aligned(SIPHASH_ALIGNMENT) combined = {
.dst = *dst,
.src = *src,
};
u32 hash, id;
	/* Note: this lazy initialization is racy (two CPUs may both see a
	 * zero key and write it), but that is okay - the key only needs to
	 * be some unpredictable random value.
	 */
if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
get_random_bytes(&net->ipv4.ip_id_key,
sizeof(net->ipv4.ip_id_key));
hash = siphash(&combined, sizeof(combined), &net->ipv4.ip_id_key);
	/* Treat an id of 0 as unset; if we get 0 back from ip_idents_reserve,
	 * set the high-order bit instead, thus minimizing possible future
	 * collisions.
	 */
id = ip_idents_reserve(hash, 1);
if (unlikely(!id))
id = 1 << 31;
return id;
}
/* This function exists only for tap drivers that must support broken
* clients requesting UFO without specifying an IPv6 fragment ID.
*
* This is similar to ipv6_select_ident() but we use an independent hash
* seed to limit information leakage.
*
* The network header must be set before calling this.
*/
__be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb)
{
struct in6_addr buf[2];
struct in6_addr *addrs;
u32 id;
addrs = skb_header_pointer(skb,
skb_network_offset(skb) +
offsetof(struct ipv6hdr, saddr),
sizeof(buf), buf);
if (!addrs)
return 0;
id = __ipv6_select_ident(net, &addrs[1], &addrs[0]);
return htonl(id);
}
EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
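/*
 * Caller sketch for a tap driver (assumes the network header has been
 * set as required above; the fallback value is illustrative):
 *
 *	__be32 id = ipv6_proxy_select_ident(dev_net(dev), skb);
 *	if (!id)
 *		id = htonl(1);
 */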
__be32 ipv6_select_ident(struct net *net,
const struct in6_addr *daddr,
const struct in6_addr *saddr)
{
u32 id;
id = __ipv6_select_ident(net, daddr, saddr);
return htonl(id);
}
EXPORT_SYMBOL(ipv6_select_ident);
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
unsigned int offset = sizeof(struct ipv6hdr);
unsigned int packet_len = skb_tail_pointer(skb) -
skb_network_header(skb);
int found_rhdr = 0;
*nexthdr = &ipv6_hdr(skb)->nexthdr;
while (offset <= packet_len) {
struct ipv6_opt_hdr *exthdr;
switch (**nexthdr) {
case NEXTHDR_HOP:
break;
case NEXTHDR_ROUTING:
found_rhdr = 1;
break;
case NEXTHDR_DEST:
#if IS_ENABLED(CONFIG_IPV6_MIP6)
if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
break;
#endif
if (found_rhdr)
return offset;
break;
default:
return offset;
}
if (offset + sizeof(struct ipv6_opt_hdr) > packet_len)
return -EINVAL;
exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
offset);
offset += ipv6_optlen(exthdr);
if (offset > IPV6_MAXPLEN)
return -EINVAL;
*nexthdr = &exthdr->nexthdr;
}
return -EINVAL;
}
EXPORT_SYMBOL(ip6_find_1stfragopt);
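/*
 * Typical use when inserting a fragment header (sketch): on success the
 * return value is the offset at which the fragment header belongs and
 * *prevhdr addresses the nexthdr byte that must be patched:
 *
 *	u8 *prevhdr;
 *	int hlen = ip6_find_1stfragopt(skb, &prevhdr);
 *	if (hlen < 0)
 *		return hlen;
 */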
#if IS_ENABLED(CONFIG_IPV6)
int ip6_dst_hoplimit(struct dst_entry *dst)
{
int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
if (hoplimit == 0) {
struct net_device *dev = dst->dev;
struct inet6_dev *idev;
rcu_read_lock();
idev = __in6_dev_get(dev);
if (idev)
hoplimit = idev->cnf.hop_limit;
else
hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit;
rcu_read_unlock();
}
return hoplimit;
}
EXPORT_SYMBOL(ip6_dst_hoplimit);
#endif
int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
int len;
len = skb->len - sizeof(struct ipv6hdr);
if (len > IPV6_MAXPLEN)
len = 0;
ipv6_hdr(skb)->payload_len = htons(len);
IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
/* if egress device is enslaved to an L3 master device pass the
* skb to its handler for processing
*/
skb = l3mdev_ip6_out(sk, skb);
if (unlikely(!skb))
return 0;
skb->protocol = htons(ETH_P_IPV6);
return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
net, sk, skb, NULL, skb_dst(skb)->dev,
dst_output);
}
EXPORT_SYMBOL_GPL(__ip6_local_out);
int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
int err;
err = __ip6_local_out(net, sk, skb);
if (likely(err == 1))
err = dst_output(net, sk, skb);
return err;
}
EXPORT_SYMBOL_GPL(ip6_local_out);
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_760_3 |
crossvul-cpp_data_bad_3568_6 | /* -*- Mode: c; c-basic-offset: 2 -*-
*
* raptor_libxml.c - Raptor libxml functions
*
* Copyright (C) 2000-2009, David Beckett http://www.dajobe.org/
* Copyright (C) 2000-2004, University of Bristol, UK http://www.bristol.ac.uk/
*
* This package is Free Software and part of Redland http://librdf.org/
*
* It is licensed under the following three licenses as alternatives:
* 1. GNU Lesser General Public License (LGPL) V2.1 or any newer version
* 2. GNU General Public License (GPL) V2 or any newer version
* 3. Apache License, V2.0 or any newer version
*
* You may not use this file except in compliance with at least one of
* the above three licenses.
*
* See LICENSE.html or LICENSE.txt at the top of this package for the
* complete terms and further detail along with the license texts for
* the licenses in COPYING.LIB, COPYING and LICENSE-2.0.txt respectively.
*
*
*/
#ifdef HAVE_CONFIG_H
#include <raptor_config.h>
#endif
#ifdef WIN32
#include <win32_raptor_config.h>
#endif
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include <stdarg.h>
#ifdef HAVE_ERRNO_H
#include <errno.h>
#endif
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif
/* Raptor includes */
#include "raptor2.h"
#include "raptor_internal.h"
#ifdef RAPTOR_XML_LIBXML
/* prototypes */
static void raptor_libxml_warning(void* user_data, const char *msg, ...) RAPTOR_PRINTF_FORMAT(2, 3);
static void raptor_libxml_error_common(void* user_data, const char *msg, va_list args, const char *prefix, int is_fatal) RAPTOR_PRINTF_FORMAT(2, 0);
static void raptor_libxml_error(void *context, const char *msg, ...) RAPTOR_PRINTF_FORMAT(2, 3);
static void raptor_libxml_fatal_error(void *context, const char *msg, ...) RAPTOR_PRINTF_FORMAT(2, 3);
static void raptor_libxml_xmlStructuredError_handler_global(void *user_data, xmlErrorPtr err);
static void raptor_libxml_xmlStructuredError_handler_parsing(void *user_data, xmlErrorPtr err);
static const char* const xml_warning_prefix="XML parser warning - ";
static const char* const xml_error_prefix="XML parser error - ";
static const char* const xml_generic_error_prefix="XML error - ";
static const char* const xml_fatal_error_prefix="XML parser fatal error - ";
static const char* const xml_validation_error_prefix="XML parser validation error - ";
static const char* const xml_validation_warning_prefix="XML parser validation warning - ";
#ifdef HAVE_XMLSAX2INTERNALSUBSET
/* SAX2 - 2.6.0 or later */
#define libxml2_internalSubset xmlSAX2InternalSubset
#define libxml2_externalSubset xmlSAX2ExternalSubset
#define libxml2_isStandalone xmlSAX2IsStandalone
#define libxml2_hasInternalSubset xmlSAX2HasInternalSubset
#define libxml2_hasExternalSubset xmlSAX2HasExternalSubset
#define libxml2_resolveEntity xmlSAX2ResolveEntity
#define libxml2_getEntity xmlSAX2GetEntity
#define libxml2_getParameterEntity xmlSAX2GetParameterEntity
#define libxml2_entityDecl xmlSAX2EntityDecl
#define libxml2_unparsedEntityDecl xmlSAX2UnparsedEntityDecl
#define libxml2_startDocument xmlSAX2StartDocument
#define libxml2_endDocument xmlSAX2EndDocument
#else
/* SAX1 - before libxml2 2.6.0 */
#define libxml2_internalSubset internalSubset
#define libxml2_externalSubset externalSubset
#define libxml2_isStandalone isStandalone
#define libxml2_hasInternalSubset hasInternalSubset
#define libxml2_hasExternalSubset hasExternalSubset
#define libxml2_resolveEntity resolveEntity
#define libxml2_getEntity getEntity
#define libxml2_getParameterEntity getParameterEntity
#define libxml2_entityDecl entityDecl
#define libxml2_unparsedEntityDecl unparsedEntityDecl
#define libxml2_startDocument startDocument
#define libxml2_endDocument endDocument
#endif
static void
raptor_libxml_internalSubset(void* user_data, const xmlChar *name,
const xmlChar *ExternalID, const xmlChar *SystemID) {
raptor_sax2* sax2 = (raptor_sax2*)user_data;
libxml2_internalSubset(sax2->xc, name, ExternalID, SystemID);
}
#ifdef RAPTOR_LIBXML_XMLSAXHANDLER_EXTERNALSUBSET
static void
raptor_libxml_externalSubset(void* user_data, const xmlChar *name,
const xmlChar *ExternalID, const xmlChar *SystemID)
{
raptor_sax2* sax2 = (raptor_sax2*)user_data;
libxml2_externalSubset(sax2->xc, name, ExternalID, SystemID);
}
#endif
static int
raptor_libxml_isStandalone (void* user_data)
{
raptor_sax2* sax2 = (raptor_sax2*)user_data;
return libxml2_isStandalone(sax2->xc);
}
static int
raptor_libxml_hasInternalSubset (void* user_data)
{
raptor_sax2* sax2 = (raptor_sax2*)user_data;
return libxml2_hasInternalSubset(sax2->xc);
}
static int
raptor_libxml_hasExternalSubset (void* user_data)
{
raptor_sax2* sax2 = (raptor_sax2*)user_data;
return libxml2_hasExternalSubset(sax2->xc);
}
static xmlParserInputPtr
raptor_libxml_resolveEntity(void* user_data,
const xmlChar *publicId, const xmlChar *systemId) {
raptor_sax2* sax2 = (raptor_sax2*)user_data;
return libxml2_resolveEntity(sax2->xc, publicId, systemId);
}
static xmlEntityPtr
raptor_libxml_getEntity(void* user_data, const xmlChar *name) {
raptor_sax2* sax2 = (raptor_sax2*)user_data;
return libxml2_getEntity(sax2->xc, name);
}
static xmlEntityPtr
raptor_libxml_getParameterEntity(void* user_data, const xmlChar *name) {
raptor_sax2* sax2 = (raptor_sax2*)user_data;
return libxml2_getParameterEntity(sax2->xc, name);
}
static void
raptor_libxml_entityDecl(void* user_data, const xmlChar *name, int type,
const xmlChar *publicId, const xmlChar *systemId,
xmlChar *content) {
raptor_sax2* sax2 = (raptor_sax2*)user_data;
libxml2_entityDecl(sax2->xc, name, type, publicId, systemId, content);
}
static void
raptor_libxml_unparsedEntityDecl(void* user_data, const xmlChar *name,
const xmlChar *publicId, const xmlChar *systemId,
const xmlChar *notationName) {
raptor_sax2* sax2 = (raptor_sax2*)user_data;
libxml2_unparsedEntityDecl(sax2->xc, name, publicId, systemId, notationName);
}
static void
raptor_libxml_startDocument(void* user_data) {
raptor_sax2* sax2 = (raptor_sax2*)user_data;
libxml2_startDocument(sax2->xc);
}
static void
raptor_libxml_endDocument(void* user_data) {
raptor_sax2* sax2 = (raptor_sax2*)user_data;
xmlParserCtxtPtr xc = sax2->xc;
libxml2_endDocument(sax2->xc);
if(xc->myDoc) {
xmlFreeDoc(xc->myDoc);
xc->myDoc = NULL;
}
}
static void
raptor_libxml_set_document_locator(void* user_data, xmlSAXLocatorPtr loc)
{
raptor_sax2* sax2 = (raptor_sax2*)user_data;
sax2->loc = loc;
}
void
raptor_libxml_update_document_locator(raptor_sax2* sax2,
raptor_locator* locator)
{
/* for storing error info */
xmlSAXLocatorPtr loc = sax2 ? sax2->loc : NULL;
xmlParserCtxtPtr xc= sax2 ? sax2->xc : NULL;
if(xc && xc->inSubset)
return;
if(!locator)
return;
locator->line= -1;
locator->column= -1;
if(!xc)
return;
if(loc) {
locator->line = loc->getLineNumber(xc);
/* Seems to be broken */
/* locator->column = loc->getColumnNumber(xc); */
}
}
static void
raptor_libxml_warning(void* user_data, const char *msg, ...)
{
raptor_sax2* sax2 = NULL;
va_list args;
int prefix_length = RAPTOR_BAD_CAST(int, strlen(xml_warning_prefix));
int length;
char *nmsg;
int msg_len;
  /* Work around a libxml2 quirk - sometimes this handler receives the
   * parser context as user_data, sometimes the userData pointer itself.
   */
if(((raptor_sax2*)user_data)->magic == RAPTOR_LIBXML_MAGIC)
sax2 = (raptor_sax2*)user_data;
else
/* user_data is not userData */
sax2 = (raptor_sax2*)((xmlParserCtxtPtr)user_data)->userData;
va_start(args, msg);
raptor_libxml_update_document_locator(sax2, sax2->locator);
msg_len = RAPTOR_BAD_CAST(int, strlen(msg));
length = prefix_length + msg_len + 1;
nmsg = RAPTOR_MALLOC(char*, length);
if(nmsg) {
memcpy(nmsg, xml_warning_prefix, prefix_length); /* Do not copy NUL */
memcpy(nmsg + prefix_length, msg, msg_len + 1); /* Copy NUL */
if(nmsg[length-2] == '\n')
nmsg[length-2]='\0';
}
raptor_log_error_varargs(sax2->world,
RAPTOR_LOG_LEVEL_WARN,
sax2->locator,
nmsg ? nmsg : msg,
args);
if(nmsg)
RAPTOR_FREE(char*, nmsg);
va_end(args);
}
static void
raptor_libxml_error_common(void* user_data, const char *msg, va_list args,
const char *prefix, int is_fatal)
{
raptor_sax2* sax2 = NULL;
int prefix_length = RAPTOR_BAD_CAST(int, strlen(prefix));
int length;
char *nmsg;
int msg_len;
raptor_world* world = NULL;
raptor_locator* locator = NULL;
if(user_data) {
    /* Work around a libxml2 quirk - sometimes this handler receives the
     * parser context as user_data, sometimes the userData pointer itself.
     */
if(((raptor_sax2*)user_data)->magic == RAPTOR_LIBXML_MAGIC)
sax2 = (raptor_sax2*)user_data;
else
/* user_data is not userData */
sax2 = (raptor_sax2*)((xmlParserCtxtPtr)user_data)->userData;
}
if(sax2) {
world = sax2->world;
locator = sax2->locator;
if(locator)
raptor_libxml_update_document_locator(sax2, sax2->locator);
}
msg_len = RAPTOR_BAD_CAST(int, strlen(msg));
length = prefix_length + msg_len + 1;
nmsg = RAPTOR_MALLOC(char*, length);
if(nmsg) {
memcpy(nmsg, prefix, prefix_length); /* Do not copy NUL */
memcpy(nmsg + prefix_length, msg, msg_len + 1); /* Copy NUL */
    if(nmsg[length-2] == '\n')
      nmsg[length-2]='\0';
}
if(is_fatal)
raptor_log_error_varargs(world,
RAPTOR_LOG_LEVEL_FATAL,
locator,
nmsg ? nmsg : msg,
args);
else
raptor_log_error_varargs(world,
RAPTOR_LOG_LEVEL_ERROR,
locator,
nmsg ? nmsg : msg,
args);
if(nmsg)
RAPTOR_FREE(char*, nmsg);
}
static void
raptor_libxml_error(void* user_data, const char *msg, ...)
{
va_list args;
va_start(args, msg);
raptor_libxml_error_common(user_data, msg, args, xml_error_prefix, 0);
va_end(args);
}
void
raptor_libxml_generic_error(void* user_data, const char *msg, ...)
{
raptor_world* world = (raptor_world*)user_data;
va_list args;
const char* prefix = xml_generic_error_prefix;
int prefix_length = RAPTOR_BAD_CAST(int, strlen(prefix));
int length;
char *nmsg;
int msg_len;
va_start(args, msg);
msg_len = RAPTOR_BAD_CAST(int, strlen(msg));
length = prefix_length + msg_len + 1;
nmsg = RAPTOR_MALLOC(char*, length);
if(nmsg) {
memcpy(nmsg, prefix, prefix_length); /* Do not copy NUL */
memcpy(nmsg + prefix_length, msg, msg_len + 1); /* Copy NUL */
    if(nmsg[length-2] == '\n')
      nmsg[length-2]='\0';
}
raptor_log_error_varargs(world, RAPTOR_LOG_LEVEL_ERROR,
/* locator */ NULL,
nmsg ? nmsg : msg,
args);
if(nmsg)
RAPTOR_FREE(char*, nmsg);
va_end(args);
}
static void
raptor_libxml_fatal_error(void* user_data, const char *msg, ...)
{
va_list args;
va_start(args, msg);
raptor_libxml_error_common(user_data, msg, args, xml_fatal_error_prefix, 1);
va_end(args);
}
void
raptor_libxml_validation_error(void* user_data, const char *msg, ...)
{
va_list args;
va_start(args, msg);
raptor_libxml_error_common(user_data, msg, args,
xml_validation_error_prefix, 1);
va_end(args);
}
void
raptor_libxml_validation_warning(void* user_data, const char *msg, ...)
{
va_list args;
raptor_sax2* sax2 = (raptor_sax2*)user_data;
int prefix_length = RAPTOR_GOOD_CAST(int, strlen(xml_validation_warning_prefix));
int length;
char *nmsg;
int msg_len;
va_start(args, msg);
raptor_libxml_update_document_locator(sax2, sax2->locator);
msg_len = RAPTOR_BAD_CAST(int, strlen(msg));
length = prefix_length + msg_len + 1;
nmsg = RAPTOR_MALLOC(char*, length);
if(nmsg) {
memcpy(nmsg, xml_validation_warning_prefix, prefix_length); /* Do not copy NUL */
memcpy(nmsg + prefix_length, msg, msg_len + 1); /* Copy NUL */
if(nmsg[length-2] == '\n')
nmsg[length-2]='\0';
}
raptor_log_error_varargs(sax2->world,
RAPTOR_LOG_LEVEL_WARN,
sax2->locator,
nmsg ? nmsg : msg,
args);
if(nmsg)
RAPTOR_FREE(char*, nmsg);
va_end(args);
}
/*
* Initialise libxml for a particular SAX2 setup
*/
void
raptor_libxml_sax_init(raptor_sax2* sax2)
{
xmlSAXHandler *sax = &sax2->sax;
sax->internalSubset = raptor_libxml_internalSubset;
sax->isStandalone = raptor_libxml_isStandalone;
sax->hasInternalSubset = raptor_libxml_hasInternalSubset;
sax->hasExternalSubset = raptor_libxml_hasExternalSubset;
sax->resolveEntity = raptor_libxml_resolveEntity;
sax->getEntity = raptor_libxml_getEntity;
sax->getParameterEntity = raptor_libxml_getParameterEntity;
sax->entityDecl = raptor_libxml_entityDecl;
sax->attributeDecl = NULL; /* attributeDecl */
sax->elementDecl = NULL; /* elementDecl */
sax->notationDecl = NULL; /* notationDecl */
sax->unparsedEntityDecl = raptor_libxml_unparsedEntityDecl;
sax->setDocumentLocator = raptor_libxml_set_document_locator;
sax->startDocument = raptor_libxml_startDocument;
sax->endDocument = raptor_libxml_endDocument;
sax->startElement= raptor_sax2_start_element;
sax->endElement= raptor_sax2_end_element;
sax->reference = NULL; /* reference */
sax->characters= raptor_sax2_characters;
sax->cdataBlock= raptor_sax2_cdata; /* like <![CDATA[...]> */
sax->ignorableWhitespace= raptor_sax2_cdata;
sax->processingInstruction = NULL; /* processingInstruction */
sax->comment = raptor_sax2_comment; /* comment */
sax->warning = (warningSAXFunc)raptor_libxml_warning;
sax->error = (errorSAXFunc)raptor_libxml_error;
sax->fatalError = (fatalErrorSAXFunc)raptor_libxml_fatal_error;
sax->serror = (xmlStructuredErrorFunc)raptor_libxml_xmlStructuredError_handler_parsing;
#ifdef RAPTOR_LIBXML_XMLSAXHANDLER_EXTERNALSUBSET
sax->externalSubset = raptor_libxml_externalSubset;
#endif
#ifdef RAPTOR_LIBXML_XMLSAXHANDLER_INITIALIZED
sax->initialized = 1;
#endif
}
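/*
 * Sketch of how this handler table is hooked up to a libxml2 push
 * parser (buffer and len are illustrative; raptor's SAX2 code does the
 * equivalent when parsing starts):
 *
 *	raptor_libxml_sax_init(sax2);
 *	sax2->xc = xmlCreatePushParserCtxt(&sax2->sax, sax2,
 *	                                   (const char*)buffer, (int)len,
 *	                                   NULL);
 */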
void
raptor_libxml_free(xmlParserCtxtPtr xc) {
libxml2_endDocument(xc);
if(xc->myDoc) {
xmlFreeDoc(xc->myDoc);
xc->myDoc = NULL;
}
xmlFreeParserCtxt(xc);
}
int
raptor_libxml_init(raptor_world* world)
{
xmlInitParser();
if(world->libxml_flags & RAPTOR_WORLD_FLAG_LIBXML_STRUCTURED_ERROR_SAVE) {
world->libxml_saved_structured_error_context = xmlGenericErrorContext;
world->libxml_saved_structured_error_handler = xmlStructuredError;
/* sets xmlGenericErrorContext and xmlStructuredError */
xmlSetStructuredErrorFunc(world,
(xmlStructuredErrorFunc)raptor_libxml_xmlStructuredError_handler_global);
}
if(world->libxml_flags & RAPTOR_WORLD_FLAG_LIBXML_GENERIC_ERROR_SAVE) {
world->libxml_saved_generic_error_context = xmlGenericErrorContext;
world->libxml_saved_generic_error_handler = xmlGenericError;
/* sets xmlGenericErrorContext and xmlGenericError */
xmlSetGenericErrorFunc(world,
(xmlGenericErrorFunc)raptor_libxml_generic_error);
}
return 0;
}
void
raptor_libxml_finish(raptor_world* world)
{
if(world->libxml_flags & RAPTOR_WORLD_FLAG_LIBXML_STRUCTURED_ERROR_SAVE)
xmlSetStructuredErrorFunc(world->libxml_saved_structured_error_context,
world->libxml_saved_structured_error_handler);
if(world->libxml_flags & RAPTOR_WORLD_FLAG_LIBXML_GENERIC_ERROR_SAVE)
xmlSetGenericErrorFunc(world->libxml_saved_generic_error_context,
world->libxml_saved_generic_error_handler);
xmlCleanupParser();
}
#if LIBXML_VERSION >= 20632
#define XML_LAST_DL XML_FROM_SCHEMATRONV
#else
#if LIBXML_VERSION >= 20621
#define XML_LAST_DL XML_FROM_I18N
#else
#if LIBXML_VERSION >= 20617
#define XML_LAST_DL XML_FROM_WRITER
#else
#if LIBXML_VERSION >= 20616
#define XML_LAST_DL XML_FROM_CHECK
#else
#if LIBXML_VERSION >= 20615
#define XML_LAST_DL XML_FROM_VALID
#else
#define XML_LAST_DL XML_FROM_XSLT
#endif
#endif
#endif
#endif
#endif
/* All other symbols not specifically below noted were added during
* the period 2-10 October 2003 which is before the minimum libxml2
* version 2.6.8 release date of Mar 23 2004.
*
* When the minimum libxml2 version goes up, the #ifdefs for
* older versions can be removed.
*/
static const char* const raptor_libxml_domain_labels[XML_LAST_DL+2]= {
NULL, /* XML_FROM_NONE */
"parser", /* XML_FROM_PARSER */
"tree", /* XML_FROM_TREE */
"namespace", /* XML_FROM_NAMESPACE */
"validity", /* XML_FROM_DTD */
"HTML parser", /* XML_FROM_HTML */
"memory", /* XML_FROM_MEMORY */
"output", /* XML_FROM_OUTPUT */
"I/O" , /* XML_FROM_IO */
"FTP", /* XML_FROM_FTP */
#if LIBXML_VERSION >= 20618
/* 2005-02-13 - v2.6.18 */
"HTTP", /* XML_FROM_HTTP */
#endif
"XInclude", /* XML_FROM_XINCLUDE */
"XPath", /* XML_FROM_XPATH */
"parser", /* XML_FROM_XPOINTER */
"regexp", /* XML_FROM_REGEXP */
"Schemas datatype", /* XML_FROM_DATATYPE */
"Schemas parser", /* XML_FROM_SCHEMASP */
"Schemas validity", /* XML_FROM_SCHEMASV */
"Relax-NG parser", /* XML_FROM_RELAXNGP */
"Relax-NG validity", /* XML_FROM_RELAXNGV */
"Catalog", /* XML_FROM_CATALOG */
"C14", /* XML_FROM_C14N */
"XSLT", /* XML_FROM_XSLT */
#if LIBXML_VERSION >= 20615
/* 2004-10-07 - v2.6.15 */
"validity", /* XML_FROM_VALID */
#endif
#if LIBXML_VERSION >= 20616
/* 2004-11-04 - v2.6.16 */
"checking", /* XML_FROM_CHECK */
#endif
#if LIBXML_VERSION >= 20617
/* 2005-01-04 - v2.6.17 */
"writer", /* XML_FROM_WRITER */
#endif
#if LIBXML_VERSION >= 20621
/* 2005-08-24 - v2.6.21 */
"module", /* XML_FROM_MODULE */
"encoding", /* XML_FROM_I18N */
#endif
#if LIBXML_VERSION >= 20632
/* 2008-04-08 - v2.6.32 */
"schematronv", /* XML_FROM_SCHEMATRONV */
#endif
NULL
};
static void
raptor_libxml_xmlStructuredError_handler_common(raptor_world *world,
raptor_locator *locator,
xmlErrorPtr err)
{
raptor_stringbuffer* sb;
char *nmsg;
raptor_log_level level = RAPTOR_LOG_LEVEL_ERROR;
if(err == NULL || err->code == XML_ERR_OK || err->level == XML_ERR_NONE)
return;
/* Do not warn about things with no location */
if(err->level == XML_ERR_WARNING && !err->file)
return;
/* XML fatal errors never cause an abort */
if(err->level == XML_ERR_FATAL)
err->level = XML_ERR_ERROR;
sb = raptor_new_stringbuffer();
if(err->domain != XML_FROM_HTML)
raptor_stringbuffer_append_counted_string(sb, (const unsigned char*)"XML ",
4, 1);
if(err->domain != XML_FROM_NONE && err->domain <= XML_LAST_DL) {
const unsigned char* label;
label = (const unsigned char*)raptor_libxml_domain_labels[(int)err->domain];
raptor_stringbuffer_append_string(sb, label, 1);
raptor_stringbuffer_append_counted_string(sb,
(const unsigned char*)" ", 1, 1);
}
if(err->level == XML_ERR_WARNING)
raptor_stringbuffer_append_counted_string(sb,
(const unsigned char*)"warning: ",
9, 1);
else /* XML_ERR_ERROR or XML_ERR_FATAL */
raptor_stringbuffer_append_counted_string(sb, (const unsigned char*)"error: ",
7, 1);
if(err->message) {
unsigned char* msg;
size_t len;
msg = (unsigned char*)err->message;
len= strlen((const char*)msg);
if(len && msg[len-1] == '\n')
msg[--len]='\0';
raptor_stringbuffer_append_counted_string(sb, msg, len, 1);
}
#if LIBXML_VERSION >= 20618
/* 2005-02-13 - v2.6.18 */
/* str1 has the detailed HTTP error */
if(err->domain == XML_FROM_HTTP && err->str1) {
unsigned char* msg;
size_t len;
msg = (unsigned char*)err->str1;
len= strlen((const char*)msg);
if(len && msg[len-1] == '\n')
msg[--len]='\0';
raptor_stringbuffer_append_counted_string(sb, (const unsigned char*)" - ",
3, 1);
raptor_stringbuffer_append_counted_string(sb, msg, len, 1);
}
#endif
/* When err->domain == XML_FROM_XPATH then err->int1 is
* the offset into err->str1, the line with the error
*/
if(err->domain == XML_FROM_XPATH && err->str1) {
raptor_stringbuffer_append_counted_string(sb, (const unsigned char*)" in ",
4, 1);
raptor_stringbuffer_append_string(sb, (const unsigned char*)err->str1, 1);
}
nmsg = (char*)raptor_stringbuffer_as_string(sb);
if(err->level == XML_ERR_FATAL)
level = RAPTOR_LOG_LEVEL_FATAL;
else if(err->level == XML_ERR_ERROR)
level = RAPTOR_LOG_LEVEL_ERROR;
else
level = RAPTOR_LOG_LEVEL_WARN;
raptor_log_error(world, level, locator, nmsg);
raptor_free_stringbuffer(sb);
}
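/* For illustration: a libxml error with domain XML_FROM_PARSER, level
 * XML_ERR_ERROR and message "Opening and ending tag mismatch\n" would be
 * logged by the handler above roughly as
 *   "XML parser error: Opening and ending tag mismatch"
 * (trailing newline stripped, domain label and severity prefixed), with
 * file/line details carried separately by the locator.
 */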
/* user_data is a raptor_world* */
static void
raptor_libxml_xmlStructuredError_handler_global(void *user_data,
xmlErrorPtr err)
{
raptor_world *world = NULL;
/* user_data may point to a raptor_world* */
if(user_data) {
world = (raptor_world*)user_data;
if(world->magic != RAPTOR2_WORLD_MAGIC)
world = NULL;
}
raptor_libxml_xmlStructuredError_handler_common(world, NULL, err);
}
/* user_data may be a raptor_sax2; err->ctxt->userData may point to a
* raptor_sax2* */
static void
raptor_libxml_xmlStructuredError_handler_parsing(void *user_data,
xmlErrorPtr err)
{
raptor_sax2* sax2 = NULL;
/* user_data may point to a raptor_sax2* */
if(user_data) {
sax2 = (raptor_sax2*)user_data;
if(sax2->magic != RAPTOR_LIBXML_MAGIC)
sax2 = NULL;
}
/* err->ctxt->userData may point to a raptor_sax2* */
if(err && err->ctxt) {
xmlParserCtxtPtr ctxt = (xmlParserCtxtPtr)err->ctxt;
if(ctxt->userData) {
sax2 = (raptor_sax2*)ctxt->userData;
if(sax2->magic != RAPTOR_LIBXML_MAGIC)
sax2 = NULL;
}
}
if(sax2)
raptor_libxml_xmlStructuredError_handler_common(sax2->world, sax2->locator,
err);
else
raptor_libxml_xmlStructuredError_handler_common(NULL, NULL, err);
}
/* end if RAPTOR_XML_LIBXML */
#endif
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_3568_6 |
crossvul-cpp_data_good_5697_0 | /*
* VMware vSockets Driver
*
* Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
/* Implementation notes:
*
* - There are two kinds of sockets: those created by user action (such as
* calling socket(2)) and those created by incoming connection request packets.
*
* - There are two "global" tables, one for bound sockets (sockets that have
* specified an address that they are responsible for) and one for connected
* sockets (sockets that have established a connection with another socket).
* These tables are "global" in that all sockets on the system are placed
* within them. - Note, though, that the bound table contains an extra entry
* for a list of unbound sockets and SOCK_DGRAM sockets will always remain in
* that list. The bound table is used solely for lookup of sockets when packets
* are received and that's not necessary for SOCK_DGRAM sockets since we create
* a datagram handle for each and need not perform a lookup. Keeping SOCK_DGRAM
* sockets out of the bound hash buckets will reduce the chance of collisions
* when looking for SOCK_STREAM sockets and prevents us from having to check the
* socket type in the hash table lookups.
*
* - Sockets created by user action will either be "client" sockets that
* initiate a connection or "server" sockets that listen for connections; we do
* not support simultaneous connects (two "client" sockets connecting).
*
* - "Server" sockets are referred to as listener sockets throughout this
* implementation because they are in the SS_LISTEN state. When a connection
* request is received (the second kind of socket mentioned above), we create a
* new socket and refer to it as a pending socket. These pending sockets are
* placed on the pending connection list of the listener socket. When future
* packets are received for the address the listener socket is bound to, we
* check if the source of the packet is from one that has an existing pending
* connection. If it does, we process the packet for the pending socket. When
* that socket reaches the connected state, it is removed from the listener
* socket's pending list and enqueued in the listener socket's accept queue.
* Callers of accept(2) will accept connected sockets from the listener socket's
* accept queue. If the socket cannot be accepted for some reason then it is
* marked rejected. Once the connection is accepted, it is owned by the user
* process and the responsibility for cleanup falls with that user process.
*
* - It is possible that these pending sockets will never reach the connected
* state; in fact, we may never receive another packet after the connection
* request. Because of this, we must schedule a cleanup function to run in the
* future, after some amount of time passes where a connection should have been
* established. This function ensures that the socket is off all lists so it
* cannot be retrieved, then drops all references to the socket so it is cleaned
* up (sock_put() -> sk_free() -> our sk_destruct implementation). Note this
* function will also cleanup rejected sockets, those that reach the connected
* state but leave it before they have been accepted.
*
* - Sockets created by user action will be cleaned up when the user process
* calls close(2), causing our release implementation to be called. Our release
* implementation will perform some cleanup then drop the last reference so our
* sk_destruct implementation is invoked. Our sk_destruct implementation will
* perform additional cleanup that's common for both types of sockets.
*
* - A socket's reference count is what ensures that the structure won't be
* freed. Each entry in a list (such as the "global" bound and connected tables
* and the listener socket's pending list and connected queue) ensures a
* reference. When we defer work until process context and pass a socket as our
* argument, we must ensure the reference count is increased to ensure the
* socket isn't freed before the function is run; the deferred function will
* then drop the reference.
*/
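/* Illustrative sketch (not part of the original file): a minimal
 * user-space client exercising the stream-socket flow described above.
 * The peer CID and port are assumptions for the example, and error
 * handling is abbreviated.
 */
#if 0
#include <sys/socket.h>
#include <linux/vm_sockets.h>
#include <unistd.h>

static int example_vsock_client(void)
{
	struct sockaddr_vm addr = {
		.svm_family = AF_VSOCK,
		.svm_cid = VMADDR_CID_HOST,	/* assumed peer CID */
		.svm_port = 1234,		/* assumed service port */
	};
	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	/* connect() drives the SS_CONNECTING -> SS_CONNECTED transition
	 * implemented by vsock_stream_connect() in this file. */
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}
	write(fd, "ping", 4);
	close(fd);
	return 0;
}
#endif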
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/cred.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <linux/socket.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/sock.h>
#include "af_vsock.h"
static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
static void vsock_sk_destruct(struct sock *sk);
static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
/* Protocol family. */
static struct proto vsock_proto = {
.name = "AF_VSOCK",
.owner = THIS_MODULE,
.obj_size = sizeof(struct vsock_sock),
};
/* The default peer timeout indicates how long we will wait for a peer response
* to a control message.
*/
#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)
#define SS_LISTEN 255
static const struct vsock_transport *transport;
static DEFINE_MUTEX(vsock_register_mutex);
/**** EXPORTS ****/
/* Get the ID of the local context. This is transport dependent. */
int vm_sockets_get_local_cid(void)
{
return transport->get_local_cid();
}
EXPORT_SYMBOL_GPL(vm_sockets_get_local_cid);
/**** UTILS ****/
/* Each bound VSocket is stored in the bind hash table and each connected
* VSocket is stored in the connected hash table.
*
* Unbound sockets are all put on the same list attached to the end of the hash
* table (vsock_unbound_sockets). Bound sockets are added to the hash table in
* the bucket that their local address hashes to (vsock_bound_sockets(addr)
* represents the list that addr hashes to).
*
* Specifically, we initialize the vsock_bind_table array to a size of
* VSOCK_HASH_SIZE + 1 so that vsock_bind_table[0] through
* vsock_bind_table[VSOCK_HASH_SIZE - 1] are for bound sockets and
* vsock_bind_table[VSOCK_HASH_SIZE] is for unbound sockets. The hash function
* mods with VSOCK_HASH_SIZE - 1 to ensure this.
*/
#define VSOCK_HASH_SIZE 251
#define MAX_PORT_RETRIES 24
#define VSOCK_HASH(addr) ((addr)->svm_port % (VSOCK_HASH_SIZE - 1))
#define vsock_bound_sockets(addr) (&vsock_bind_table[VSOCK_HASH(addr)])
#define vsock_unbound_sockets (&vsock_bind_table[VSOCK_HASH_SIZE])
/* XXX This can probably be implemented in a better way. */
#define VSOCK_CONN_HASH(src, dst) \
(((src)->svm_cid ^ (dst)->svm_port) % (VSOCK_HASH_SIZE - 1))
#define vsock_connected_sockets(src, dst) \
(&vsock_connected_table[VSOCK_CONN_HASH(src, dst)])
#define vsock_connected_sockets_vsk(vsk) \
vsock_connected_sockets(&(vsk)->remote_addr, &(vsk)->local_addr)
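/* Worked example (illustrative): with VSOCK_HASH_SIZE == 251, an address
 * with svm_port == 1000 hashes to bucket 1000 % 250 == 0, and one with
 * svm_port == 251 hashes to 251 % 250 == 1. Index 251 is reserved for
 * the unbound list, which the modulo by VSOCK_HASH_SIZE - 1 can never
 * produce.
 */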
static struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1];
static struct list_head vsock_connected_table[VSOCK_HASH_SIZE];
static DEFINE_SPINLOCK(vsock_table_lock);
static __init void vsock_init_tables(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(vsock_bind_table); i++)
INIT_LIST_HEAD(&vsock_bind_table[i]);
for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++)
INIT_LIST_HEAD(&vsock_connected_table[i]);
}
static void __vsock_insert_bound(struct list_head *list,
struct vsock_sock *vsk)
{
sock_hold(&vsk->sk);
list_add(&vsk->bound_table, list);
}
static void __vsock_insert_connected(struct list_head *list,
struct vsock_sock *vsk)
{
sock_hold(&vsk->sk);
list_add(&vsk->connected_table, list);
}
static void __vsock_remove_bound(struct vsock_sock *vsk)
{
list_del_init(&vsk->bound_table);
sock_put(&vsk->sk);
}
static void __vsock_remove_connected(struct vsock_sock *vsk)
{
list_del_init(&vsk->connected_table);
sock_put(&vsk->sk);
}
static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr)
{
struct vsock_sock *vsk;
list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table)
if (addr->svm_port == vsk->local_addr.svm_port)
return sk_vsock(vsk);
return NULL;
}
static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src,
struct sockaddr_vm *dst)
{
struct vsock_sock *vsk;
list_for_each_entry(vsk, vsock_connected_sockets(src, dst),
connected_table) {
if (vsock_addr_equals_addr(src, &vsk->remote_addr) &&
dst->svm_port == vsk->local_addr.svm_port) {
return sk_vsock(vsk);
}
}
return NULL;
}
static bool __vsock_in_bound_table(struct vsock_sock *vsk)
{
return !list_empty(&vsk->bound_table);
}
static bool __vsock_in_connected_table(struct vsock_sock *vsk)
{
return !list_empty(&vsk->connected_table);
}
static void vsock_insert_unbound(struct vsock_sock *vsk)
{
spin_lock_bh(&vsock_table_lock);
__vsock_insert_bound(vsock_unbound_sockets, vsk);
spin_unlock_bh(&vsock_table_lock);
}
void vsock_insert_connected(struct vsock_sock *vsk)
{
struct list_head *list = vsock_connected_sockets(
&vsk->remote_addr, &vsk->local_addr);
spin_lock_bh(&vsock_table_lock);
__vsock_insert_connected(list, vsk);
spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_insert_connected);
void vsock_remove_bound(struct vsock_sock *vsk)
{
spin_lock_bh(&vsock_table_lock);
__vsock_remove_bound(vsk);
spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_bound);
void vsock_remove_connected(struct vsock_sock *vsk)
{
spin_lock_bh(&vsock_table_lock);
__vsock_remove_connected(vsk);
spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_connected);
struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr)
{
struct sock *sk;
spin_lock_bh(&vsock_table_lock);
sk = __vsock_find_bound_socket(addr);
if (sk)
sock_hold(sk);
spin_unlock_bh(&vsock_table_lock);
return sk;
}
EXPORT_SYMBOL_GPL(vsock_find_bound_socket);
struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
struct sockaddr_vm *dst)
{
struct sock *sk;
spin_lock_bh(&vsock_table_lock);
sk = __vsock_find_connected_socket(src, dst);
if (sk)
sock_hold(sk);
spin_unlock_bh(&vsock_table_lock);
return sk;
}
EXPORT_SYMBOL_GPL(vsock_find_connected_socket);
static bool vsock_in_bound_table(struct vsock_sock *vsk)
{
bool ret;
spin_lock_bh(&vsock_table_lock);
ret = __vsock_in_bound_table(vsk);
spin_unlock_bh(&vsock_table_lock);
return ret;
}
static bool vsock_in_connected_table(struct vsock_sock *vsk)
{
bool ret;
spin_lock_bh(&vsock_table_lock);
ret = __vsock_in_connected_table(vsk);
spin_unlock_bh(&vsock_table_lock);
return ret;
}
void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
{
int i;
spin_lock_bh(&vsock_table_lock);
for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
struct vsock_sock *vsk;
list_for_each_entry(vsk, &vsock_connected_table[i],
connected_table)
fn(sk_vsock(vsk));
}
spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_for_each_connected_socket);
void vsock_add_pending(struct sock *listener, struct sock *pending)
{
struct vsock_sock *vlistener;
struct vsock_sock *vpending;
vlistener = vsock_sk(listener);
vpending = vsock_sk(pending);
sock_hold(pending);
sock_hold(listener);
list_add_tail(&vpending->pending_links, &vlistener->pending_links);
}
EXPORT_SYMBOL_GPL(vsock_add_pending);
void vsock_remove_pending(struct sock *listener, struct sock *pending)
{
struct vsock_sock *vpending = vsock_sk(pending);
list_del_init(&vpending->pending_links);
sock_put(listener);
sock_put(pending);
}
EXPORT_SYMBOL_GPL(vsock_remove_pending);
void vsock_enqueue_accept(struct sock *listener, struct sock *connected)
{
struct vsock_sock *vlistener;
struct vsock_sock *vconnected;
vlistener = vsock_sk(listener);
vconnected = vsock_sk(connected);
sock_hold(connected);
sock_hold(listener);
list_add_tail(&vconnected->accept_queue, &vlistener->accept_queue);
}
EXPORT_SYMBOL_GPL(vsock_enqueue_accept);
static struct sock *vsock_dequeue_accept(struct sock *listener)
{
struct vsock_sock *vlistener;
struct vsock_sock *vconnected;
vlistener = vsock_sk(listener);
if (list_empty(&vlistener->accept_queue))
return NULL;
vconnected = list_entry(vlistener->accept_queue.next,
struct vsock_sock, accept_queue);
list_del_init(&vconnected->accept_queue);
sock_put(listener);
/* The caller will need a reference on the connected socket so we let
* it call sock_put().
*/
return sk_vsock(vconnected);
}
static bool vsock_is_accept_queue_empty(struct sock *sk)
{
struct vsock_sock *vsk = vsock_sk(sk);
return list_empty(&vsk->accept_queue);
}
static bool vsock_is_pending(struct sock *sk)
{
struct vsock_sock *vsk = vsock_sk(sk);
return !list_empty(&vsk->pending_links);
}
static int vsock_send_shutdown(struct sock *sk, int mode)
{
return transport->shutdown(vsock_sk(sk), mode);
}
void vsock_pending_work(struct work_struct *work)
{
struct sock *sk;
struct sock *listener;
struct vsock_sock *vsk;
bool cleanup;
vsk = container_of(work, struct vsock_sock, dwork.work);
sk = sk_vsock(vsk);
listener = vsk->listener;
cleanup = true;
lock_sock(listener);
lock_sock(sk);
if (vsock_is_pending(sk)) {
vsock_remove_pending(listener, sk);
} else if (!vsk->rejected) {
/* We are not on the pending list and accept() did not reject
* us, so we must have been accepted by our user process. We
* just need to drop our references to the sockets and be on
* our way.
*/
cleanup = false;
goto out;
}
listener->sk_ack_backlog--;
/* We need to remove ourselves from the global connected sockets list so
* incoming packets can't find this socket, and to reduce the reference
* count.
*/
if (vsock_in_connected_table(vsk))
vsock_remove_connected(vsk);
sk->sk_state = SS_FREE;
out:
release_sock(sk);
release_sock(listener);
if (cleanup)
sock_put(sk);
sock_put(sk);
sock_put(listener);
}
EXPORT_SYMBOL_GPL(vsock_pending_work);
/**** SOCKET OPERATIONS ****/
static int __vsock_bind_stream(struct vsock_sock *vsk,
struct sockaddr_vm *addr)
{
static u32 port = LAST_RESERVED_PORT + 1;
struct sockaddr_vm new_addr;
vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port);
if (addr->svm_port == VMADDR_PORT_ANY) {
bool found = false;
unsigned int i;
for (i = 0; i < MAX_PORT_RETRIES; i++) {
if (port <= LAST_RESERVED_PORT)
port = LAST_RESERVED_PORT + 1;
new_addr.svm_port = port++;
if (!__vsock_find_bound_socket(&new_addr)) {
found = true;
break;
}
}
if (!found)
return -EADDRNOTAVAIL;
} else {
/* If port is in reserved range, ensure caller
* has necessary privileges.
*/
if (addr->svm_port <= LAST_RESERVED_PORT &&
!capable(CAP_NET_BIND_SERVICE)) {
return -EACCES;
}
if (__vsock_find_bound_socket(&new_addr))
return -EADDRINUSE;
}
vsock_addr_init(&vsk->local_addr, new_addr.svm_cid, new_addr.svm_port);
/* Remove stream sockets from the unbound list and add them to the hash
* table for easy lookup by its address. The unbound list is simply an
* extra entry at the end of the hash table, a trick used by AF_UNIX.
*/
__vsock_remove_bound(vsk);
__vsock_insert_bound(vsock_bound_sockets(&vsk->local_addr), vsk);
return 0;
}
static int __vsock_bind_dgram(struct vsock_sock *vsk,
struct sockaddr_vm *addr)
{
return transport->dgram_bind(vsk, addr);
}
static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
{
struct vsock_sock *vsk = vsock_sk(sk);
u32 cid;
int retval;
/* First ensure this socket isn't already bound. */
if (vsock_addr_bound(&vsk->local_addr))
return -EINVAL;
/* Now bind to the provided address or select appropriate values if
* none are provided (VMADDR_CID_ANY and VMADDR_PORT_ANY). Note that
* like AF_INET prevents binding to a non-local IP address (in most
* cases), we only allow binding to the local CID.
*/
cid = transport->get_local_cid();
if (addr->svm_cid != cid && addr->svm_cid != VMADDR_CID_ANY)
return -EADDRNOTAVAIL;
switch (sk->sk_socket->type) {
case SOCK_STREAM:
spin_lock_bh(&vsock_table_lock);
retval = __vsock_bind_stream(vsk, addr);
spin_unlock_bh(&vsock_table_lock);
break;
case SOCK_DGRAM:
retval = __vsock_bind_dgram(vsk, addr);
break;
default:
retval = -EINVAL;
break;
}
return retval;
}
struct sock *__vsock_create(struct net *net,
struct socket *sock,
struct sock *parent,
gfp_t priority,
unsigned short type)
{
struct sock *sk;
struct vsock_sock *psk;
struct vsock_sock *vsk;
sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto);
if (!sk)
return NULL;
sock_init_data(sock, sk);
/* sk->sk_type is normally set in sock_init_data, but only if sock is
* non-NULL. We make sure that our sockets always have a type by
* setting it here if needed.
*/
if (!sock)
sk->sk_type = type;
vsk = vsock_sk(sk);
vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
sk->sk_destruct = vsock_sk_destruct;
sk->sk_backlog_rcv = vsock_queue_rcv_skb;
sk->sk_state = 0;
sock_reset_flag(sk, SOCK_DONE);
INIT_LIST_HEAD(&vsk->bound_table);
INIT_LIST_HEAD(&vsk->connected_table);
vsk->listener = NULL;
INIT_LIST_HEAD(&vsk->pending_links);
INIT_LIST_HEAD(&vsk->accept_queue);
vsk->rejected = false;
vsk->sent_request = false;
vsk->ignore_connecting_rst = false;
vsk->peer_shutdown = 0;
psk = parent ? vsock_sk(parent) : NULL;
if (parent) {
vsk->trusted = psk->trusted;
vsk->owner = get_cred(psk->owner);
vsk->connect_timeout = psk->connect_timeout;
} else {
vsk->trusted = capable(CAP_NET_ADMIN);
vsk->owner = get_current_cred();
vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT;
}
if (transport->init(vsk, psk) < 0) {
sk_free(sk);
return NULL;
}
if (sock)
vsock_insert_unbound(vsk);
return sk;
}
EXPORT_SYMBOL_GPL(__vsock_create);
static void __vsock_release(struct sock *sk)
{
if (sk) {
struct sk_buff *skb;
struct sock *pending;
struct vsock_sock *vsk;
vsk = vsock_sk(sk);
pending = NULL; /* Compiler warning. */
if (vsock_in_bound_table(vsk))
vsock_remove_bound(vsk);
if (vsock_in_connected_table(vsk))
vsock_remove_connected(vsk);
transport->release(vsk);
lock_sock(sk);
sock_orphan(sk);
sk->sk_shutdown = SHUTDOWN_MASK;
while ((skb = skb_dequeue(&sk->sk_receive_queue)))
kfree_skb(skb);
/* Clean up any sockets that never were accepted. */
while ((pending = vsock_dequeue_accept(sk)) != NULL) {
__vsock_release(pending);
sock_put(pending);
}
release_sock(sk);
sock_put(sk);
}
}
static void vsock_sk_destruct(struct sock *sk)
{
struct vsock_sock *vsk = vsock_sk(sk);
transport->destruct(vsk);
/* When clearing these addresses, there's no need to set the family and
* possibly register the address family with the kernel.
*/
vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
put_cred(vsk->owner);
}
static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
int err;
err = sock_queue_rcv_skb(sk, skb);
if (err)
kfree_skb(skb);
return err;
}
s64 vsock_stream_has_data(struct vsock_sock *vsk)
{
return transport->stream_has_data(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_data);
s64 vsock_stream_has_space(struct vsock_sock *vsk)
{
return transport->stream_has_space(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_space);
static int vsock_release(struct socket *sock)
{
__vsock_release(sock->sk);
sock->sk = NULL;
sock->state = SS_FREE;
return 0;
}
static int
vsock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
int err;
struct sock *sk;
struct sockaddr_vm *vm_addr;
sk = sock->sk;
if (vsock_addr_cast(addr, addr_len, &vm_addr) != 0)
return -EINVAL;
lock_sock(sk);
err = __vsock_bind(sk, vm_addr);
release_sock(sk);
return err;
}
static int vsock_getname(struct socket *sock,
struct sockaddr *addr, int *addr_len, int peer)
{
int err;
struct sock *sk;
struct vsock_sock *vsk;
struct sockaddr_vm *vm_addr;
sk = sock->sk;
vsk = vsock_sk(sk);
err = 0;
lock_sock(sk);
if (peer) {
if (sock->state != SS_CONNECTED) {
err = -ENOTCONN;
goto out;
}
vm_addr = &vsk->remote_addr;
} else {
vm_addr = &vsk->local_addr;
}
if (!vm_addr) {
err = -EINVAL;
goto out;
}
/* sys_getsockname() and sys_getpeername() pass us a
* MAX_SOCK_ADDR-sized buffer and don't set addr_len. Unfortunately
* that macro is defined in socket.c instead of .h, so we hardcode its
* value here.
*/
BUILD_BUG_ON(sizeof(*vm_addr) > 128);
memcpy(addr, vm_addr, sizeof(*vm_addr));
*addr_len = sizeof(*vm_addr);
out:
release_sock(sk);
return err;
}
static int vsock_shutdown(struct socket *sock, int mode)
{
int err;
struct sock *sk;
/* User level uses SHUT_RD (0) and SHUT_WR (1), but the kernel uses
* RCV_SHUTDOWN (1) and SEND_SHUTDOWN (2), so we must increment mode
* here like the other address families do. Note also that the
* increment makes SHUT_RDWR (2) into RCV_SHUTDOWN | SEND_SHUTDOWN (3),
* which is what we want.
*/
mode++;
if ((mode & ~SHUTDOWN_MASK) || !mode)
return -EINVAL;
/* If this is a STREAM socket and it is not connected then bail out
* immediately. If it is a DGRAM socket then we must first kick the
* socket so that it wakes up from any sleeping calls, for example
* recv(), and then afterwards return the error.
*/
sk = sock->sk;
if (sock->state == SS_UNCONNECTED) {
err = -ENOTCONN;
if (sk->sk_type == SOCK_STREAM)
return err;
} else {
sock->state = SS_DISCONNECTING;
err = 0;
}
/* Receive and send shutdowns are treated alike. */
mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
if (mode) {
lock_sock(sk);
sk->sk_shutdown |= mode;
sk->sk_state_change(sk);
release_sock(sk);
if (sk->sk_type == SOCK_STREAM) {
sock_reset_flag(sk, SOCK_DONE);
vsock_send_shutdown(sk, mode);
}
}
return err;
}
static unsigned int vsock_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk;
unsigned int mask;
struct vsock_sock *vsk;
sk = sock->sk;
vsk = vsock_sk(sk);
poll_wait(file, sk_sleep(sk), wait);
mask = 0;
if (sk->sk_err)
/* Signify that there has been an error on this socket. */
mask |= POLLERR;
/* INET sockets treat local write shutdown and peer write shutdown as a
* case of POLLHUP set.
*/
if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
((sk->sk_shutdown & SEND_SHUTDOWN) &&
(vsk->peer_shutdown & SEND_SHUTDOWN))) {
mask |= POLLHUP;
}
if (sk->sk_shutdown & RCV_SHUTDOWN ||
vsk->peer_shutdown & SEND_SHUTDOWN) {
mask |= POLLRDHUP;
}
if (sock->type == SOCK_DGRAM) {
/* For datagram sockets we can read if there is something in
* the queue and write as long as the socket isn't shutdown for
* sending.
*/
if (!skb_queue_empty(&sk->sk_receive_queue) ||
(sk->sk_shutdown & RCV_SHUTDOWN)) {
mask |= POLLIN | POLLRDNORM;
}
if (!(sk->sk_shutdown & SEND_SHUTDOWN))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
} else if (sock->type == SOCK_STREAM) {
lock_sock(sk);
/* Listening sockets that have connections in their accept
* queue can be read.
*/
if (sk->sk_state == SS_LISTEN
&& !vsock_is_accept_queue_empty(sk))
mask |= POLLIN | POLLRDNORM;
/* If there is something in the queue then we can read. */
if (transport->stream_is_active(vsk) &&
!(sk->sk_shutdown & RCV_SHUTDOWN)) {
bool data_ready_now = false;
int ret = transport->notify_poll_in(
vsk, 1, &data_ready_now);
if (ret < 0) {
mask |= POLLERR;
} else {
if (data_ready_now)
mask |= POLLIN | POLLRDNORM;
}
}
/* Sockets whose connections have been closed, reset, or
* terminated should also be considered read, and we check the
* shutdown flag for that.
*/
if (sk->sk_shutdown & RCV_SHUTDOWN ||
vsk->peer_shutdown & SEND_SHUTDOWN) {
mask |= POLLIN | POLLRDNORM;
}
/* Connected sockets that can produce data can be written. */
if (sk->sk_state == SS_CONNECTED) {
if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
bool space_avail_now = false;
int ret = transport->notify_poll_out(
vsk, 1, &space_avail_now);
if (ret < 0) {
mask |= POLLERR;
} else {
if (space_avail_now)
/* Remove POLLWRBAND since INET
* sockets are not setting it.
*/
mask |= POLLOUT | POLLWRNORM;
}
}
}
/* Simulate INET socket poll behaviors, which sets
* POLLOUT|POLLWRNORM when peer is closed and nothing to read,
* but local send is not shutdown.
*/
if (sk->sk_state == SS_UNCONNECTED) {
if (!(sk->sk_shutdown & SEND_SHUTDOWN))
mask |= POLLOUT | POLLWRNORM;
}
release_sock(sk);
}
return mask;
}
static int vsock_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
int err;
struct sock *sk;
struct vsock_sock *vsk;
struct sockaddr_vm *remote_addr;
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
/* For now, MSG_DONTWAIT is always assumed... */
err = 0;
sk = sock->sk;
vsk = vsock_sk(sk);
lock_sock(sk);
if (!vsock_addr_bound(&vsk->local_addr)) {
struct sockaddr_vm local_addr;
vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
err = __vsock_bind(sk, &local_addr);
if (err != 0)
goto out;
}
/* If the provided message contains an address, use that. Otherwise
* fall back on the socket's remote handle (if it has been connected).
*/
if (msg->msg_name &&
vsock_addr_cast(msg->msg_name, msg->msg_namelen,
&remote_addr) == 0) {
/* Ensure this address is of the right type and is a valid
* destination.
*/
if (remote_addr->svm_cid == VMADDR_CID_ANY)
remote_addr->svm_cid = transport->get_local_cid();
if (!vsock_addr_bound(remote_addr)) {
err = -EINVAL;
goto out;
}
} else if (sock->state == SS_CONNECTED) {
remote_addr = &vsk->remote_addr;
if (remote_addr->svm_cid == VMADDR_CID_ANY)
remote_addr->svm_cid = transport->get_local_cid();
/* XXX Should connect() or this function ensure remote_addr is
* bound?
*/
if (!vsock_addr_bound(&vsk->remote_addr)) {
err = -EINVAL;
goto out;
}
} else {
err = -EINVAL;
goto out;
}
if (!transport->dgram_allow(remote_addr->svm_cid,
remote_addr->svm_port)) {
err = -EINVAL;
goto out;
}
err = transport->dgram_enqueue(vsk, remote_addr, msg->msg_iov, len);
out:
release_sock(sk);
return err;
}
static int vsock_dgram_connect(struct socket *sock,
struct sockaddr *addr, int addr_len, int flags)
{
int err;
struct sock *sk;
struct vsock_sock *vsk;
struct sockaddr_vm *remote_addr;
sk = sock->sk;
vsk = vsock_sk(sk);
err = vsock_addr_cast(addr, addr_len, &remote_addr);
if (err == -EAFNOSUPPORT && remote_addr->svm_family == AF_UNSPEC) {
lock_sock(sk);
vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY,
VMADDR_PORT_ANY);
sock->state = SS_UNCONNECTED;
release_sock(sk);
return 0;
} else if (err != 0)
return -EINVAL;
lock_sock(sk);
if (!vsock_addr_bound(&vsk->local_addr)) {
struct sockaddr_vm local_addr;
vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
err = __vsock_bind(sk, &local_addr);
if (err != 0)
goto out;
}
if (!transport->dgram_allow(remote_addr->svm_cid,
remote_addr->svm_port)) {
err = -EINVAL;
goto out;
}
memcpy(&vsk->remote_addr, remote_addr, sizeof(vsk->remote_addr));
sock->state = SS_CONNECTED;
out:
release_sock(sk);
return err;
}
static int vsock_dgram_recvmsg(struct kiocb *kiocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
return transport->dgram_dequeue(kiocb, vsock_sk(sock->sk), msg, len,
flags);
}
static const struct proto_ops vsock_dgram_ops = {
.family = PF_VSOCK,
.owner = THIS_MODULE,
.release = vsock_release,
.bind = vsock_bind,
.connect = vsock_dgram_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = vsock_getname,
.poll = vsock_poll,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = vsock_shutdown,
.setsockopt = sock_no_setsockopt,
.getsockopt = sock_no_getsockopt,
.sendmsg = vsock_dgram_sendmsg,
.recvmsg = vsock_dgram_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
};
static void vsock_connect_timeout(struct work_struct *work)
{
struct sock *sk;
struct vsock_sock *vsk;
vsk = container_of(work, struct vsock_sock, dwork.work);
sk = sk_vsock(vsk);
lock_sock(sk);
if (sk->sk_state == SS_CONNECTING &&
(sk->sk_shutdown != SHUTDOWN_MASK)) {
sk->sk_state = SS_UNCONNECTED;
sk->sk_err = ETIMEDOUT;
sk->sk_error_report(sk);
}
release_sock(sk);
sock_put(sk);
}
static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
int addr_len, int flags)
{
int err;
struct sock *sk;
struct vsock_sock *vsk;
struct sockaddr_vm *remote_addr;
long timeout;
DEFINE_WAIT(wait);
err = 0;
sk = sock->sk;
vsk = vsock_sk(sk);
lock_sock(sk);
/* XXX AF_UNSPEC should make us disconnect like AF_INET. */
switch (sock->state) {
case SS_CONNECTED:
err = -EISCONN;
goto out;
case SS_DISCONNECTING:
err = -EINVAL;
goto out;
case SS_CONNECTING:
/* This continues on so we can move sock into the SS_CONNECTED
* state once the connection has completed (at which point err
* will be set to zero also). Otherwise, we will either wait
* for the connection or return -EALREADY should this be a
* non-blocking call.
*/
err = -EALREADY;
break;
default:
if ((sk->sk_state == SS_LISTEN) ||
vsock_addr_cast(addr, addr_len, &remote_addr) != 0) {
err = -EINVAL;
goto out;
}
/* The hypervisor and well-known contexts do not have socket
* endpoints.
*/
if (!transport->stream_allow(remote_addr->svm_cid,
remote_addr->svm_port)) {
err = -ENETUNREACH;
goto out;
}
/* Set the remote address that we are connecting to. */
memcpy(&vsk->remote_addr, remote_addr,
sizeof(vsk->remote_addr));
/* Autobind this socket to the local address if necessary. */
if (!vsock_addr_bound(&vsk->local_addr)) {
struct sockaddr_vm local_addr;
vsock_addr_init(&local_addr, VMADDR_CID_ANY,
VMADDR_PORT_ANY);
err = __vsock_bind(sk, &local_addr);
if (err != 0)
goto out;
}
sk->sk_state = SS_CONNECTING;
err = transport->connect(vsk);
if (err < 0)
goto out;
/* Mark sock as connecting and set the error code to in
* progress in case this is a non-blocking connect.
*/
sock->state = SS_CONNECTING;
err = -EINPROGRESS;
}
/* The receive path will handle all communication until we are able to
* enter the connected state. Here we wait for the connection to be
* completed or a notification of an error.
*/
timeout = vsk->connect_timeout;
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
while (sk->sk_state != SS_CONNECTED && sk->sk_err == 0) {
if (flags & O_NONBLOCK) {
/* If we're not going to block, we schedule a timeout
* function to generate a timeout on the connection
* attempt, in case the peer doesn't respond in a
* timely manner. We hold on to the socket until the
* timeout fires.
*/
sock_hold(sk);
INIT_DELAYED_WORK(&vsk->dwork,
vsock_connect_timeout);
schedule_delayed_work(&vsk->dwork, timeout);
/* Skip ahead to preserve error code set above. */
goto out_wait;
}
release_sock(sk);
timeout = schedule_timeout(timeout);
lock_sock(sk);
if (signal_pending(current)) {
err = sock_intr_errno(timeout);
goto out_wait_error;
} else if (timeout == 0) {
err = -ETIMEDOUT;
goto out_wait_error;
}
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
}
if (sk->sk_err) {
err = -sk->sk_err;
goto out_wait_error;
} else
err = 0;
out_wait:
finish_wait(sk_sleep(sk), &wait);
out:
release_sock(sk);
return err;
out_wait_error:
sk->sk_state = SS_UNCONNECTED;
sock->state = SS_UNCONNECTED;
goto out_wait;
}
static int vsock_accept(struct socket *sock, struct socket *newsock, int flags)
{
struct sock *listener;
int err;
struct sock *connected;
struct vsock_sock *vconnected;
long timeout;
DEFINE_WAIT(wait);
err = 0;
listener = sock->sk;
lock_sock(listener);
if (sock->type != SOCK_STREAM) {
err = -EOPNOTSUPP;
goto out;
}
if (listener->sk_state != SS_LISTEN) {
err = -EINVAL;
goto out;
}
/* Wait for child sockets to appear; these are the new sockets
* created upon connection establishment.
*/
timeout = sock_sndtimeo(listener, flags & O_NONBLOCK);
prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
while ((connected = vsock_dequeue_accept(listener)) == NULL &&
listener->sk_err == 0) {
release_sock(listener);
timeout = schedule_timeout(timeout);
lock_sock(listener);
if (signal_pending(current)) {
err = sock_intr_errno(timeout);
goto out_wait;
} else if (timeout == 0) {
err = -EAGAIN;
goto out_wait;
}
prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
}
if (listener->sk_err)
err = -listener->sk_err;
if (connected) {
listener->sk_ack_backlog--;
lock_sock(connected);
vconnected = vsock_sk(connected);
/* If the listener socket has received an error, then we should
* reject this socket and return. Note that we simply mark the
* socket rejected, drop our reference, and let the cleanup
* function handle the cleanup; the fact that we found it in
* the listener's accept queue guarantees that the cleanup
* function hasn't run yet.
*/
if (err) {
vconnected->rejected = true;
release_sock(connected);
sock_put(connected);
goto out_wait;
}
newsock->state = SS_CONNECTED;
sock_graft(connected, newsock);
release_sock(connected);
sock_put(connected);
}
out_wait:
finish_wait(sk_sleep(listener), &wait);
out:
release_sock(listener);
return err;
}
static int vsock_listen(struct socket *sock, int backlog)
{
int err;
struct sock *sk;
struct vsock_sock *vsk;
sk = sock->sk;
lock_sock(sk);
if (sock->type != SOCK_STREAM) {
err = -EOPNOTSUPP;
goto out;
}
if (sock->state != SS_UNCONNECTED) {
err = -EINVAL;
goto out;
}
vsk = vsock_sk(sk);
if (!vsock_addr_bound(&vsk->local_addr)) {
err = -EINVAL;
goto out;
}
sk->sk_max_ack_backlog = backlog;
sk->sk_state = SS_LISTEN;
err = 0;
out:
release_sock(sk);
return err;
}
static int vsock_stream_setsockopt(struct socket *sock,
int level,
int optname,
char __user *optval,
unsigned int optlen)
{
int err;
struct sock *sk;
struct vsock_sock *vsk;
u64 val;
if (level != AF_VSOCK)
return -ENOPROTOOPT;
#define COPY_IN(_v) \
do { \
if (optlen < sizeof(_v)) { \
err = -EINVAL; \
goto exit; \
} \
if (copy_from_user(&_v, optval, sizeof(_v)) != 0) { \
err = -EFAULT; \
goto exit; \
} \
} while (0)
err = 0;
sk = sock->sk;
vsk = vsock_sk(sk);
lock_sock(sk);
switch (optname) {
case SO_VM_SOCKETS_BUFFER_SIZE:
COPY_IN(val);
transport->set_buffer_size(vsk, val);
break;
case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
COPY_IN(val);
transport->set_max_buffer_size(vsk, val);
break;
case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
COPY_IN(val);
transport->set_min_buffer_size(vsk, val);
break;
case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
struct timeval tv;
COPY_IN(tv);
if (tv.tv_sec >= 0 && tv.tv_usec < USEC_PER_SEC &&
tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)) {
vsk->connect_timeout = tv.tv_sec * HZ +
DIV_ROUND_UP(tv.tv_usec, (1000000 / HZ));
if (vsk->connect_timeout == 0)
vsk->connect_timeout =
VSOCK_DEFAULT_CONNECT_TIMEOUT;
} else {
err = -ERANGE;
}
break;
}
default:
err = -ENOPROTOOPT;
break;
}
#undef COPY_IN
exit:
release_sock(sk);
return err;
}
static int vsock_stream_getsockopt(struct socket *sock,
int level, int optname,
char __user *optval,
int __user *optlen)
{
int err;
int len;
struct sock *sk;
struct vsock_sock *vsk;
u64 val;
if (level != AF_VSOCK)
return -ENOPROTOOPT;
err = get_user(len, optlen);
if (err != 0)
return err;
#define COPY_OUT(_v) \
do { \
if (len < sizeof(_v)) \
return -EINVAL; \
\
len = sizeof(_v); \
if (copy_to_user(optval, &_v, len) != 0) \
return -EFAULT; \
\
} while (0)
err = 0;
sk = sock->sk;
vsk = vsock_sk(sk);
switch (optname) {
case SO_VM_SOCKETS_BUFFER_SIZE:
val = transport->get_buffer_size(vsk);
COPY_OUT(val);
break;
case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
val = transport->get_max_buffer_size(vsk);
COPY_OUT(val);
break;
case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
val = transport->get_min_buffer_size(vsk);
COPY_OUT(val);
break;
case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
struct timeval tv;
tv.tv_sec = vsk->connect_timeout / HZ;
tv.tv_usec =
(vsk->connect_timeout -
tv.tv_sec * HZ) * (1000000 / HZ);
COPY_OUT(tv);
break;
}
default:
return -ENOPROTOOPT;
}
err = put_user(len, optlen);
if (err != 0)
return -EFAULT;
#undef COPY_OUT
return 0;
}
static int vsock_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk;
struct vsock_sock *vsk;
ssize_t total_written;
long timeout;
int err;
struct vsock_transport_send_notify_data send_data;
DEFINE_WAIT(wait);
sk = sock->sk;
vsk = vsock_sk(sk);
total_written = 0;
err = 0;
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
lock_sock(sk);
/* Callers should not provide a destination with stream sockets. */
if (msg->msg_namelen) {
err = sk->sk_state == SS_CONNECTED ? -EISCONN : -EOPNOTSUPP;
goto out;
}
/* Send data only if both sides are not shutdown in the direction. */
if (sk->sk_shutdown & SEND_SHUTDOWN ||
vsk->peer_shutdown & RCV_SHUTDOWN) {
err = -EPIPE;
goto out;
}
if (sk->sk_state != SS_CONNECTED ||
!vsock_addr_bound(&vsk->local_addr)) {
err = -ENOTCONN;
goto out;
}
if (!vsock_addr_bound(&vsk->remote_addr)) {
err = -EDESTADDRREQ;
goto out;
}
/* Wait for room in the produce queue to enqueue our user's data. */
timeout = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
err = transport->notify_send_init(vsk, &send_data);
if (err < 0)
goto out;
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
while (total_written < len) {
ssize_t written;
while (vsock_stream_has_space(vsk) == 0 &&
sk->sk_err == 0 &&
!(sk->sk_shutdown & SEND_SHUTDOWN) &&
!(vsk->peer_shutdown & RCV_SHUTDOWN)) {
/* Don't wait for non-blocking sockets. */
if (timeout == 0) {
err = -EAGAIN;
goto out_wait;
}
err = transport->notify_send_pre_block(vsk, &send_data);
if (err < 0)
goto out_wait;
release_sock(sk);
timeout = schedule_timeout(timeout);
lock_sock(sk);
if (signal_pending(current)) {
err = sock_intr_errno(timeout);
goto out_wait;
} else if (timeout == 0) {
err = -EAGAIN;
goto out_wait;
}
prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
}
/* These checks occur both as part of and after the loop
* conditional since we need to check before and after
* sleeping.
*/
if (sk->sk_err) {
err = -sk->sk_err;
goto out_wait;
} else if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
(vsk->peer_shutdown & RCV_SHUTDOWN)) {
err = -EPIPE;
goto out_wait;
}
err = transport->notify_send_pre_enqueue(vsk, &send_data);
if (err < 0)
goto out_wait;
/* Note that enqueue will only write as many bytes as are free
* in the produce queue, so we don't need to ensure len is
* smaller than the queue size. It is the caller's
* responsibility to check how many bytes we were able to send.
*/
written = transport->stream_enqueue(
vsk, msg->msg_iov,
len - total_written);
if (written < 0) {
err = -ENOMEM;
goto out_wait;
}
total_written += written;
err = transport->notify_send_post_enqueue(
vsk, written, &send_data);
if (err < 0)
goto out_wait;
}
out_wait:
if (total_written > 0)
err = total_written;
finish_wait(sk_sleep(sk), &wait);
out:
release_sock(sk);
return err;
}
static int
vsock_stream_recvmsg(struct kiocb *kiocb,
struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
struct sock *sk;
struct vsock_sock *vsk;
int err;
size_t target;
ssize_t copied;
long timeout;
struct vsock_transport_recv_notify_data recv_data;
DEFINE_WAIT(wait);
sk = sock->sk;
vsk = vsock_sk(sk);
err = 0;
msg->msg_namelen = 0;
lock_sock(sk);
if (sk->sk_state != SS_CONNECTED) {
/* Recvmsg is supposed to return 0 if a peer performs an
* orderly shutdown. Differentiate between that case and when a
* peer has not connected or a local shutdown occurred with the
* SOCK_DONE flag.
*/
if (sock_flag(sk, SOCK_DONE))
err = 0;
else
err = -ENOTCONN;
goto out;
}
if (flags & MSG_OOB) {
err = -EOPNOTSUPP;
goto out;
}
/* We don't check peer_shutdown flag here since peer may actually shut
* down, but there can be data in the queue that a local socket can
* receive.
*/
if (sk->sk_shutdown & RCV_SHUTDOWN) {
err = 0;
goto out;
}
/* It is valid on Linux to pass in a zero-length receive buffer. This
* is not an error. We may as well bail out now.
*/
if (!len) {
err = 0;
goto out;
}
/* We must not copy less than target bytes into the user's buffer
* before returning successfully, so we wait for the consume queue to
* have that much data to consume before dequeueing. Note that this
* makes it impossible to handle cases where target is greater than the
* queue size.
*/
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
if (target >= transport->stream_rcvhiwat(vsk)) {
err = -ENOMEM;
goto out;
}
timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
copied = 0;
err = transport->notify_recv_init(vsk, target, &recv_data);
if (err < 0)
goto out;
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
while (1) {
s64 ready = vsock_stream_has_data(vsk);
if (ready < 0) {
/* Invalid queue pair content. XXX This should be
* changed to a connection reset in a later change.
*/
err = -ENOMEM;
goto out_wait;
} else if (ready > 0) {
ssize_t read;
err = transport->notify_recv_pre_dequeue(
vsk, target, &recv_data);
if (err < 0)
break;
read = transport->stream_dequeue(
vsk, msg->msg_iov,
len - copied, flags);
if (read < 0) {
err = -ENOMEM;
break;
}
copied += read;
err = transport->notify_recv_post_dequeue(
vsk, target, read,
!(flags & MSG_PEEK), &recv_data);
if (err < 0)
goto out_wait;
if (read >= target || flags & MSG_PEEK)
break;
target -= read;
} else {
if (sk->sk_err != 0 || (sk->sk_shutdown & RCV_SHUTDOWN)
|| (vsk->peer_shutdown & SEND_SHUTDOWN)) {
break;
}
/* Don't wait for non-blocking sockets. */
if (timeout == 0) {
err = -EAGAIN;
break;
}
err = transport->notify_recv_pre_block(
vsk, target, &recv_data);
if (err < 0)
break;
release_sock(sk);
timeout = schedule_timeout(timeout);
lock_sock(sk);
if (signal_pending(current)) {
err = sock_intr_errno(timeout);
break;
} else if (timeout == 0) {
err = -EAGAIN;
break;
}
prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
}
}
if (sk->sk_err)
err = -sk->sk_err;
else if (sk->sk_shutdown & RCV_SHUTDOWN)
err = 0;
if (copied > 0) {
/* We only do these additional bookkeeping/notification steps
* if we actually copied something out of the queue pair
* instead of just peeking ahead.
*/
if (!(flags & MSG_PEEK)) {
/* If the other side has shutdown for sending and there
* is nothing more to read, then modify the socket
* state.
*/
if (vsk->peer_shutdown & SEND_SHUTDOWN) {
if (vsock_stream_has_data(vsk) <= 0) {
sk->sk_state = SS_UNCONNECTED;
sock_set_flag(sk, SOCK_DONE);
sk->sk_state_change(sk);
}
}
}
err = copied;
}
out_wait:
finish_wait(sk_sleep(sk), &wait);
out:
release_sock(sk);
return err;
}
static const struct proto_ops vsock_stream_ops = {
.family = PF_VSOCK,
.owner = THIS_MODULE,
.release = vsock_release,
.bind = vsock_bind,
.connect = vsock_stream_connect,
.socketpair = sock_no_socketpair,
.accept = vsock_accept,
.getname = vsock_getname,
.poll = vsock_poll,
.ioctl = sock_no_ioctl,
.listen = vsock_listen,
.shutdown = vsock_shutdown,
.setsockopt = vsock_stream_setsockopt,
.getsockopt = vsock_stream_getsockopt,
.sendmsg = vsock_stream_sendmsg,
.recvmsg = vsock_stream_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
};
static int vsock_create(struct net *net, struct socket *sock,
int protocol, int kern)
{
if (!sock)
return -EINVAL;
if (protocol && protocol != PF_VSOCK)
return -EPROTONOSUPPORT;
switch (sock->type) {
case SOCK_DGRAM:
sock->ops = &vsock_dgram_ops;
break;
case SOCK_STREAM:
sock->ops = &vsock_stream_ops;
break;
default:
return -ESOCKTNOSUPPORT;
}
sock->state = SS_UNCONNECTED;
return __vsock_create(net, sock, NULL, GFP_KERNEL, 0) ? 0 : -ENOMEM;
}
static const struct net_proto_family vsock_family_ops = {
.family = AF_VSOCK,
.create = vsock_create,
.owner = THIS_MODULE,
};
static long vsock_dev_do_ioctl(struct file *filp,
unsigned int cmd, void __user *ptr)
{
u32 __user *p = ptr;
int retval = 0;
switch (cmd) {
case IOCTL_VM_SOCKETS_GET_LOCAL_CID:
if (put_user(transport->get_local_cid(), p) != 0)
retval = -EFAULT;
break;
default:
pr_err("Unknown ioctl %d\n", cmd);
retval = -EINVAL;
}
return retval;
}
static long vsock_dev_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg)
{
return vsock_dev_do_ioctl(filp, cmd, (void __user *)arg);
}
#ifdef CONFIG_COMPAT
static long vsock_dev_compat_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg)
{
return vsock_dev_do_ioctl(filp, cmd, compat_ptr(arg));
}
#endif
static const struct file_operations vsock_device_ops = {
.owner = THIS_MODULE,
.unlocked_ioctl = vsock_dev_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = vsock_dev_compat_ioctl,
#endif
.open = nonseekable_open,
};
static struct miscdevice vsock_device = {
.name = "vsock",
.minor = MISC_DYNAMIC_MINOR,
.fops = &vsock_device_ops,
};
static int __vsock_core_init(void)
{
int err;
vsock_init_tables();
err = misc_register(&vsock_device);
if (err) {
pr_err("Failed to register misc device\n");
return -ENOENT;
}
err = proto_register(&vsock_proto, 1); /* we want our slab */
if (err) {
pr_err("Cannot register vsock protocol\n");
goto err_misc_deregister;
}
err = sock_register(&vsock_family_ops);
if (err) {
pr_err("could not register af_vsock (%d) address family: %d\n",
AF_VSOCK, err);
goto err_unregister_proto;
}
return 0;
err_unregister_proto:
proto_unregister(&vsock_proto);
err_misc_deregister:
misc_deregister(&vsock_device);
return err;
}
int vsock_core_init(const struct vsock_transport *t)
{
int retval = mutex_lock_interruptible(&vsock_register_mutex);
if (retval)
return retval;
if (transport) {
retval = -EBUSY;
goto out;
}
transport = t;
retval = __vsock_core_init();
if (retval)
transport = NULL;
out:
mutex_unlock(&vsock_register_mutex);
return retval;
}
EXPORT_SYMBOL_GPL(vsock_core_init);
void vsock_core_exit(void)
{
mutex_lock(&vsock_register_mutex);
misc_deregister(&vsock_device);
sock_unregister(AF_VSOCK);
proto_unregister(&vsock_proto);
/* We do not want the assignment below re-ordered. */
mb();
transport = NULL;
mutex_unlock(&vsock_register_mutex);
}
EXPORT_SYMBOL_GPL(vsock_core_exit);
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Socket Family");
MODULE_VERSION("1.0.0.0-k");
MODULE_LICENSE("GPL v2");
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_5697_0 |
crossvul-cpp_data_good_295_2 | /* $OpenBSD: auth2-pubkey.c,v 1.83 2018/07/31 03:10:27 djm Exp $ */
/*
* Copyright (c) 2000 Markus Friedl. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <errno.h>
#include <fcntl.h>
#include <paths.h>
#include <pwd.h>
#include <signal.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <limits.h>
#include "xmalloc.h"
#include "ssh.h"
#include "ssh2.h"
#include "packet.h"
#include "sshbuf.h"
#include "log.h"
#include "misc.h"
#include "servconf.h"
#include "compat.h"
#include "sshkey.h"
#include "hostfile.h"
#include "auth.h"
#include "pathnames.h"
#include "uidswap.h"
#include "auth-options.h"
#include "canohost.h"
#ifdef GSSAPI
#include "ssh-gss.h"
#endif
#include "monitor_wrap.h"
#include "authfile.h"
#include "match.h"
#include "ssherr.h"
#include "channels.h" /* XXX for session.h */
#include "session.h" /* XXX for child_set_env(); refactor? */
/* import */
extern ServerOptions options;
extern u_char *session_id2;
extern u_int session_id2_len;
static char *
format_key(const struct sshkey *key)
{
char *ret, *fp = sshkey_fingerprint(key,
options.fingerprint_hash, SSH_FP_DEFAULT);
xasprintf(&ret, "%s %s", sshkey_type(key), fp);
free(fp);
return ret;
}
static int
userauth_pubkey(struct ssh *ssh)
{
Authctxt *authctxt = ssh->authctxt;
struct passwd *pw = authctxt->pw;
struct sshbuf *b = NULL;
struct sshkey *key = NULL;
char *pkalg = NULL, *userstyle = NULL, *key_s = NULL, *ca_s = NULL;
u_char *pkblob = NULL, *sig = NULL, have_sig;
size_t blen, slen;
int r, pktype;
int authenticated = 0;
struct sshauthopt *authopts = NULL;
if ((r = sshpkt_get_u8(ssh, &have_sig)) != 0 ||
(r = sshpkt_get_cstring(ssh, &pkalg, NULL)) != 0 ||
(r = sshpkt_get_string(ssh, &pkblob, &blen)) != 0)
fatal("%s: parse request failed: %s", __func__, ssh_err(r));
pktype = sshkey_type_from_name(pkalg);
if (pktype == KEY_UNSPEC) {
/* this is perfectly legal */
verbose("%s: unsupported public key algorithm: %s",
__func__, pkalg);
goto done;
}
if ((r = sshkey_from_blob(pkblob, blen, &key)) != 0) {
error("%s: could not parse key: %s", __func__, ssh_err(r));
goto done;
}
if (key == NULL) {
error("%s: cannot decode key: %s", __func__, pkalg);
goto done;
}
if (key->type != pktype) {
error("%s: type mismatch for decoded key "
"(received %d, expected %d)", __func__, key->type, pktype);
goto done;
}
if (sshkey_type_plain(key->type) == KEY_RSA &&
(ssh->compat & SSH_BUG_RSASIGMD5) != 0) {
logit("Refusing RSA key because client uses unsafe "
"signature scheme");
goto done;
}
if (auth2_key_already_used(authctxt, key)) {
logit("refusing previously-used %s key", sshkey_type(key));
goto done;
}
if (match_pattern_list(pkalg, options.pubkey_key_types, 0) != 1) {
logit("%s: key type %s not in PubkeyAcceptedKeyTypes",
__func__, sshkey_ssh_name(key));
goto done;
}
key_s = format_key(key);
if (sshkey_is_cert(key))
ca_s = format_key(key->cert->signature_key);
if (have_sig) {
debug3("%s: have %s signature for %s%s%s",
__func__, pkalg, key_s,
ca_s == NULL ? "" : " CA ",
ca_s == NULL ? "" : ca_s);
if ((r = sshpkt_get_string(ssh, &sig, &slen)) != 0 ||
(r = sshpkt_get_end(ssh)) != 0)
fatal("%s: %s", __func__, ssh_err(r));
if ((b = sshbuf_new()) == NULL)
fatal("%s: sshbuf_new failed", __func__);
if (ssh->compat & SSH_OLD_SESSIONID) {
if ((r = sshbuf_put(b, session_id2,
session_id2_len)) != 0)
fatal("%s: sshbuf_put session id: %s",
__func__, ssh_err(r));
} else {
if ((r = sshbuf_put_string(b, session_id2,
session_id2_len)) != 0)
fatal("%s: sshbuf_put_string session id: %s",
__func__, ssh_err(r));
}
if (!authctxt->valid || authctxt->user == NULL) {
debug2("%s: disabled because of invalid user",
__func__);
goto done;
}
/* reconstruct packet */
xasprintf(&userstyle, "%s%s%s", authctxt->user,
authctxt->style ? ":" : "",
authctxt->style ? authctxt->style : "");
if ((r = sshbuf_put_u8(b, SSH2_MSG_USERAUTH_REQUEST)) != 0 ||
(r = sshbuf_put_cstring(b, userstyle)) != 0 ||
(r = sshbuf_put_cstring(b, authctxt->service)) != 0 ||
(r = sshbuf_put_cstring(b, "publickey")) != 0 ||
(r = sshbuf_put_u8(b, have_sig)) != 0 ||
(r = sshbuf_put_cstring(b, pkalg) != 0) ||
(r = sshbuf_put_string(b, pkblob, blen)) != 0)
fatal("%s: build packet failed: %s",
__func__, ssh_err(r));
#ifdef DEBUG_PK
sshbuf_dump(b, stderr);
#endif
/* test for correct signature */
authenticated = 0;
if (PRIVSEP(user_key_allowed(ssh, pw, key, 1, &authopts)) &&
PRIVSEP(sshkey_verify(key, sig, slen,
sshbuf_ptr(b), sshbuf_len(b),
(ssh->compat & SSH_BUG_SIGTYPE) == 0 ? pkalg : NULL,
ssh->compat)) == 0) {
authenticated = 1;
}
sshbuf_free(b);
auth2_record_key(authctxt, authenticated, key);
} else {
debug("%s: test pkalg %s pkblob %s%s%s",
__func__, pkalg, key_s,
ca_s == NULL ? "" : " CA ",
ca_s == NULL ? "" : ca_s);
if ((r = sshpkt_get_end(ssh)) != 0)
fatal("%s: %s", __func__, ssh_err(r));
if (!authctxt->valid || authctxt->user == NULL) {
debug2("%s: disabled because of invalid user",
__func__);
goto done;
}
/* XXX fake reply and always send PK_OK ? */
/*
* XXX this allows testing whether a user is allowed
* to login: if you happen to have a valid pubkey this
* message is sent. the message is NEVER sent at all
* if a user is not allowed to login. is this an
* issue? -markus
*/
if (PRIVSEP(user_key_allowed(ssh, pw, key, 0, NULL))) {
if ((r = sshpkt_start(ssh, SSH2_MSG_USERAUTH_PK_OK))
!= 0 ||
(r = sshpkt_put_cstring(ssh, pkalg)) != 0 ||
(r = sshpkt_put_string(ssh, pkblob, blen)) != 0 ||
(r = sshpkt_send(ssh)) != 0 ||
(r = ssh_packet_write_wait(ssh)) != 0)
fatal("%s: %s", __func__, ssh_err(r));
authctxt->postponed = 1;
}
}
done:
if (authenticated == 1 && auth_activate_options(ssh, authopts) != 0) {
debug("%s: key options inconsistent with existing", __func__);
authenticated = 0;
}
debug2("%s: authenticated %d pkalg %s", __func__, authenticated, pkalg);
sshauthopt_free(authopts);
sshkey_free(key);
free(userstyle);
free(pkalg);
free(pkblob);
free(key_s);
free(ca_s);
free(sig);
return authenticated;
}
static int
match_principals_option(const char *principal_list, struct sshkey_cert *cert)
{
char *result;
u_int i;
/* XXX percent_expand() sequences for authorized_principals? */
for (i = 0; i < cert->nprincipals; i++) {
if ((result = match_list(cert->principals[i],
principal_list, NULL)) != NULL) {
debug3("matched principal from key options \"%.100s\"",
result);
free(result);
return 1;
}
}
return 0;
}
/*
* Process a single authorized_principals format line. Returns 0 and sets
* *authoptsp if the principal is authorised, -1 otherwise. "loc" is used as a
* log preamble for file/line information.
*/
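/*
* Illustrative examples only: lines this parser accepts look like
*     alice
*     command="/usr/bin/uptime",no-pty bob
* i.e. an optional comma-separated option list followed by one principal.
*/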
static int
check_principals_line(struct ssh *ssh, char *cp, const struct sshkey_cert *cert,
const char *loc, struct sshauthopt **authoptsp)
{
u_int i, found = 0;
char *ep, *line_opts;
const char *reason = NULL;
struct sshauthopt *opts = NULL;
if (authoptsp != NULL)
*authoptsp = NULL;
/* Trim trailing whitespace. */
ep = cp + strlen(cp) - 1;
while (ep > cp && (*ep == '\n' || *ep == ' ' || *ep == '\t'))
*ep-- = '\0';
/*
* If the line has internal whitespace then assume it has
* key options.
*/
line_opts = NULL;
if ((ep = strrchr(cp, ' ')) != NULL ||
(ep = strrchr(cp, '\t')) != NULL) {
for (; *ep == ' ' || *ep == '\t'; ep++)
;
line_opts = cp;
cp = ep;
}
if ((opts = sshauthopt_parse(line_opts, &reason)) == NULL) {
debug("%s: bad principals options: %s", loc, reason);
auth_debug_add("%s: bad principals options: %s", loc, reason);
return -1;
}
/* Check principals in cert against those on line */
for (i = 0; i < cert->nprincipals; i++) {
if (strcmp(cp, cert->principals[i]) != 0)
continue;
debug3("%s: matched principal \"%.100s\"",
loc, cert->principals[i]);
found = 1;
}
if (found && authoptsp != NULL) {
*authoptsp = opts;
opts = NULL;
}
sshauthopt_free(opts);
return found ? 0 : -1;
}
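/*
* Read an authorized_principals stream line by line, skipping blank and
* comment lines, and test each line with check_principals_line(). Returns
* 1 and sets *authoptsp on the first match; the rest of the input is still
* consumed, which matters when the stream is a pipe from a command.
*/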
static int
process_principals(struct ssh *ssh, FILE *f, const char *file,
const struct sshkey_cert *cert, struct sshauthopt **authoptsp)
{
char loc[256], *line = NULL, *cp, *ep;
size_t linesize = 0;
u_long linenum = 0;
u_int found_principal = 0;
if (authoptsp != NULL)
*authoptsp = NULL;
while (getline(&line, &linesize, f) != -1) {
linenum++;
/* Always consume entire input */
if (found_principal)
continue;
/* Skip leading whitespace. */
for (cp = line; *cp == ' ' || *cp == '\t'; cp++)
;
/* Skip blank and comment lines. */
if ((ep = strchr(cp, '#')) != NULL)
*ep = '\0';
if (!*cp || *cp == '\n')
continue;
snprintf(loc, sizeof(loc), "%.200s:%lu", file, linenum);
if (check_principals_line(ssh, cp, cert, loc, authoptsp) == 0)
found_principal = 1;
}
free(line);
return found_principal;
}
/* XXX remove pw args here and elsewhere once ssh->authctxt is guaranteed */
static int
match_principals_file(struct ssh *ssh, struct passwd *pw, char *file,
struct sshkey_cert *cert, struct sshauthopt **authoptsp)
{
FILE *f;
int success;
if (authoptsp != NULL)
*authoptsp = NULL;
temporarily_use_uid(pw);
debug("trying authorized principals file %s", file);
if ((f = auth_openprincipals(file, pw, options.strict_modes)) == NULL) {
restore_uid();
return 0;
}
success = process_principals(ssh, f, file, cert, authoptsp);
fclose(f);
restore_uid();
return success;
}
/*
* Checks whether principal is allowed in output of command.
* returns 1 if the principal is allowed or 0 otherwise.
*/
static int
match_principals_command(struct ssh *ssh, struct passwd *user_pw,
const struct sshkey *key, struct sshauthopt **authoptsp)
{
struct passwd *runas_pw = NULL;
const struct sshkey_cert *cert = key->cert;
FILE *f = NULL;
int r, ok, found_principal = 0;
int i, ac = 0, uid_swapped = 0;
pid_t pid;
char *tmp, *username = NULL, *command = NULL, **av = NULL;
char *ca_fp = NULL, *key_fp = NULL, *catext = NULL, *keytext = NULL;
char serial_s[32], uidstr[32]; /* a 64-bit serial needs up to 20 digits */
void (*osigchld)(int);
if (authoptsp != NULL)
*authoptsp = NULL;
if (options.authorized_principals_command == NULL)
return 0;
if (options.authorized_principals_command_user == NULL) {
error("No user for AuthorizedPrincipalsCommand specified, "
"skipping");
return 0;
}
/*
* NB. all returns later in this function should go via "out" to
* ensure the original SIGCHLD handler is restored properly.
*/
osigchld = signal(SIGCHLD, SIG_DFL);
/* Prepare and verify the user for the command */
username = percent_expand(options.authorized_principals_command_user,
"u", user_pw->pw_name, (char *)NULL);
runas_pw = getpwnam(username);
if (runas_pw == NULL) {
error("AuthorizedPrincipalsCommandUser \"%s\" not found: %s",
username, strerror(errno));
goto out;
}
/* Turn the command into an argument vector */
if (argv_split(options.authorized_principals_command, &ac, &av) != 0) {
/* "command" is assembled later; log the raw option instead */
error("AuthorizedPrincipalsCommand \"%s\" contains "
"invalid quotes", options.authorized_principals_command);
goto out;
}
if (ac == 0) {
error("AuthorizedPrincipalsCommand \"%s\" yielded no arguments",
options.authorized_principals_command);
goto out;
}
if ((ca_fp = sshkey_fingerprint(cert->signature_key,
options.fingerprint_hash, SSH_FP_DEFAULT)) == NULL) {
error("%s: sshkey_fingerprint failed", __func__);
goto out;
}
if ((key_fp = sshkey_fingerprint(key,
options.fingerprint_hash, SSH_FP_DEFAULT)) == NULL) {
error("%s: sshkey_fingerprint failed", __func__);
goto out;
}
if ((r = sshkey_to_base64(cert->signature_key, &catext)) != 0) {
error("%s: sshkey_to_base64 failed: %s", __func__, ssh_err(r));
goto out;
}
if ((r = sshkey_to_base64(key, &keytext)) != 0) {
error("%s: sshkey_to_base64 failed: %s", __func__, ssh_err(r));
goto out;
}
snprintf(serial_s, sizeof(serial_s), "%llu",
(unsigned long long)cert->serial);
snprintf(uidstr, sizeof(uidstr), "%llu",
(unsigned long long)user_pw->pw_uid);
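/*
* Expand the percent escapes in each argument: %U numeric uid, %u user
* name, %h home directory, %t/%T key/CA type, %f/%F key/CA fingerprint,
* %k/%K key/CA in base64, %i key ID and %s certificate serial, as listed
* in the percent_expand() call below.
*/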
for (i = 1; i < ac; i++) {
tmp = percent_expand(av[i],
"U", uidstr,
"u", user_pw->pw_name,
"h", user_pw->pw_dir,
"t", sshkey_ssh_name(key),
"T", sshkey_ssh_name(cert->signature_key),
"f", key_fp,
"F", ca_fp,
"k", keytext,
"K", catext,
"i", cert->key_id,
"s", serial_s,
(char *)NULL);
if (tmp == NULL)
fatal("%s: percent_expand failed", __func__);
free(av[i]);
av[i] = tmp;
}
/* Prepare a printable command for logs, etc. */
command = argv_assemble(ac, av);
if ((pid = subprocess("AuthorizedPrincipalsCommand", runas_pw, command,
ac, av, &f,
SSH_SUBPROCESS_STDOUT_CAPTURE|SSH_SUBPROCESS_STDERR_DISCARD)) == 0)
goto out;
uid_swapped = 1;
temporarily_use_uid(runas_pw);
ok = process_principals(ssh, f, "(command)", cert, authoptsp);
fclose(f);
f = NULL;
if (exited_cleanly(pid, "AuthorizedPrincipalsCommand", command, 0) != 0)
goto out;
/* Read completed successfully */
found_principal = ok;
out:
if (f != NULL)
fclose(f);
signal(SIGCHLD, osigchld);
for (i = 0; i < ac; i++)
free(av[i]);
free(av);
if (uid_swapped)
restore_uid();
free(command);
free(username);
free(ca_fp);
free(key_fp);
free(catext);
free(keytext);
return found_principal;
}
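/* Advance *cpp past any leading blanks and tabs. */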
static void
skip_space(char **cpp)
{
char *cp;
for (cp = *cpp; *cp == ' ' || *cp == '\t'; cp++)
;
*cpp = cp;
}
/*
* Advances *cpp past the end of key options, defined as the first unquoted
* whitespace character. Returns 0 on success or -1 on failure (e.g.
* unterminated quotes).
*/
static int
advance_past_options(char **cpp)
{
char *cp = *cpp;
int quoted = 0;
for (; *cp && (quoted || (*cp != ' ' && *cp != '\t')); cp++) {
if (*cp == '\\' && cp[1] == '"')
cp++; /* Skip both */
else if (*cp == '"')
quoted = !quoted;
}
*cpp = cp;
/* return failure for unterminated quotes */
return (*cp == '\0' && quoted) ? -1 : 0;
}
/*
* Check a single line of an authorized_keys-format file. Returns 0 if key
* matches, -1 otherwise. Will return key/cert options via *authoptsp
* on success. "loc" is used as file/line location in log messages.
*/
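/*
* Illustrative examples only: typical lines handled here are
*     ssh-ed25519 AAAA... user@host
*     restrict,command="/bin/true" ssh-rsa AAAA... backup
*     cert-authority ssh-ed25519 AAAA... ca@example.org
* The second sshkey_read() attempt below covers the leading-options forms.
*/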
static int
check_authkey_line(struct ssh *ssh, struct passwd *pw, struct sshkey *key,
char *cp, const char *loc, struct sshauthopt **authoptsp)
{
int want_keytype = sshkey_is_cert(key) ? KEY_UNSPEC : key->type;
struct sshkey *found = NULL;
struct sshauthopt *keyopts = NULL, *certopts = NULL, *finalopts = NULL;
char *key_options = NULL, *fp = NULL;
const char *reason = NULL;
int ret = -1;
if (authoptsp != NULL)
*authoptsp = NULL;
if ((found = sshkey_new(want_keytype)) == NULL) {
debug3("%s: keytype %d failed", __func__, want_keytype);
goto out;
}
/* XXX djm: peek at key type in line and skip if unwanted */
if (sshkey_read(found, &cp) != 0) {
/* no key? check for options */
debug2("%s: check options: '%s'", loc, cp);
key_options = cp;
if (advance_past_options(&cp) != 0) {
reason = "invalid key option string";
goto fail_reason;
}
skip_space(&cp);
if (sshkey_read(found, &cp) != 0) {
/* still no key? advance to next line */
debug2("%s: advance: '%s'", loc, cp);
goto out;
}
}
/* Parse key options now; we need to know if this is a CA key */
if ((keyopts = sshauthopt_parse(key_options, &reason)) == NULL) {
debug("%s: bad key options: %s", loc, reason);
auth_debug_add("%s: bad key options: %s", loc, reason);
goto out;
}
/* Ignore keys that don't match or are incorrectly marked as CAs */
if (sshkey_is_cert(key)) {
/* Certificate; check signature key against CA */
if (!sshkey_equal(found, key->cert->signature_key) ||
!keyopts->cert_authority)
goto out;
} else {
/* Plain key: check it against key found in file */
if (!sshkey_equal(found, key) || keyopts->cert_authority)
goto out;
}
/* We have a candidate key, perform authorisation checks */
if ((fp = sshkey_fingerprint(found,
options.fingerprint_hash, SSH_FP_DEFAULT)) == NULL)
fatal("%s: fingerprint failed", __func__);
debug("%s: matching %s found: %s %s", loc,
sshkey_is_cert(key) ? "CA" : "key", sshkey_type(found), fp);
if (auth_authorise_keyopts(ssh, pw, keyopts,
sshkey_is_cert(key), loc) != 0) {
reason = "Refused by key options";
goto fail_reason;
}
/* That's all we need for plain keys. */
if (!sshkey_is_cert(key)) {
verbose("Accepted key %s %s found at %s",
sshkey_type(found), fp, loc);
finalopts = keyopts;
keyopts = NULL;
goto success;
}
/*
* Additional authorisation for certificates.
*/
/* Parse and check options present in certificate */
if ((certopts = sshauthopt_from_cert(key)) == NULL) {
reason = "Invalid certificate options";
goto fail_reason;
}
if (auth_authorise_keyopts(ssh, pw, certopts, 0, loc) != 0) {
reason = "Refused by certificate options";
goto fail_reason;
}
if ((finalopts = sshauthopt_merge(keyopts, certopts, &reason)) == NULL)
goto fail_reason;
/*
* If the user has specified a list of principals as
* a key option, then prefer that list to matching
* their username in the certificate principals list.
*/
if (keyopts->cert_principals != NULL &&
!match_principals_option(keyopts->cert_principals, key->cert)) {
reason = "Certificate does not contain an authorized principal";
goto fail_reason;
}
if (sshkey_cert_check_authority(key, 0, 0,
keyopts->cert_principals == NULL ? pw->pw_name : NULL, &reason) != 0)
goto fail_reason;
verbose("Accepted certificate ID \"%s\" (serial %llu) "
"signed by CA %s %s found at %s",
key->cert->key_id,
(unsigned long long)key->cert->serial,
sshkey_type(found), fp, loc);
success:
if (finalopts == NULL)
fatal("%s: internal error: missing options", __func__);
if (authoptsp != NULL) {
*authoptsp = finalopts;
finalopts = NULL;
}
/* success */
ret = 0;
goto out;
fail_reason:
error("%s", reason);
auth_debug_add("%s", reason);
out:
free(fp);
sshauthopt_free(keyopts);
sshauthopt_free(certopts);
sshauthopt_free(finalopts);
sshkey_free(found);
return ret;
}
/*
* Checks whether key is allowed in authorized_keys-format file,
* returns 1 if the key is allowed or 0 otherwise.
*/
static int
check_authkeys_file(struct ssh *ssh, struct passwd *pw, FILE *f,
char *file, struct sshkey *key, struct sshauthopt **authoptsp)
{
char *cp, *line = NULL, loc[256];
size_t linesize = 0;
int found_key = 0;
u_long linenum = 0;
if (authoptsp != NULL)
*authoptsp = NULL;
while (getline(&line, &linesize, f) != -1) {
linenum++;
/* Always consume entire file */
if (found_key)
continue;
/* Skip leading whitespace, empty and comment lines. */
cp = line;
skip_space(&cp);
if (!*cp || *cp == '\n' || *cp == '#')
continue;
snprintf(loc, sizeof(loc), "%.200s:%lu", file, linenum);
if (check_authkey_line(ssh, pw, key, cp, loc, authoptsp) == 0)
found_key = 1;
}
free(line);
return found_key;
}
/* Authenticate a certificate key against TrustedUserCAKeys */
static int
user_cert_trusted_ca(struct ssh *ssh, struct passwd *pw, struct sshkey *key,
struct sshauthopt **authoptsp)
{
char *ca_fp, *principals_file = NULL;
const char *reason;
struct sshauthopt *principals_opts = NULL, *cert_opts = NULL;
struct sshauthopt *final_opts = NULL;
int r, ret = 0, found_principal = 0, use_authorized_principals;
if (authoptsp != NULL)
*authoptsp = NULL;
if (!sshkey_is_cert(key) || options.trusted_user_ca_keys == NULL)
return 0;
if ((ca_fp = sshkey_fingerprint(key->cert->signature_key,
options.fingerprint_hash, SSH_FP_DEFAULT)) == NULL)
return 0;
if ((r = sshkey_in_file(key->cert->signature_key,
options.trusted_user_ca_keys, 1, 0)) != 0) {
debug2("%s: CA %s %s is not listed in %s: %s", __func__,
sshkey_type(key->cert->signature_key), ca_fp,
options.trusted_user_ca_keys, ssh_err(r));
goto out;
}
/*
* If AuthorizedPrincipals is in use, then compare the certificate
* principals against the names in that file rather than matching
* against the username.
*/
if ((principals_file = authorized_principals_file(pw)) != NULL) {
if (match_principals_file(ssh, pw, principals_file,
key->cert, &principals_opts))
found_principal = 1;
}
/* Try querying command if specified */
if (!found_principal && match_principals_command(ssh, pw, key,
&principals_opts))
found_principal = 1;
/* If principals file or command is specified, then require a match */
use_authorized_principals = principals_file != NULL ||
options.authorized_principals_command != NULL;
if (!found_principal && use_authorized_principals) {
reason = "Certificate does not contain an authorized principal";
goto fail_reason;
}
if (use_authorized_principals && principals_opts == NULL)
fatal("%s: internal error: missing principals_opts", __func__);
if (sshkey_cert_check_authority(key, 0, 1,
use_authorized_principals ? NULL : pw->pw_name, &reason) != 0)
goto fail_reason;
/* Check authority from options in key and from principals file/cmd */
if ((cert_opts = sshauthopt_from_cert(key)) == NULL) {
reason = "Invalid certificate options";
goto fail_reason;
}
if (auth_authorise_keyopts(ssh, pw, cert_opts, 0, "cert") != 0) {
reason = "Refused by certificate options";
goto fail_reason;
}
if (principals_opts == NULL) {
final_opts = cert_opts;
cert_opts = NULL;
} else {
if (auth_authorise_keyopts(ssh, pw, principals_opts, 0,
"principals") != 0) {
reason = "Refused by certificate principals options";
goto fail_reason;
}
if ((final_opts = sshauthopt_merge(principals_opts,
cert_opts, &reason)) == NULL) {
fail_reason:
error("%s", reason);
auth_debug_add("%s", reason);
goto out;
}
}
/* Success */
verbose("Accepted certificate ID \"%s\" (serial %llu) signed by "
"%s CA %s via %s", key->cert->key_id,
(unsigned long long)key->cert->serial,
sshkey_type(key->cert->signature_key), ca_fp,
options.trusted_user_ca_keys);
if (authoptsp != NULL) {
*authoptsp = final_opts;
final_opts = NULL;
}
ret = 1;
out:
sshauthopt_free(principals_opts);
sshauthopt_free(cert_opts);
sshauthopt_free(final_opts);
free(principals_file);
free(ca_fp);
return ret;
}
/*
* Checks whether key is allowed in file.
* returns 1 if the key is allowed or 0 otherwise.
*/
static int
user_key_allowed2(struct ssh *ssh, struct passwd *pw, struct sshkey *key,
char *file, struct sshauthopt **authoptsp)
{
FILE *f;
int found_key = 0;
if (authoptsp != NULL)
*authoptsp = NULL;
/* Temporarily use the user's uid. */
temporarily_use_uid(pw);
debug("trying public key file %s", file);
if ((f = auth_openkeyfile(file, pw, options.strict_modes)) != NULL) {
found_key = check_authkeys_file(ssh, pw, f, file,
key, authoptsp);
fclose(f);
}
restore_uid();
return found_key;
}
/*
* Checks whether key is allowed in output of command.
* returns 1 if the key is allowed or 0 otherwise.
*/
static int
user_key_command_allowed2(struct ssh *ssh, struct passwd *user_pw,
struct sshkey *key, struct sshauthopt **authoptsp)
{
struct passwd *runas_pw = NULL;
FILE *f = NULL;
int r, ok, found_key = 0;
int i, uid_swapped = 0, ac = 0;
pid_t pid;
char *username = NULL, *key_fp = NULL, *keytext = NULL;
char uidstr[32], *tmp, *command = NULL, **av = NULL;
void (*osigchld)(int);
if (authoptsp != NULL)
*authoptsp = NULL;
if (options.authorized_keys_command == NULL)
return 0;
if (options.authorized_keys_command_user == NULL) {
error("No user for AuthorizedKeysCommand specified, skipping");
return 0;
}
/*
* NB. all returns later in this function should go via "out" to
* ensure the original SIGCHLD handler is restored properly.
*/
osigchld = signal(SIGCHLD, SIG_DFL);
/* Prepare and verify the user for the command */
username = percent_expand(options.authorized_keys_command_user,
"u", user_pw->pw_name, (char *)NULL);
runas_pw = getpwnam(username);
if (runas_pw == NULL) {
error("AuthorizedKeysCommandUser \"%s\" not found: %s",
username, strerror(errno));
goto out;
}
/* Prepare AuthorizedKeysCommand */
if ((key_fp = sshkey_fingerprint(key, options.fingerprint_hash,
SSH_FP_DEFAULT)) == NULL) {
error("%s: sshkey_fingerprint failed", __func__);
goto out;
}
if ((r = sshkey_to_base64(key, &keytext)) != 0) {
error("%s: sshkey_to_base64 failed: %s", __func__, ssh_err(r));
goto out;
}
/* Turn the command into an argument vector */
if (argv_split(options.authorized_keys_command, &ac, &av) != 0) {
/* "command" is assembled later; log the raw option instead */
error("AuthorizedKeysCommand \"%s\" contains invalid quotes",
options.authorized_keys_command);
goto out;
}
if (ac == 0) {
error("AuthorizedKeysCommand \"%s\" yielded no arguments",
options.authorized_keys_command);
goto out;
}
snprintf(uidstr, sizeof(uidstr), "%llu",
(unsigned long long)user_pw->pw_uid);
for (i = 1; i < ac; i++) {
tmp = percent_expand(av[i],
"U", uidstr,
"u", user_pw->pw_name,
"h", user_pw->pw_dir,
"t", sshkey_ssh_name(key),
"f", key_fp,
"k", keytext,
(char *)NULL);
if (tmp == NULL)
fatal("%s: percent_expand failed", __func__);
free(av[i]);
av[i] = tmp;
}
/* Prepare a printable command for logs, etc. */
command = argv_assemble(ac, av);
/*
* If AuthorizedKeysCommand was run without arguments
* then fall back to the old behaviour of passing the
* target username as a single argument.
*/
if (ac == 1) {
av = xreallocarray(av, ac + 2, sizeof(*av));
av[1] = xstrdup(user_pw->pw_name);
av[2] = NULL;
ac = 2; /* so the cleanup loop below also frees av[1] */
/* Fix up command too, since it is used in log messages */
free(command);
xasprintf(&command, "%s %s", av[0], av[1]);
}
if ((pid = subprocess("AuthorizedKeysCommand", runas_pw, command,
ac, av, &f,
SSH_SUBPROCESS_STDOUT_CAPTURE|SSH_SUBPROCESS_STDERR_DISCARD)) == 0)
goto out;
uid_swapped = 1;
temporarily_use_uid(runas_pw);
ok = check_authkeys_file(ssh, user_pw, f,
options.authorized_keys_command, key, authoptsp);
fclose(f);
f = NULL;
if (exited_cleanly(pid, "AuthorizedKeysCommand", command, 0) != 0)
goto out;
/* Read completed successfully */
found_key = ok;
out:
if (f != NULL)
fclose(f);
signal(SIGCHLD, osigchld);
for (i = 0; i < ac; i++)
free(av[i]);
free(av);
if (uid_swapped)
restore_uid();
free(command);
free(username);
free(key_fp);
free(keytext);
return found_key;
}
/*
* Check whether key authenticates and authorises the user.
*/
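/*
* Sources are consulted in order: TrustedUserCAKeys (for certificates),
* then AuthorizedKeysCommand, then each AuthorizedKeysFile entry; the
* first match wins and its options are returned via *authoptsp.
*/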
int
user_key_allowed(struct ssh *ssh, struct passwd *pw, struct sshkey *key,
int auth_attempt, struct sshauthopt **authoptsp)
{
u_int success, i;
char *file;
struct sshauthopt *opts = NULL;
if (authoptsp != NULL)
*authoptsp = NULL;
if (auth_key_is_revoked(key))
return 0;
if (sshkey_is_cert(key) &&
auth_key_is_revoked(key->cert->signature_key))
return 0;
if ((success = user_cert_trusted_ca(ssh, pw, key, &opts)) != 0)
goto out;
sshauthopt_free(opts);
opts = NULL;
if ((success = user_key_command_allowed2(ssh, pw, key, &opts)) != 0)
goto out;
sshauthopt_free(opts);
opts = NULL;
for (i = 0; !success && i < options.num_authkeys_files; i++) {
if (strcasecmp(options.authorized_keys_files[i], "none") == 0)
continue;
file = expand_authorized_keys(
options.authorized_keys_files[i], pw);
success = user_key_allowed2(ssh, pw, key, file, &opts);
free(file);
}
out:
if (success && authoptsp != NULL) {
*authoptsp = opts;
opts = NULL;
}
sshauthopt_free(opts);
return success;
}
Authmethod method_pubkey = {
"publickey",
userauth_pubkey,
&options.pubkey_authentication
};
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_295_2 |
crossvul-cpp_data_good_3417_0 | /*
* AVI demuxer
* Copyright (c) 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <inttypes.h>
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/bswap.h"
#include "libavutil/opt.h"
#include "libavutil/dict.h"
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "avformat.h"
#include "avi.h"
#include "dv.h"
#include "internal.h"
#include "isom.h"
#include "riff.h"
#include "libavcodec/bytestream.h"
#include "libavcodec/exif.h"
#include "libavcodec/internal.h"
typedef struct AVIStream {
int64_t frame_offset; /* current frame (video) or byte (audio) counter
* (used to compute the pts) */
int remaining;
int packet_size;
uint32_t handler;
uint32_t scale;
uint32_t rate;
int sample_size; /* size of one sample (or packet)
* (in the rate/scale sense) in bytes */
int64_t cum_len; /* temporary storage (used during seek) */
int prefix; /* normally 'd'<<8 + 'c' or 'w'<<8 + 'b' */
int prefix_count;
uint32_t pal[256];
int has_pal;
int dshow_block_align; /* block align variable used to emulate bugs in
* the MS dshow demuxer */
AVFormatContext *sub_ctx;
AVPacket sub_pkt;
uint8_t *sub_buffer;
int64_t seek_pos;
} AVIStream;
typedef struct AVIContext {
const AVClass *class;
int64_t riff_end;
int64_t movi_end;
int64_t fsize;
int64_t io_fsize;
int64_t movi_list;
int64_t last_pkt_pos;
int index_loaded;
int is_odml;
int non_interleaved;
int stream_index;
DVDemuxContext *dv_demux;
int odml_depth;
int use_odml;
#define MAX_ODML_DEPTH 1000
int64_t dts_max;
} AVIContext;
static const AVOption options[] = {
{ "use_odml", "use odml index", offsetof(AVIContext, use_odml), AV_OPT_TYPE_BOOL, {.i64 = 1}, -1, 1, AV_OPT_FLAG_DECODING_PARAM},
{ NULL },
};
static const AVClass demuxer_class = {
.class_name = "avi",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEMUXER,
};
static const char avi_headers[][8] = {
{ 'R', 'I', 'F', 'F', 'A', 'V', 'I', ' ' },
{ 'R', 'I', 'F', 'F', 'A', 'V', 'I', 'X' },
{ 'R', 'I', 'F', 'F', 'A', 'V', 'I', 0x19 },
{ 'O', 'N', '2', ' ', 'O', 'N', '2', 'f' },
{ 'R', 'I', 'F', 'F', 'A', 'M', 'V', ' ' },
{ 0 }
};
static const AVMetadataConv avi_metadata_conv[] = {
{ "strn", "title" },
{ 0 },
};
static int avi_load_index(AVFormatContext *s);
static int guess_ni_flag(AVFormatContext *s);
#define print_tag(str, tag, size) \
av_log(NULL, AV_LOG_TRACE, "pos:%"PRIX64" %s: tag=%s size=0x%x\n", \
avio_tell(pb), str, av_fourcc2str(tag), size)
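/*
* Length of a chunk in stream time-base units: bytes when the stream has
* a fixed sample size, whole blocks for dshow-style block-aligned audio,
* otherwise one frame per chunk.
*/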
static inline int get_duration(AVIStream *ast, int len)
{
if (ast->sample_size)
return len;
else if (ast->dshow_block_align)
return (len + ast->dshow_block_align - 1) / ast->dshow_block_align;
else
return 1;
}
static int get_riff(AVFormatContext *s, AVIOContext *pb)
{
AVIContext *avi = s->priv_data;
char header[8] = {0};
int i;
/* check RIFF header */
avio_read(pb, header, 4);
avi->riff_end = avio_rl32(pb); /* RIFF chunk size */
avi->riff_end += avio_tell(pb); /* RIFF chunk end */
avio_read(pb, header + 4, 4);
for (i = 0; avi_headers[i][0]; i++)
if (!memcmp(header, avi_headers[i], 8))
break;
if (!avi_headers[i][0])
return AVERROR_INVALIDDATA;
if (header[7] == 0x19)
av_log(s, AV_LOG_INFO,
"This file has been generated by a totally broken muxer.\n");
return 0;
}
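/*
* Parse an OpenDML 'indx' chunk. Rough on-disk layout assumed here (per
* the OpenDML AVI 2.0 spec):
*     u16 wLongsPerEntry; u8 bIndexSubType; u8 bIndexType;
*     u32 nEntriesInUse;  u32 dwChunkId;    u64 qwBaseOffset;
* bIndexType 0 is a super index whose entries point at nested 'indx'
* chunks (the recursive branch below); type 1 lists data chunks directly.
*/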
static int read_odml_index(AVFormatContext *s, int frame_num)
{
AVIContext *avi = s->priv_data;
AVIOContext *pb = s->pb;
int longs_per_entry = avio_rl16(pb);
int index_sub_type = avio_r8(pb);
int index_type = avio_r8(pb);
int entries_in_use = avio_rl32(pb);
int chunk_id = avio_rl32(pb);
int64_t base = avio_rl64(pb);
int stream_id = ((chunk_id & 0xFF) - '0') * 10 +
((chunk_id >> 8 & 0xFF) - '0');
AVStream *st;
AVIStream *ast;
int i;
int64_t last_pos = -1;
int64_t filesize = avi->fsize;
av_log(s, AV_LOG_TRACE,
"longs_per_entry:%d index_type:%d entries_in_use:%d "
"chunk_id:%X base:%16"PRIX64" frame_num:%d\n",
longs_per_entry,
index_type,
entries_in_use,
chunk_id,
base,
frame_num);
if (stream_id >= s->nb_streams || stream_id < 0)
return AVERROR_INVALIDDATA;
st = s->streams[stream_id];
ast = st->priv_data;
if (index_sub_type)
return AVERROR_INVALIDDATA;
avio_rl32(pb);
if (index_type && longs_per_entry != 2)
return AVERROR_INVALIDDATA;
if (index_type > 1)
return AVERROR_INVALIDDATA;
if (filesize > 0 && base >= filesize) {
av_log(s, AV_LOG_ERROR, "ODML index invalid\n");
if (base >> 32 == (base & 0xFFFFFFFF) &&
(base & 0xFFFFFFFF) < filesize &&
filesize <= 0xFFFFFFFF)
base &= 0xFFFFFFFF;
else
return AVERROR_INVALIDDATA;
}
for (i = 0; i < entries_in_use; i++) {
if (index_type) {
int64_t pos = avio_rl32(pb) + base - 8;
int len = avio_rl32(pb);
int key = len >= 0;
len &= 0x7FFFFFFF;
av_log(s, AV_LOG_TRACE, "pos:%"PRId64", len:%X\n", pos, len);
if (avio_feof(pb))
return AVERROR_INVALIDDATA;
if (last_pos == pos || pos == base - 8)
avi->non_interleaved = 1;
if (last_pos != pos && len)
av_add_index_entry(st, pos, ast->cum_len, len, 0,
key ? AVINDEX_KEYFRAME : 0);
ast->cum_len += get_duration(ast, len);
last_pos = pos;
} else {
int64_t offset, pos;
int duration;
offset = avio_rl64(pb);
avio_rl32(pb); /* size */
duration = avio_rl32(pb);
if (avio_feof(pb))
return AVERROR_INVALIDDATA;
pos = avio_tell(pb);
if (avi->odml_depth > MAX_ODML_DEPTH) {
av_log(s, AV_LOG_ERROR, "Too deeply nested ODML indexes\n");
return AVERROR_INVALIDDATA;
}
if (avio_seek(pb, offset + 8, SEEK_SET) < 0)
return -1;
avi->odml_depth++;
read_odml_index(s, frame_num);
avi->odml_depth--;
frame_num += duration;
if (avio_seek(pb, pos, SEEK_SET) < 0) {
av_log(s, AV_LOG_ERROR, "Failed to restore position after reading index\n");
return -1;
}
}
}
avi->index_loaded = 2;
return 0;
}
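/*
* A non-interleaved stream with a fixed sample size may carry one giant
* index entry; split it into pieces of at least 1024 bytes so seeking
* has usable granularity.
*/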
static void clean_index(AVFormatContext *s)
{
int i;
int64_t j;
for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
AVIStream *ast = st->priv_data;
int n = st->nb_index_entries;
int max = ast->sample_size;
int64_t pos, size, ts;
if (n != 1 || ast->sample_size == 0)
continue;
while (max < 1024)
max += max;
pos = st->index_entries[0].pos;
size = st->index_entries[0].size;
ts = st->index_entries[0].timestamp;
for (j = 0; j < size; j += max)
av_add_index_entry(st, pos + j, ts + j, FFMIN(max, size - j), 0,
AVINDEX_KEYFRAME);
}
}
static int avi_read_tag(AVFormatContext *s, AVStream *st, uint32_t tag,
uint32_t size)
{
AVIOContext *pb = s->pb;
char key[5] = { 0 };
char *value;
size += (size & 1);
if (size == UINT_MAX)
return AVERROR(EINVAL);
value = av_malloc(size + 1);
if (!value)
return AVERROR(ENOMEM);
if (avio_read(pb, value, size) != size) {
av_freep(&value); /* do not leak the buffer on a short read */
return AVERROR_INVALIDDATA;
}
value[size] = 0;
AV_WL32(key, tag);
return av_dict_set(st ? &st->metadata : &s->metadata, key, value,
AV_DICT_DONT_STRDUP_VAL);
}
static const char months[12][4] = { "Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec" };
static void avi_metadata_creation_time(AVDictionary **metadata, char *date)
{
char month[4], time[9], buffer[64];
int i, day, year;
/* parse standard AVI date format (ie. "Mon Mar 10 15:04:43 2003") */
if (sscanf(date, "%*3s%*[ ]%3s%*[ ]%2d%*[ ]%8s%*[ ]%4d",
month, &day, time, &year) == 4) {
for (i = 0; i < 12; i++)
if (!av_strcasecmp(month, months[i])) {
snprintf(buffer, sizeof(buffer), "%.4d-%.2d-%.2d %s",
year, i + 1, day, time);
av_dict_set(metadata, "creation_time", buffer, 0);
}
} else if (date[4] == '/' && date[7] == '/') {
date[4] = date[7] = '-';
av_dict_set(metadata, "creation_time", date, 0);
}
}
static void avi_read_nikon(AVFormatContext *s, uint64_t end)
{
while (avio_tell(s->pb) < end && !avio_feof(s->pb)) {
uint32_t tag = avio_rl32(s->pb);
uint32_t size = avio_rl32(s->pb);
switch (tag) {
case MKTAG('n', 'c', 't', 'g'): /* Nikon Tags */
{
uint64_t tag_end = avio_tell(s->pb) + size;
while (avio_tell(s->pb) < tag_end && !avio_feof(s->pb)) {
uint16_t tag = avio_rl16(s->pb);
uint16_t size = avio_rl16(s->pb);
const char *name = NULL;
char buffer[64] = { 0 };
size = FFMIN(size, tag_end - avio_tell(s->pb));
size -= avio_read(s->pb, buffer,
FFMIN(size, sizeof(buffer) - 1));
switch (tag) {
case 0x03:
name = "maker";
break;
case 0x04:
name = "model";
break;
case 0x13:
name = "creation_time";
if (buffer[4] == ':' && buffer[7] == ':')
buffer[4] = buffer[7] = '-';
break;
}
if (name)
av_dict_set(&s->metadata, name, buffer, 0);
avio_skip(s->pb, size);
}
break;
}
default:
avio_skip(s->pb, size);
break;
}
}
}
static int avi_extract_stream_metadata(AVFormatContext *s, AVStream *st)
{
GetByteContext gb;
uint8_t *data = st->codecpar->extradata;
int data_size = st->codecpar->extradata_size;
int tag, offset;
if (!data || data_size < 8) {
return AVERROR_INVALIDDATA;
}
bytestream2_init(&gb, data, data_size);
tag = bytestream2_get_le32(&gb);
switch (tag) {
case MKTAG('A', 'V', 'I', 'F'):
// skip 4 byte padding
bytestream2_skip(&gb, 4);
offset = bytestream2_tell(&gb);
bytestream2_init(&gb, data + offset, data_size - offset);
// decode EXIF tags from IFD, AVI is always little-endian
return avpriv_exif_decode_ifd(s, &gb, 1, 0, &st->metadata);
break;
case MKTAG('C', 'A', 'S', 'I'):
avpriv_request_sample(s, "RIFF stream data tag type CASI (%u)", tag);
break;
case MKTAG('Z', 'o', 'r', 'a'):
avpriv_request_sample(s, "RIFF stream data tag type Zora (%u)", tag);
break;
default:
break;
}
return 0;
}
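/*
* Heuristic bitrate recovery: sum the indexed chunk sizes per stream and,
* when the index plausibly covers the whole file (within ~10%), derive a
* bit_rate for streams that do not declare one.
*/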
static int calculate_bitrate(AVFormatContext *s)
{
AVIContext *avi = s->priv_data;
int i, j;
int64_t lensum = 0;
int64_t maxpos = 0;
for (i = 0; i<s->nb_streams; i++) {
int64_t len = 0;
AVStream *st = s->streams[i];
if (!st->nb_index_entries)
continue;
for (j = 0; j < st->nb_index_entries; j++)
len += st->index_entries[j].size;
maxpos = FFMAX(maxpos, st->index_entries[j-1].pos);
lensum += len;
}
if (maxpos < avi->io_fsize*9/10) // index does not cover the whole file
return 0;
if (lensum*9/10 > maxpos || lensum < maxpos*9/10) // frame sum and filesize mismatch
return 0;
for (i = 0; i<s->nb_streams; i++) {
int64_t len = 0;
AVStream *st = s->streams[i];
int64_t duration;
int64_t bitrate;
for (j = 0; j < st->nb_index_entries; j++)
len += st->index_entries[j].size;
if (st->nb_index_entries < 2 || st->codecpar->bit_rate > 0)
continue;
duration = st->index_entries[j-1].timestamp - st->index_entries[0].timestamp;
bitrate = av_rescale(8*len, st->time_base.den, duration * st->time_base.num);
if (bitrate <= INT_MAX && bitrate > 0) {
st->codecpar->bit_rate = bitrate;
}
}
return 1;
}
static int avi_read_header(AVFormatContext *s)
{
AVIContext *avi = s->priv_data;
AVIOContext *pb = s->pb;
unsigned int tag, tag1, handler;
int codec_type, stream_index, frame_period;
unsigned int size;
int i;
AVStream *st;
AVIStream *ast = NULL;
int avih_width = 0, avih_height = 0;
int amv_file_format = 0;
uint64_t list_end = 0;
int64_t pos;
int ret;
AVDictionaryEntry *dict_entry;
avi->stream_index = -1;
ret = get_riff(s, pb);
if (ret < 0)
return ret;
av_log(avi, AV_LOG_DEBUG, "use odml:%d\n", avi->use_odml);
avi->io_fsize = avi->fsize = avio_size(pb);
if (avi->fsize <= 0 || avi->fsize < avi->riff_end)
avi->fsize = avi->riff_end == 8 ? INT64_MAX : avi->riff_end;
/* first list tag */
stream_index = -1;
codec_type = -1;
frame_period = 0;
for (;;) {
if (avio_feof(pb))
goto fail;
tag = avio_rl32(pb);
size = avio_rl32(pb);
print_tag("tag", tag, size);
switch (tag) {
case MKTAG('L', 'I', 'S', 'T'):
list_end = avio_tell(pb) + size;
/* Ignored, except at start of video packets. */
tag1 = avio_rl32(pb);
print_tag("list", tag1, 0);
if (tag1 == MKTAG('m', 'o', 'v', 'i')) {
avi->movi_list = avio_tell(pb) - 4;
if (size)
avi->movi_end = avi->movi_list + size + (size & 1);
else
avi->movi_end = avi->fsize;
av_log(NULL, AV_LOG_TRACE, "movi end=%"PRIx64"\n", avi->movi_end);
goto end_of_header;
} else if (tag1 == MKTAG('I', 'N', 'F', 'O'))
ff_read_riff_info(s, size - 4);
else if (tag1 == MKTAG('n', 'c', 'd', 't'))
avi_read_nikon(s, list_end);
break;
case MKTAG('I', 'D', 'I', 'T'):
{
unsigned char date[64] = { 0 };
size += (size & 1);
size -= avio_read(pb, date, FFMIN(size, sizeof(date) - 1));
avio_skip(pb, size);
avi_metadata_creation_time(&s->metadata, date);
break;
}
case MKTAG('d', 'm', 'l', 'h'):
avi->is_odml = 1;
avio_skip(pb, size + (size & 1));
break;
case MKTAG('a', 'm', 'v', 'h'):
amv_file_format = 1;
case MKTAG('a', 'v', 'i', 'h'):
/* AVI header */
/* using frame_period is bad idea */
frame_period = avio_rl32(pb);
avio_rl32(pb); /* max. bytes per second */
avio_rl32(pb);
avi->non_interleaved |= avio_rl32(pb) & AVIF_MUSTUSEINDEX;
avio_skip(pb, 2 * 4);
avio_rl32(pb);
avio_rl32(pb);
avih_width = avio_rl32(pb);
avih_height = avio_rl32(pb);
avio_skip(pb, size - 10 * 4);
break;
case MKTAG('s', 't', 'r', 'h'):
/* stream header */
tag1 = avio_rl32(pb);
handler = avio_rl32(pb); /* codec tag */
if (tag1 == MKTAG('p', 'a', 'd', 's')) {
avio_skip(pb, size - 8);
break;
} else {
stream_index++;
st = avformat_new_stream(s, NULL);
if (!st)
goto fail;
st->id = stream_index;
ast = av_mallocz(sizeof(AVIStream));
if (!ast)
goto fail;
st->priv_data = ast;
}
if (amv_file_format)
tag1 = stream_index ? MKTAG('a', 'u', 'd', 's')
: MKTAG('v', 'i', 'd', 's');
print_tag("strh", tag1, -1);
if (tag1 == MKTAG('i', 'a', 'v', 's') ||
tag1 == MKTAG('i', 'v', 'a', 's')) {
int64_t dv_dur;
/* After some consideration -- I don't think we
* have to support anything but DV in type1 AVIs. */
if (s->nb_streams != 1)
goto fail;
if (handler != MKTAG('d', 'v', 's', 'd') &&
handler != MKTAG('d', 'v', 'h', 'd') &&
handler != MKTAG('d', 'v', 's', 'l'))
goto fail;
ast = s->streams[0]->priv_data;
av_freep(&s->streams[0]->codecpar->extradata);
av_freep(&s->streams[0]->codecpar);
#if FF_API_LAVF_AVCTX
FF_DISABLE_DEPRECATION_WARNINGS
av_freep(&s->streams[0]->codec);
FF_ENABLE_DEPRECATION_WARNINGS
#endif
if (s->streams[0]->info)
av_freep(&s->streams[0]->info->duration_error);
av_freep(&s->streams[0]->info);
if (s->streams[0]->internal)
av_freep(&s->streams[0]->internal->avctx);
av_freep(&s->streams[0]->internal);
av_freep(&s->streams[0]);
s->nb_streams = 0;
if (CONFIG_DV_DEMUXER) {
avi->dv_demux = avpriv_dv_init_demux(s);
if (!avi->dv_demux)
goto fail;
} else
goto fail;
s->streams[0]->priv_data = ast;
avio_skip(pb, 3 * 4);
ast->scale = avio_rl32(pb);
ast->rate = avio_rl32(pb);
avio_skip(pb, 4); /* start time */
dv_dur = avio_rl32(pb);
if (ast->scale > 0 && ast->rate > 0 && dv_dur > 0) {
dv_dur *= AV_TIME_BASE;
s->duration = av_rescale(dv_dur, ast->scale, ast->rate);
}
/* else, leave duration alone; timing estimation in utils.c
* will make a guess based on bitrate. */
stream_index = s->nb_streams - 1;
avio_skip(pb, size - 9 * 4);
break;
}
av_assert0(stream_index < s->nb_streams);
ast->handler = handler;
avio_rl32(pb); /* flags */
avio_rl16(pb); /* priority */
avio_rl16(pb); /* language */
avio_rl32(pb); /* initial frame */
ast->scale = avio_rl32(pb);
ast->rate = avio_rl32(pb);
if (!(ast->scale && ast->rate)) {
av_log(s, AV_LOG_WARNING,
"scale/rate is %"PRIu32"/%"PRIu32" which is invalid. "
"(This file has been generated by broken software.)\n",
ast->scale,
ast->rate);
if (frame_period) {
ast->rate = 1000000;
ast->scale = frame_period;
} else {
ast->rate = 25;
ast->scale = 1;
}
}
avpriv_set_pts_info(st, 64, ast->scale, ast->rate);
ast->cum_len = avio_rl32(pb); /* start */
st->nb_frames = avio_rl32(pb);
st->start_time = 0;
avio_rl32(pb); /* buffer size */
avio_rl32(pb); /* quality */
if (ast->cum_len*ast->scale/ast->rate > 3600) {
av_log(s, AV_LOG_ERROR, "crazy start time, iam scared, giving up\n");
ast->cum_len = 0;
}
ast->sample_size = avio_rl32(pb);
ast->cum_len *= FFMAX(1, ast->sample_size);
av_log(s, AV_LOG_TRACE, "%"PRIu32" %"PRIu32" %d\n",
ast->rate, ast->scale, ast->sample_size);
switch (tag1) {
case MKTAG('v', 'i', 'd', 's'):
codec_type = AVMEDIA_TYPE_VIDEO;
ast->sample_size = 0;
st->avg_frame_rate = av_inv_q(st->time_base);
break;
case MKTAG('a', 'u', 'd', 's'):
codec_type = AVMEDIA_TYPE_AUDIO;
break;
case MKTAG('t', 'x', 't', 's'):
codec_type = AVMEDIA_TYPE_SUBTITLE;
break;
case MKTAG('d', 'a', 't', 's'):
codec_type = AVMEDIA_TYPE_DATA;
break;
default:
av_log(s, AV_LOG_INFO, "unknown stream type %X\n", tag1);
}
if (ast->sample_size < 0) {
if (s->error_recognition & AV_EF_EXPLODE) {
av_log(s, AV_LOG_ERROR,
"Invalid sample_size %d at stream %d\n",
ast->sample_size,
stream_index);
goto fail;
}
av_log(s, AV_LOG_WARNING,
"Invalid sample_size %d at stream %d "
"setting it to 0\n",
ast->sample_size,
stream_index);
ast->sample_size = 0;
}
if (ast->sample_size == 0) {
st->duration = st->nb_frames;
if (st->duration > 0 && avi->io_fsize > 0 && avi->riff_end > avi->io_fsize) {
av_log(s, AV_LOG_DEBUG, "File is truncated adjusting duration\n");
st->duration = av_rescale(st->duration, avi->io_fsize, avi->riff_end);
}
}
ast->frame_offset = ast->cum_len;
avio_skip(pb, size - 12 * 4);
break;
case MKTAG('s', 't', 'r', 'f'):
/* stream header */
if (!size && (codec_type == AVMEDIA_TYPE_AUDIO ||
codec_type == AVMEDIA_TYPE_VIDEO))
break;
if (stream_index >= (unsigned)s->nb_streams || avi->dv_demux) {
avio_skip(pb, size);
} else {
uint64_t cur_pos = avio_tell(pb);
unsigned esize;
if (cur_pos < list_end)
size = FFMIN(size, list_end - cur_pos);
st = s->streams[stream_index];
if (st->codecpar->codec_type != AVMEDIA_TYPE_UNKNOWN) {
avio_skip(pb, size);
break;
}
switch (codec_type) {
case AVMEDIA_TYPE_VIDEO:
if (amv_file_format) {
st->codecpar->width = avih_width;
st->codecpar->height = avih_height;
st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
st->codecpar->codec_id = AV_CODEC_ID_AMV;
avio_skip(pb, size);
break;
}
tag1 = ff_get_bmp_header(pb, st, &esize);
if (tag1 == MKTAG('D', 'X', 'S', 'B') ||
tag1 == MKTAG('D', 'X', 'S', 'A')) {
st->codecpar->codec_type = AVMEDIA_TYPE_SUBTITLE;
st->codecpar->codec_tag = tag1;
st->codecpar->codec_id = AV_CODEC_ID_XSUB;
break;
}
if (size > 10 * 4 && size < (1 << 30) && size < avi->fsize) {
if (esize == size-1 && (esize&1)) {
st->codecpar->extradata_size = esize - 10 * 4;
} else
st->codecpar->extradata_size = size - 10 * 4;
if (st->codecpar->extradata) {
av_log(s, AV_LOG_WARNING, "New extradata in strf chunk, freeing previous one.\n");
av_freep(&st->codecpar->extradata);
}
if (ff_get_extradata(s, st->codecpar, pb, st->codecpar->extradata_size) < 0)
return AVERROR(ENOMEM);
}
// FIXME: check if the encoder really did this correctly
if (st->codecpar->extradata_size & 1)
avio_r8(pb);
/* Extract palette from extradata if bpp <= 8.
* This code assumes that extradata contains only palette.
* This is true for all paletted codecs implemented in
* FFmpeg. */
if (st->codecpar->extradata_size &&
(st->codecpar->bits_per_coded_sample <= 8)) {
int pal_size = (1 << st->codecpar->bits_per_coded_sample) << 2;
const uint8_t *pal_src;
pal_size = FFMIN(pal_size, st->codecpar->extradata_size);
pal_src = st->codecpar->extradata +
st->codecpar->extradata_size - pal_size;
/* Exclude the "BottomUp" field from the palette */
if (pal_src - st->codecpar->extradata >= 9 &&
!memcmp(st->codecpar->extradata + st->codecpar->extradata_size - 9, "BottomUp", 9))
pal_src -= 9;
for (i = 0; i < pal_size / 4; i++)
ast->pal[i] = 0xFFU<<24 | AV_RL32(pal_src + 4 * i);
ast->has_pal = 1;
}
print_tag("video", tag1, 0);
st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
st->codecpar->codec_tag = tag1;
st->codecpar->codec_id = ff_codec_get_id(ff_codec_bmp_tags,
tag1);
/* If codec is not found yet, try with the mov tags. */
if (!st->codecpar->codec_id) {
st->codecpar->codec_id =
ff_codec_get_id(ff_codec_movvideo_tags, tag1);
if (st->codecpar->codec_id)
av_log(s, AV_LOG_WARNING,
"mov tag found in avi (fourcc %s)\n",
av_fourcc2str(tag1));
}
/* This is needed to get the pict type which is necessary
* for generating correct pts. */
st->need_parsing = AVSTREAM_PARSE_HEADERS;
if (st->codecpar->codec_id == AV_CODEC_ID_MPEG4 &&
ast->handler == MKTAG('X', 'V', 'I', 'D'))
st->codecpar->codec_tag = MKTAG('X', 'V', 'I', 'D');
if (st->codecpar->codec_tag == MKTAG('V', 'S', 'S', 'H'))
st->need_parsing = AVSTREAM_PARSE_FULL;
if (st->codecpar->codec_id == AV_CODEC_ID_RV40)
st->need_parsing = AVSTREAM_PARSE_NONE;
if (st->codecpar->codec_tag == 0 && st->codecpar->height > 0 &&
st->codecpar->extradata_size < 1U << 30) {
st->codecpar->extradata_size += 9;
if ((ret = av_reallocp(&st->codecpar->extradata,
st->codecpar->extradata_size +
AV_INPUT_BUFFER_PADDING_SIZE)) < 0) {
st->codecpar->extradata_size = 0;
return ret;
} else
memcpy(st->codecpar->extradata + st->codecpar->extradata_size - 9,
"BottomUp", 9);
}
st->codecpar->height = FFABS(st->codecpar->height);
// avio_skip(pb, size - 5 * 4);
break;
case AVMEDIA_TYPE_AUDIO:
ret = ff_get_wav_header(s, pb, st->codecpar, size, 0);
if (ret < 0)
return ret;
ast->dshow_block_align = st->codecpar->block_align;
if (ast->sample_size && st->codecpar->block_align &&
ast->sample_size != st->codecpar->block_align) {
av_log(s,
AV_LOG_WARNING,
"sample size (%d) != block align (%d)\n",
ast->sample_size,
st->codecpar->block_align);
ast->sample_size = st->codecpar->block_align;
}
/* 2-aligned
* (fix for Stargate SG-1 - 3x18 - Shades of Grey.avi) */
if (size & 1)
avio_skip(pb, 1);
/* Force parsing as several audio frames can be in
* one packet and timestamps refer to packet start. */
st->need_parsing = AVSTREAM_PARSE_TIMESTAMPS;
/* ADTS header is in extradata, AAC without header must be
* stored as exact frames. Parser not needed and it will
* fail. */
if (st->codecpar->codec_id == AV_CODEC_ID_AAC &&
st->codecpar->extradata_size)
st->need_parsing = AVSTREAM_PARSE_NONE;
// The flac parser does not work with AVSTREAM_PARSE_TIMESTAMPS
if (st->codecpar->codec_id == AV_CODEC_ID_FLAC)
st->need_parsing = AVSTREAM_PARSE_NONE;
/* AVI files with Xan DPCM audio (wrongly) declare PCM
* audio in the header but have Axan as stream_code_tag. */
if (ast->handler == AV_RL32("Axan")) {
st->codecpar->codec_id = AV_CODEC_ID_XAN_DPCM;
st->codecpar->codec_tag = 0;
ast->dshow_block_align = 0;
}
if (amv_file_format) {
st->codecpar->codec_id = AV_CODEC_ID_ADPCM_IMA_AMV;
ast->dshow_block_align = 0;
}
if ((st->codecpar->codec_id == AV_CODEC_ID_AAC ||
st->codecpar->codec_id == AV_CODEC_ID_FLAC ||
st->codecpar->codec_id == AV_CODEC_ID_MP2 ) && ast->dshow_block_align <= 4 && ast->dshow_block_align) {
av_log(s, AV_LOG_DEBUG, "overriding invalid dshow_block_align of %d\n", ast->dshow_block_align);
ast->dshow_block_align = 0;
}
if (st->codecpar->codec_id == AV_CODEC_ID_AAC && ast->dshow_block_align == 1024 && ast->sample_size == 1024 ||
st->codecpar->codec_id == AV_CODEC_ID_AAC && ast->dshow_block_align == 4096 && ast->sample_size == 4096 ||
st->codecpar->codec_id == AV_CODEC_ID_MP3 && ast->dshow_block_align == 1152 && ast->sample_size == 1152) {
av_log(s, AV_LOG_DEBUG, "overriding sample_size\n");
ast->sample_size = 0;
}
break;
case AVMEDIA_TYPE_SUBTITLE:
st->codecpar->codec_type = AVMEDIA_TYPE_SUBTITLE;
st->request_probe= 1;
avio_skip(pb, size);
break;
default:
st->codecpar->codec_type = AVMEDIA_TYPE_DATA;
st->codecpar->codec_id = AV_CODEC_ID_NONE;
st->codecpar->codec_tag = 0;
avio_skip(pb, size);
break;
}
}
break;
case MKTAG('s', 't', 'r', 'd'):
if (stream_index >= (unsigned)s->nb_streams
|| s->streams[stream_index]->codecpar->extradata_size
|| s->streams[stream_index]->codecpar->codec_tag == MKTAG('H','2','6','4')) {
avio_skip(pb, size);
} else {
uint64_t cur_pos = avio_tell(pb);
if (cur_pos < list_end)
size = FFMIN(size, list_end - cur_pos);
st = s->streams[stream_index];
if (size<(1<<30)) {
if (st->codecpar->extradata) {
av_log(s, AV_LOG_WARNING, "New extradata in strd chunk, freeing previous one.\n");
av_freep(&st->codecpar->extradata);
}
if (ff_get_extradata(s, st->codecpar, pb, size) < 0)
return AVERROR(ENOMEM);
}
if (st->codecpar->extradata_size & 1) //FIXME check if the encoder really did this correctly
avio_r8(pb);
ret = avi_extract_stream_metadata(s, st);
if (ret < 0) {
av_log(s, AV_LOG_WARNING, "could not decoding EXIF data in stream header.\n");
}
}
break;
case MKTAG('i', 'n', 'd', 'x'):
pos = avio_tell(pb);
if ((pb->seekable & AVIO_SEEKABLE_NORMAL) && !(s->flags & AVFMT_FLAG_IGNIDX) &&
avi->use_odml &&
read_odml_index(s, 0) < 0 &&
(s->error_recognition & AV_EF_EXPLODE))
goto fail;
avio_seek(pb, pos + size, SEEK_SET);
break;
case MKTAG('v', 'p', 'r', 'p'):
if (stream_index < (unsigned)s->nb_streams && size > 9 * 4) {
AVRational active, active_aspect;
st = s->streams[stream_index];
avio_rl32(pb);
avio_rl32(pb);
avio_rl32(pb);
avio_rl32(pb);
avio_rl32(pb);
active_aspect.den = avio_rl16(pb);
active_aspect.num = avio_rl16(pb);
active.num = avio_rl32(pb);
active.den = avio_rl32(pb);
avio_rl32(pb); // nbFieldsPerFrame
if (active_aspect.num && active_aspect.den &&
active.num && active.den) {
st->sample_aspect_ratio = av_div_q(active_aspect, active);
av_log(s, AV_LOG_TRACE, "vprp %d/%d %d/%d\n",
active_aspect.num, active_aspect.den,
active.num, active.den);
}
size -= 9 * 4;
}
avio_skip(pb, size);
break;
case MKTAG('s', 't', 'r', 'n'):
if (s->nb_streams) {
ret = avi_read_tag(s, s->streams[s->nb_streams - 1], tag, size);
if (ret < 0)
return ret;
break;
}
default:
if (size > 1000000) {
av_log(s, AV_LOG_ERROR,
"Something went wrong during header parsing, "
"tag %s has size %u, "
"I will ignore it and try to continue anyway.\n",
av_fourcc2str(tag), size);
if (s->error_recognition & AV_EF_EXPLODE)
goto fail;
avi->movi_list = avio_tell(pb) - 4;
avi->movi_end = avi->fsize;
goto end_of_header;
}
/* Do not fail for very large idx1 tags */
case MKTAG('i', 'd', 'x', '1'):
/* skip tag */
size += (size & 1);
avio_skip(pb, size);
break;
}
}
end_of_header:
/* check stream number */
if (stream_index != s->nb_streams - 1) {
fail:
return AVERROR_INVALIDDATA;
}
if (!avi->index_loaded && (pb->seekable & AVIO_SEEKABLE_NORMAL))
avi_load_index(s);
calculate_bitrate(s);
avi->index_loaded |= 1;
if ((ret = guess_ni_flag(s)) < 0)
return ret;
avi->non_interleaved |= ret | (s->flags & AVFMT_FLAG_SORT_DTS);
dict_entry = av_dict_get(s->metadata, "ISFT", NULL, 0);
if (dict_entry && !strcmp(dict_entry->value, "PotEncoder"))
for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
if ( st->codecpar->codec_id == AV_CODEC_ID_MPEG1VIDEO
|| st->codecpar->codec_id == AV_CODEC_ID_MPEG2VIDEO)
st->need_parsing = AVSTREAM_PARSE_FULL;
}
for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
if (st->nb_index_entries)
break;
}
// DV-in-AVI cannot be non-interleaved, if set this must be
// a mis-detection.
if (avi->dv_demux)
avi->non_interleaved = 0;
if (i == s->nb_streams && avi->non_interleaved) {
av_log(s, AV_LOG_WARNING,
"Non-interleaved AVI without index, switching to interleaved\n");
avi->non_interleaved = 0;
}
if (avi->non_interleaved) {
av_log(s, AV_LOG_INFO, "non-interleaved AVI\n");
clean_index(s);
}
ff_metadata_conv_ctx(s, NULL, avi_metadata_conv);
ff_metadata_conv_ctx(s, NULL, ff_riff_info_conv);
return 0;
}
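/*
* Detect and mount a GAB2 embedded subtitle track. The packet payload is
* assumed to be "GAB2\0", a 16-bit version (2), a length-prefixed
* UTF-16LE title, then the raw subtitle file, which is probed and opened
* through a nested demuxer; only the srt and ass demuxers are accepted.
*/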
static int read_gab2_sub(AVFormatContext *s, AVStream *st, AVPacket *pkt)
{
if (pkt->size >= 7 &&
pkt->size < INT_MAX - AVPROBE_PADDING_SIZE &&
!strcmp(pkt->data, "GAB2") && AV_RL16(pkt->data + 5) == 2) {
uint8_t desc[256];
int score = AVPROBE_SCORE_EXTENSION, ret;
AVIStream *ast = st->priv_data;
AVInputFormat *sub_demuxer;
AVRational time_base;
int size;
AVIOContext *pb = avio_alloc_context(pkt->data + 7,
pkt->size - 7,
0, NULL, NULL, NULL, NULL);
AVProbeData pd;
unsigned int desc_len;
if (!pb)
goto error;
desc_len = avio_rl32(pb);
if (desc_len > pb->buf_end - pb->buf_ptr)
goto error;
ret = avio_get_str16le(pb, desc_len, desc, sizeof(desc));
avio_skip(pb, desc_len - ret);
if (*desc)
av_dict_set(&st->metadata, "title", desc, 0);
avio_rl16(pb); /* flags? */
avio_rl32(pb); /* data size */
size = pb->buf_end - pb->buf_ptr;
pd = (AVProbeData) { .buf = av_mallocz(size + AVPROBE_PADDING_SIZE),
.buf_size = size };
if (!pd.buf)
goto error;
memcpy(pd.buf, pb->buf_ptr, size);
sub_demuxer = av_probe_input_format2(&pd, 1, &score);
av_freep(&pd.buf);
if (!sub_demuxer)
goto error;
if (strcmp(sub_demuxer->name, "srt") && strcmp(sub_demuxer->name, "ass"))
goto error;
if (!(ast->sub_ctx = avformat_alloc_context()))
goto error;
ast->sub_ctx->pb = pb;
if (ff_copy_whiteblacklists(ast->sub_ctx, s) < 0)
goto error;
if (!avformat_open_input(&ast->sub_ctx, "", sub_demuxer, NULL)) {
if (ast->sub_ctx->nb_streams != 1)
goto error;
ff_read_packet(ast->sub_ctx, &ast->sub_pkt);
avcodec_parameters_copy(st->codecpar, ast->sub_ctx->streams[0]->codecpar);
time_base = ast->sub_ctx->streams[0]->time_base;
avpriv_set_pts_info(st, 64, time_base.num, time_base.den);
}
ast->sub_buffer = pkt->data;
memset(pkt, 0, sizeof(*pkt));
return 1;
error:
av_freep(&ast->sub_ctx);
av_freep(&pb);
}
return 0;
}
static AVStream *get_subtitle_pkt(AVFormatContext *s, AVStream *next_st,
AVPacket *pkt)
{
AVIStream *ast, *next_ast = next_st->priv_data;
int64_t ts, next_ts, ts_min = INT64_MAX;
AVStream *st, *sub_st = NULL;
int i;
next_ts = av_rescale_q(next_ast->frame_offset, next_st->time_base,
AV_TIME_BASE_Q);
for (i = 0; i < s->nb_streams; i++) {
st = s->streams[i];
ast = st->priv_data;
if (st->discard < AVDISCARD_ALL && ast && ast->sub_pkt.data) {
ts = av_rescale_q(ast->sub_pkt.dts, st->time_base, AV_TIME_BASE_Q);
if (ts <= next_ts && ts < ts_min) {
ts_min = ts;
sub_st = st;
}
}
}
if (sub_st) {
ast = sub_st->priv_data;
*pkt = ast->sub_pkt;
pkt->stream_index = sub_st->index;
if (ff_read_packet(ast->sub_ctx, &ast->sub_pkt) < 0)
ast->sub_pkt.data = NULL;
}
return sub_st;
}
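/*
* Decode the two leading ASCII digits of a chunk id ("01wb" -> 1);
* returns 100, an intentionally out-of-range index, when the prefix is
* not numeric.
*/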
static int get_stream_idx(const unsigned *d)
{
if (d[0] >= '0' && d[0] <= '9' &&
d[1] >= '0' && d[1] <= '9') {
return (d[0] - '0') * 10 + (d[1] - '0');
} else {
return 100; // invalid stream ID
}
}
/**
* Scan forward for the next plausible chunk header inside the movi list.
*
* @param exit_early set to 1 to just gather packet position without making the changes needed to actually read & return the packet
*/
static int avi_sync(AVFormatContext *s, int exit_early)
{
AVIContext *avi = s->priv_data;
AVIOContext *pb = s->pb;
int n;
unsigned int d[8];
unsigned int size;
int64_t i, sync;
start_sync:
memset(d, -1, sizeof(d));
for (i = sync = avio_tell(pb); !avio_feof(pb); i++) {
int j;
for (j = 0; j < 7; j++)
d[j] = d[j + 1];
d[7] = avio_r8(pb);
size = d[4] + (d[5] << 8) + (d[6] << 16) + (d[7] << 24);
n = get_stream_idx(d + 2);
ff_tlog(s, "%X %X %X %X %X %X %X %X %"PRId64" %u %d\n",
d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], i, size, n);
if (i*(avi->io_fsize>0) + (uint64_t)size > avi->fsize || d[0] > 127)
continue;
// parse ix##
if ((d[0] == 'i' && d[1] == 'x' && n < s->nb_streams) ||
// parse JUNK
(d[0] == 'J' && d[1] == 'U' && d[2] == 'N' && d[3] == 'K') ||
(d[0] == 'i' && d[1] == 'd' && d[2] == 'x' && d[3] == '1') ||
(d[0] == 'i' && d[1] == 'n' && d[2] == 'd' && d[3] == 'x')) {
avio_skip(pb, size);
goto start_sync;
}
// parse stray LIST
if (d[0] == 'L' && d[1] == 'I' && d[2] == 'S' && d[3] == 'T') {
avio_skip(pb, 4);
goto start_sync;
}
n = get_stream_idx(d);
if (!((i - avi->last_pkt_pos) & 1) &&
get_stream_idx(d + 1) < s->nb_streams)
continue;
// detect ##ix chunk and skip
if (d[2] == 'i' && d[3] == 'x' && n < s->nb_streams) {
avio_skip(pb, size);
goto start_sync;
}
if (avi->dv_demux && n != 0)
continue;
// parse ##dc/##wb
if (n < s->nb_streams) {
AVStream *st;
AVIStream *ast;
st = s->streams[n];
ast = st->priv_data;
if (!ast) {
av_log(s, AV_LOG_WARNING, "Skipping foreign stream %d packet\n", n);
continue;
}
if (s->nb_streams >= 2) {
AVStream *st1 = s->streams[1];
AVIStream *ast1 = st1->priv_data;
// workaround for broken small-file-bug402.avi
if ( d[2] == 'w' && d[3] == 'b'
&& n == 0
&& st ->codecpar->codec_type == AVMEDIA_TYPE_VIDEO
&& st1->codecpar->codec_type == AVMEDIA_TYPE_AUDIO
&& ast->prefix == 'd'*256+'c'
&& (d[2]*256+d[3] == ast1->prefix || !ast1->prefix_count)
) {
n = 1;
st = st1;
ast = ast1;
av_log(s, AV_LOG_WARNING,
"Invalid stream + prefix combination, assuming audio.\n");
}
}
if (!avi->dv_demux &&
((st->discard >= AVDISCARD_DEFAULT && size == 0) /* ||
// FIXME: needs a little reordering
(st->discard >= AVDISCARD_NONKEY &&
!(pkt->flags & AV_PKT_FLAG_KEY)) */
|| st->discard >= AVDISCARD_ALL)) {
if (!exit_early) {
ast->frame_offset += get_duration(ast, size);
avio_skip(pb, size);
goto start_sync;
}
}
if (d[2] == 'p' && d[3] == 'c' && size <= 4 * 256 + 4) {
int k = avio_r8(pb);
int last = (k + avio_r8(pb) - 1) & 0xFF;
avio_rl16(pb); // flags
// b + (g << 8) + (r << 16);
for (; k <= last; k++)
ast->pal[k] = 0xFFU<<24 | avio_rb32(pb)>>8;
ast->has_pal = 1;
goto start_sync;
} else if (((ast->prefix_count < 5 || sync + 9 > i) &&
d[2] < 128 && d[3] < 128) ||
d[2] * 256 + d[3] == ast->prefix /* ||
(d[2] == 'd' && d[3] == 'c') ||
(d[2] == 'w' && d[3] == 'b') */) {
if (exit_early)
return 0;
if (d[2] * 256 + d[3] == ast->prefix)
ast->prefix_count++;
else {
ast->prefix = d[2] * 256 + d[3];
ast->prefix_count = 0;
}
avi->stream_index = n;
ast->packet_size = size + 8;
ast->remaining = size;
if (size) {
uint64_t pos = avio_tell(pb) - 8;
if (!st->index_entries || !st->nb_index_entries ||
st->index_entries[st->nb_index_entries - 1].pos < pos) {
av_add_index_entry(st, pos, ast->frame_offset, size,
0, AVINDEX_KEYFRAME);
}
}
return 0;
}
}
}
if (pb->error)
return pb->error;
return AVERROR_EOF;
}
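/*
* Non-interleaved mode: pick the stream whose next sample has the lowest
* timestamp and seek directly to its next chunk via the index, instead of
* reading the file linearly.
*/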
static int ni_prepare_read(AVFormatContext *s)
{
AVIContext *avi = s->priv_data;
int best_stream_index = 0;
AVStream *best_st = NULL;
AVIStream *best_ast;
int64_t best_ts = INT64_MAX;
int i;
for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
AVIStream *ast = st->priv_data;
int64_t ts = ast->frame_offset;
int64_t last_ts;
if (!st->nb_index_entries)
continue;
last_ts = st->index_entries[st->nb_index_entries - 1].timestamp;
if (!ast->remaining && ts > last_ts)
continue;
ts = av_rescale_q(ts, st->time_base,
(AVRational) { FFMAX(1, ast->sample_size),
AV_TIME_BASE });
av_log(s, AV_LOG_TRACE, "%"PRId64" %d/%d %"PRId64"\n", ts,
st->time_base.num, st->time_base.den, ast->frame_offset);
if (ts < best_ts) {
best_ts = ts;
best_st = st;
best_stream_index = i;
}
}
if (!best_st)
return AVERROR_EOF;
best_ast = best_st->priv_data;
best_ts = best_ast->frame_offset;
if (best_ast->remaining) {
i = av_index_search_timestamp(best_st,
best_ts,
AVSEEK_FLAG_ANY |
AVSEEK_FLAG_BACKWARD);
} else {
i = av_index_search_timestamp(best_st, best_ts, AVSEEK_FLAG_ANY);
if (i >= 0)
best_ast->frame_offset = best_st->index_entries[i].timestamp;
}
if (i >= 0) {
int64_t pos = best_st->index_entries[i].pos;
pos += best_ast->packet_size - best_ast->remaining;
if (avio_seek(s->pb, pos + 8, SEEK_SET) < 0)
return AVERROR_EOF;
av_assert0(best_ast->remaining <= best_ast->packet_size);
avi->stream_index = best_stream_index;
if (!best_ast->remaining)
best_ast->packet_size =
best_ast->remaining = best_st->index_entries[i].size;
}
else
return AVERROR_EOF;
return 0;
}
static int avi_read_packet(AVFormatContext *s, AVPacket *pkt)
{
AVIContext *avi = s->priv_data;
AVIOContext *pb = s->pb;
int err;
if (CONFIG_DV_DEMUXER && avi->dv_demux) {
int size = avpriv_dv_get_packet(avi->dv_demux, pkt);
if (size >= 0)
return size;
else
goto resync;
}
if (avi->non_interleaved) {
err = ni_prepare_read(s);
if (err < 0)
return err;
}
resync:
if (avi->stream_index >= 0) {
AVStream *st = s->streams[avi->stream_index];
AVIStream *ast = st->priv_data;
int size, err;
if (get_subtitle_pkt(s, st, pkt))
return 0;
// minorityreport.AVI block_align=1024 sample_size=1 IMA-ADPCM
if (ast->sample_size <= 1)
size = INT_MAX;
else if (ast->sample_size < 32)
// arbitrary multiplier to avoid tiny packets for raw PCM data
size = 1024 * ast->sample_size;
else
size = ast->sample_size;
if (size > ast->remaining)
size = ast->remaining;
avi->last_pkt_pos = avio_tell(pb);
err = av_get_packet(pb, pkt, size);
if (err < 0)
return err;
size = err;
if (ast->has_pal && pkt->size < (unsigned)INT_MAX / 2) {
uint8_t *pal;
pal = av_packet_new_side_data(pkt,
AV_PKT_DATA_PALETTE,
AVPALETTE_SIZE);
if (!pal) {
av_log(s, AV_LOG_ERROR,
"Failed to allocate data for palette\n");
} else {
memcpy(pal, ast->pal, AVPALETTE_SIZE);
ast->has_pal = 0;
}
}
if (CONFIG_DV_DEMUXER && avi->dv_demux) {
AVBufferRef *avbuf = pkt->buf;
size = avpriv_dv_produce_packet(avi->dv_demux, pkt,
pkt->data, pkt->size, pkt->pos);
pkt->buf = avbuf;
pkt->flags |= AV_PKT_FLAG_KEY;
if (size < 0)
av_packet_unref(pkt);
} else if (st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE &&
!st->codecpar->codec_tag && read_gab2_sub(s, st, pkt)) {
ast->frame_offset++;
avi->stream_index = -1;
ast->remaining = 0;
goto resync;
} else {
/* XXX: How to handle B-frames in AVI? */
pkt->dts = ast->frame_offset;
// pkt->dts += ast->start;
if (ast->sample_size)
pkt->dts /= ast->sample_size;
pkt->stream_index = avi->stream_index;
if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && st->index_entries) {
AVIndexEntry *e;
int index;
                index = av_index_search_timestamp(st, ast->frame_offset, AVSEEK_FLAG_ANY);
                /* only form the entry pointer once the index is known to be valid */
                e = index >= 0 ? &st->index_entries[index] : NULL;
                if (e && e->timestamp == ast->frame_offset) {
if (index == st->nb_index_entries-1) {
int key=1;
uint32_t state=-1;
if (st->codecpar->codec_id == AV_CODEC_ID_MPEG4) {
const uint8_t *ptr = pkt->data, *end = ptr + FFMIN(size, 256);
while (ptr < end) {
ptr = avpriv_find_start_code(ptr, end, &state);
if (state == 0x1B6 && ptr < end) {
key = !(*ptr & 0xC0);
break;
}
}
}
if (!key)
e->flags &= ~AVINDEX_KEYFRAME;
}
if (e->flags & AVINDEX_KEYFRAME)
pkt->flags |= AV_PKT_FLAG_KEY;
}
} else {
pkt->flags |= AV_PKT_FLAG_KEY;
}
ast->frame_offset += get_duration(ast, pkt->size);
}
ast->remaining -= err;
if (!ast->remaining) {
avi->stream_index = -1;
ast->packet_size = 0;
}
if (!avi->non_interleaved && pkt->pos >= 0 && ast->seek_pos > pkt->pos) {
av_packet_unref(pkt);
goto resync;
}
ast->seek_pos= 0;
if (!avi->non_interleaved && st->nb_index_entries>1 && avi->index_loaded>1) {
int64_t dts= av_rescale_q(pkt->dts, st->time_base, AV_TIME_BASE_Q);
if (avi->dts_max - dts > 2*AV_TIME_BASE) {
avi->non_interleaved= 1;
av_log(s, AV_LOG_INFO, "Switching to NI mode, due to poor interleaving\n");
}else if (avi->dts_max < dts)
avi->dts_max = dts;
}
return 0;
}
if ((err = avi_sync(s, 0)) < 0)
return err;
goto resync;
}
/* XXX: We make the implicit supposition that the positions are sorted
* for each stream. */
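/* Each idx1 entry is 16 bytes, stored little-endian: the chunk tag (whose
 * first two characters encode the stream number), AVIIF_* flags, the chunk
 * position, and the chunk length. */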
static int avi_read_idx1(AVFormatContext *s, int size)
{
AVIContext *avi = s->priv_data;
AVIOContext *pb = s->pb;
int nb_index_entries, i;
AVStream *st;
AVIStream *ast;
int64_t pos;
unsigned int index, tag, flags, len, first_packet = 1;
int64_t last_pos = -1;
unsigned last_idx = -1;
int64_t idx1_pos, first_packet_pos = 0, data_offset = 0;
int anykey = 0;
nb_index_entries = size / 16;
if (nb_index_entries <= 0)
return AVERROR_INVALIDDATA;
idx1_pos = avio_tell(pb);
avio_seek(pb, avi->movi_list + 4, SEEK_SET);
if (avi_sync(s, 1) == 0)
first_packet_pos = avio_tell(pb) - 8;
avi->stream_index = -1;
avio_seek(pb, idx1_pos, SEEK_SET);
if (s->nb_streams == 1 && s->streams[0]->codecpar->codec_tag == AV_RL32("MMES")) {
first_packet_pos = 0;
data_offset = avi->movi_list;
}
/* Read the entries and sort them in each stream component. */
for (i = 0; i < nb_index_entries; i++) {
if (avio_feof(pb))
return -1;
tag = avio_rl32(pb);
flags = avio_rl32(pb);
pos = avio_rl32(pb);
len = avio_rl32(pb);
av_log(s, AV_LOG_TRACE, "%d: tag=0x%x flags=0x%x pos=0x%"PRIx64" len=%d/",
i, tag, flags, pos, len);
index = ((tag & 0xff) - '0') * 10;
index += (tag >> 8 & 0xff) - '0';
if (index >= s->nb_streams)
continue;
st = s->streams[index];
ast = st->priv_data;
        /* Skip 'xxpc' palette change entries in the index until logic
         * to process them is properly implemented. */
if ((tag >> 16 & 0xff) == 'p' && (tag >> 24 & 0xff) == 'c')
continue;
if (first_packet && first_packet_pos) {
if (avi->movi_list + 4 != pos || pos + 500 > first_packet_pos)
data_offset = first_packet_pos - pos;
first_packet = 0;
}
pos += data_offset;
av_log(s, AV_LOG_TRACE, "%d cum_len=%"PRId64"\n", len, ast->cum_len);
// even if we have only a single stream, we should
// switch to non-interleaved to get correct timestamps
if (last_pos == pos)
avi->non_interleaved = 1;
if (last_idx != pos && len) {
av_add_index_entry(st, pos, ast->cum_len, len, 0,
(flags & AVIIF_INDEX) ? AVINDEX_KEYFRAME : 0);
last_idx= pos;
}
ast->cum_len += get_duration(ast, len);
last_pos = pos;
anykey |= flags&AVIIF_INDEX;
}
if (!anykey) {
for (index = 0; index < s->nb_streams; index++) {
st = s->streams[index];
if (st->nb_index_entries)
st->index_entries[0].flags |= AVINDEX_KEYFRAME;
}
}
return 0;
}
/* Scan the index and consider any file with streams more than
* 2 seconds or 64MB apart non-interleaved. */
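/* This sweeps through file positions: at every step each stream's index
 * cursor is advanced past 'pos', then the smallest and largest reachable
 * timestamps are compared, along with the buffering such a gap would imply
 * at the stream's bit rate. */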
static int check_stream_max_drift(AVFormatContext *s)
{
int64_t min_pos, pos;
int i;
int *idx = av_mallocz_array(s->nb_streams, sizeof(*idx));
if (!idx)
return AVERROR(ENOMEM);
for (min_pos = pos = 0; min_pos != INT64_MAX; pos = min_pos + 1LU) {
int64_t max_dts = INT64_MIN / 2;
int64_t min_dts = INT64_MAX / 2;
int64_t max_buffer = 0;
min_pos = INT64_MAX;
for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
AVIStream *ast = st->priv_data;
int n = st->nb_index_entries;
while (idx[i] < n && st->index_entries[idx[i]].pos < pos)
idx[i]++;
if (idx[i] < n) {
int64_t dts;
dts = av_rescale_q(st->index_entries[idx[i]].timestamp /
FFMAX(ast->sample_size, 1),
st->time_base, AV_TIME_BASE_Q);
min_dts = FFMIN(min_dts, dts);
min_pos = FFMIN(min_pos, st->index_entries[idx[i]].pos);
}
}
for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
AVIStream *ast = st->priv_data;
if (idx[i] && min_dts != INT64_MAX / 2) {
int64_t dts;
dts = av_rescale_q(st->index_entries[idx[i] - 1].timestamp /
FFMAX(ast->sample_size, 1),
st->time_base, AV_TIME_BASE_Q);
max_dts = FFMAX(max_dts, dts);
max_buffer = FFMAX(max_buffer,
av_rescale(dts - min_dts,
st->codecpar->bit_rate,
AV_TIME_BASE));
}
}
if (max_dts - min_dts > 2 * AV_TIME_BASE ||
max_buffer > 1024 * 1024 * 8 * 8) {
av_free(idx);
return 1;
}
}
av_free(idx);
return 0;
}
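/* Heuristic: if one stream's first indexed chunk begins only after another
 * stream's last chunk has ended, or the index is inconsistent with the
 * first chunk actually found on disk, the file cannot be read interleaved;
 * otherwise fall back to check_stream_max_drift(). */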
static int guess_ni_flag(AVFormatContext *s)
{
int i;
int64_t last_start = 0;
int64_t first_end = INT64_MAX;
int64_t oldpos = avio_tell(s->pb);
for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
int n = st->nb_index_entries;
unsigned int size;
if (n <= 0)
continue;
if (n >= 2) {
int64_t pos = st->index_entries[0].pos;
unsigned tag[2];
avio_seek(s->pb, pos, SEEK_SET);
tag[0] = avio_r8(s->pb);
tag[1] = avio_r8(s->pb);
avio_rl16(s->pb);
size = avio_rl32(s->pb);
if (get_stream_idx(tag) == i && pos + size > st->index_entries[1].pos)
last_start = INT64_MAX;
if (get_stream_idx(tag) == i && size == st->index_entries[0].size + 8)
last_start = INT64_MAX;
}
if (st->index_entries[0].pos > last_start)
last_start = st->index_entries[0].pos;
if (st->index_entries[n - 1].pos < first_end)
first_end = st->index_entries[n - 1].pos;
}
avio_seek(s->pb, oldpos, SEEK_SET);
if (last_start > first_end)
return 1;
return check_stream_max_drift(s);
}
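/* The optional 'idx1' index chunk is stored after the 'movi' list; scan
 * the chunks there (also picking up a trailing LIST/INFO block) and
 * restore the original file position afterwards. */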
static int avi_load_index(AVFormatContext *s)
{
AVIContext *avi = s->priv_data;
AVIOContext *pb = s->pb;
uint32_t tag, size;
int64_t pos = avio_tell(pb);
int64_t next;
int ret = -1;
if (avio_seek(pb, avi->movi_end, SEEK_SET) < 0)
goto the_end; // maybe truncated file
av_log(s, AV_LOG_TRACE, "movi_end=0x%"PRIx64"\n", avi->movi_end);
for (;;) {
tag = avio_rl32(pb);
size = avio_rl32(pb);
if (avio_feof(pb))
break;
next = avio_tell(pb) + size + (size & 1);
if (tag == MKTAG('i', 'd', 'x', '1') &&
avi_read_idx1(s, size) >= 0) {
avi->index_loaded=2;
ret = 0;
}else if (tag == MKTAG('L', 'I', 'S', 'T')) {
uint32_t tag1 = avio_rl32(pb);
if (tag1 == MKTAG('I', 'N', 'F', 'O'))
ff_read_riff_info(s, size - 4);
}else if (!ret)
break;
if (avio_seek(pb, next, SEEK_SET) < 0)
break; // something is wrong here
}
the_end:
avio_seek(pb, pos, SEEK_SET);
return ret;
}
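/* Seek the embedded subtitle demuxer (attached by the GAB2 handling) to
 * the rescaled target timestamp and prime it with one packet from the new
 * position. */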
static void seek_subtitle(AVStream *st, AVStream *st2, int64_t timestamp)
{
AVIStream *ast2 = st2->priv_data;
int64_t ts2 = av_rescale_q(timestamp, st->time_base, st2->time_base);
av_packet_unref(&ast2->sub_pkt);
if (avformat_seek_file(ast2->sub_ctx, 0, INT64_MIN, ts2, ts2, 0) >= 0 ||
avformat_seek_file(ast2->sub_ctx, 0, ts2, ts2, INT64_MAX, 0) >= 0)
ff_read_packet(ast2->sub_ctx, &ast2->sub_pkt);
}
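/* Seeking: locate the index entry for the requested stream, then point
 * every other stream at an entry at or before that file position; packets
 * that lie before a stream's seek_pos are then dropped by
 * avi_read_packet() during the resync. */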
static int avi_read_seek(AVFormatContext *s, int stream_index,
int64_t timestamp, int flags)
{
AVIContext *avi = s->priv_data;
AVStream *st;
int i, index;
int64_t pos, pos_min;
AVIStream *ast;
    /* It does not matter which stream is requested; DV in AVI has the
     * stream information in the first video stream.
     */
if (avi->dv_demux)
stream_index = 0;
if (!avi->index_loaded) {
/* we only load the index on demand */
avi_load_index(s);
avi->index_loaded |= 1;
}
av_assert0(stream_index >= 0);
st = s->streams[stream_index];
ast = st->priv_data;
index = av_index_search_timestamp(st,
timestamp * FFMAX(ast->sample_size, 1),
flags);
if (index < 0) {
if (st->nb_index_entries > 0)
av_log(s, AV_LOG_DEBUG, "Failed to find timestamp %"PRId64 " in index %"PRId64 " .. %"PRId64 "\n",
timestamp * FFMAX(ast->sample_size, 1),
st->index_entries[0].timestamp,
st->index_entries[st->nb_index_entries - 1].timestamp);
return AVERROR_INVALIDDATA;
}
/* find the position */
pos = st->index_entries[index].pos;
timestamp = st->index_entries[index].timestamp / FFMAX(ast->sample_size, 1);
av_log(s, AV_LOG_TRACE, "XX %"PRId64" %d %"PRId64"\n",
timestamp, index, st->index_entries[index].timestamp);
if (CONFIG_DV_DEMUXER && avi->dv_demux) {
/* One and only one real stream for DV in AVI, and it has video */
/* offsets. Calling with other stream indexes should have failed */
/* the av_index_search_timestamp call above. */
if (avio_seek(s->pb, pos, SEEK_SET) < 0)
return -1;
/* Feed the DV video stream version of the timestamp to the */
/* DV demux so it can synthesize correct timestamps. */
ff_dv_offset_reset(avi->dv_demux, timestamp);
avi->stream_index = -1;
return 0;
}
pos_min = pos;
for (i = 0; i < s->nb_streams; i++) {
AVStream *st2 = s->streams[i];
AVIStream *ast2 = st2->priv_data;
ast2->packet_size =
ast2->remaining = 0;
if (ast2->sub_ctx) {
seek_subtitle(st, st2, timestamp);
continue;
}
if (st2->nb_index_entries <= 0)
continue;
// av_assert1(st2->codecpar->block_align);
index = av_index_search_timestamp(st2,
av_rescale_q(timestamp,
st->time_base,
st2->time_base) *
FFMAX(ast2->sample_size, 1),
flags |
AVSEEK_FLAG_BACKWARD |
(st2->codecpar->codec_type != AVMEDIA_TYPE_VIDEO ? AVSEEK_FLAG_ANY : 0));
if (index < 0)
index = 0;
ast2->seek_pos = st2->index_entries[index].pos;
pos_min = FFMIN(pos_min,ast2->seek_pos);
}
for (i = 0; i < s->nb_streams; i++) {
AVStream *st2 = s->streams[i];
AVIStream *ast2 = st2->priv_data;
if (ast2->sub_ctx || st2->nb_index_entries <= 0)
continue;
index = av_index_search_timestamp(
st2,
av_rescale_q(timestamp, st->time_base, st2->time_base) * FFMAX(ast2->sample_size, 1),
flags | AVSEEK_FLAG_BACKWARD | (st2->codecpar->codec_type != AVMEDIA_TYPE_VIDEO ? AVSEEK_FLAG_ANY : 0));
if (index < 0)
index = 0;
while (!avi->non_interleaved && index>0 && st2->index_entries[index-1].pos >= pos_min)
index--;
ast2->frame_offset = st2->index_entries[index].timestamp;
}
/* do the seek */
if (avio_seek(s->pb, pos_min, SEEK_SET) < 0) {
av_log(s, AV_LOG_ERROR, "Seek failed\n");
return -1;
}
avi->stream_index = -1;
avi->dts_max = INT_MIN;
return 0;
}
static int avi_read_close(AVFormatContext *s)
{
int i;
AVIContext *avi = s->priv_data;
for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
AVIStream *ast = st->priv_data;
if (ast) {
if (ast->sub_ctx) {
av_freep(&ast->sub_ctx->pb);
avformat_close_input(&ast->sub_ctx);
}
av_freep(&ast->sub_buffer);
av_packet_unref(&ast->sub_pkt);
}
}
av_freep(&avi->dv_demux);
return 0;
}
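/* Probe by comparing the buffer against the RIFF-style signatures in
 * avi_headers (e.g. "RIFF....AVI "); bytes 4..7, the RIFF size field, are
 * deliberately skipped. */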
static int avi_probe(AVProbeData *p)
{
int i;
/* check file header */
for (i = 0; avi_headers[i][0]; i++)
if (AV_RL32(p->buf ) == AV_RL32(avi_headers[i] ) &&
AV_RL32(p->buf + 8) == AV_RL32(avi_headers[i] + 4))
return AVPROBE_SCORE_MAX;
return 0;
}
AVInputFormat ff_avi_demuxer = {
.name = "avi",
.long_name = NULL_IF_CONFIG_SMALL("AVI (Audio Video Interleaved)"),
.priv_data_size = sizeof(AVIContext),
.extensions = "avi",
.read_probe = avi_probe,
.read_header = avi_read_header,
.read_packet = avi_read_packet,
.read_close = avi_read_close,
.read_seek = avi_read_seek,
.priv_class = &demuxer_class,
};
/*
+----------------------------------------------------------------------+
| PHP Version 5 |
+----------------------------------------------------------------------+
| Copyright (c) 1997-2016 The PHP Group |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Authors: Rasmus Lerdorf <rasmus@php.net> |
| Marcus Boerger <helly@php.net> |
+----------------------------------------------------------------------+
*/
/* $Id$ */
/* ToDos
*
* See if example images from http://www.exif.org have illegal
* thumbnail sizes or if code is corrupt.
* Create/Update exif headers.
* Create/Remove/Update image thumbnails.
*/
/* Security
*
 * At the current time I do not see any security problems, but a potential
 * attacker could generate an image with recursive IFD pointers... (Marcus)
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "php.h"
#include "ext/standard/file.h"
#if HAVE_EXIF
/* When EXIF_DEBUG is defined the module generates a lot of debug messages
* that help understanding what is going on. This can and should be used
* while extending the module as it shows if you are at the right position.
 * You are assumed to have a copy of the TIFF 6.0 and EXIF 2.10 standards at hand.
*/
#undef EXIF_DEBUG
#ifdef EXIF_DEBUG
#define EXIFERR_DC , const char *_file, size_t _line TSRMLS_DC
#define EXIFERR_CC , __FILE__, __LINE__ TSRMLS_CC
#else
#define EXIFERR_DC TSRMLS_DC
#define EXIFERR_CC TSRMLS_CC
#endif
#undef EXIF_JPEG2000
#include "php_exif.h"
#include <math.h>
#include "php_ini.h"
#include "ext/standard/php_string.h"
#include "ext/standard/php_image.h"
#include "ext/standard/info.h"
/* needed for ssize_t definition */
#include <sys/types.h>
typedef unsigned char uchar;
#ifndef safe_emalloc
# define safe_emalloc(a,b,c) emalloc((a)*(b)+(c))
#endif
#ifndef safe_erealloc
# define safe_erealloc(p,a,b,c) erealloc(p, (a)*(b)+(c))
#endif
#ifndef TRUE
# define TRUE 1
# define FALSE 0
#endif
#ifndef max
# define max(a,b) ((a)>(b) ? (a) : (b))
#endif
#define EFREE_IF(ptr) if (ptr) efree(ptr)
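/* Cap on recursive IFD parsing; checked against ifd_nesting_level while
 * walking IFDs elsewhere in this file, which defuses the recursive
 * IFD-pointer attack mentioned in the Security note above. */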
#define MAX_IFD_NESTING_LEVEL 100
/* {{{ arginfo */
ZEND_BEGIN_ARG_INFO(arginfo_exif_tagname, 0)
ZEND_ARG_INFO(0, index)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_exif_read_data, 0, 0, 1)
ZEND_ARG_INFO(0, filename)
ZEND_ARG_INFO(0, sections_needed)
ZEND_ARG_INFO(0, sub_arrays)
ZEND_ARG_INFO(0, read_thumbnail)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_exif_thumbnail, 0, 0, 1)
ZEND_ARG_INFO(0, filename)
ZEND_ARG_INFO(1, width)
ZEND_ARG_INFO(1, height)
ZEND_ARG_INFO(1, imagetype)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_exif_imagetype, 0)
ZEND_ARG_INFO(0, imagefile)
ZEND_END_ARG_INFO()
/* }}} */
/* {{{ exif_functions[]
*/
const zend_function_entry exif_functions[] = {
PHP_FE(exif_read_data, arginfo_exif_read_data)
PHP_FALIAS(read_exif_data, exif_read_data, arginfo_exif_read_data)
PHP_FE(exif_tagname, arginfo_exif_tagname)
PHP_FE(exif_thumbnail, arginfo_exif_thumbnail)
PHP_FE(exif_imagetype, arginfo_exif_imagetype)
PHP_FE_END
};
/* }}} */
#define EXIF_VERSION "1.4 $Id$"
/* {{{ PHP_MINFO_FUNCTION
*/
PHP_MINFO_FUNCTION(exif)
{
php_info_print_table_start();
php_info_print_table_row(2, "EXIF Support", "enabled");
php_info_print_table_row(2, "EXIF Version", EXIF_VERSION);
php_info_print_table_row(2, "Supported EXIF Version", "0220");
php_info_print_table_row(2, "Supported filetypes", "JPEG,TIFF");
php_info_print_table_end();
DISPLAY_INI_ENTRIES();
}
/* }}} */
ZEND_BEGIN_MODULE_GLOBALS(exif)
char * encode_unicode;
char * decode_unicode_be;
char * decode_unicode_le;
char * encode_jis;
char * decode_jis_be;
char * decode_jis_le;
ZEND_END_MODULE_GLOBALS(exif)
ZEND_DECLARE_MODULE_GLOBALS(exif)
#ifdef ZTS
#define EXIF_G(v) TSRMG(exif_globals_id, zend_exif_globals *, v)
#else
#define EXIF_G(v) (exif_globals.v)
#endif
/* {{{ PHP_INI
*/
ZEND_INI_MH(OnUpdateEncode)
{
if (new_value && new_value_length) {
const zend_encoding **return_list;
size_t return_size;
if (FAILURE == zend_multibyte_parse_encoding_list(new_value, new_value_length,
&return_list, &return_size, 0 TSRMLS_CC)) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Illegal encoding ignored: '%s'", new_value);
return FAILURE;
}
efree(return_list);
}
return OnUpdateString(entry, new_value, new_value_length, mh_arg1, mh_arg2, mh_arg3, stage TSRMLS_CC);
}
ZEND_INI_MH(OnUpdateDecode)
{
if (new_value) {
const zend_encoding **return_list;
size_t return_size;
if (FAILURE == zend_multibyte_parse_encoding_list(new_value, new_value_length,
&return_list, &return_size, 0 TSRMLS_CC)) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Illegal encoding ignored: '%s'", new_value);
return FAILURE;
}
efree(return_list);
}
return OnUpdateString(entry, new_value, new_value_length, mh_arg1, mh_arg2, mh_arg3, stage TSRMLS_CC);
}
PHP_INI_BEGIN()
STD_PHP_INI_ENTRY("exif.encode_unicode", "ISO-8859-15", PHP_INI_ALL, OnUpdateEncode, encode_unicode, zend_exif_globals, exif_globals)
STD_PHP_INI_ENTRY("exif.decode_unicode_motorola", "UCS-2BE", PHP_INI_ALL, OnUpdateDecode, decode_unicode_be, zend_exif_globals, exif_globals)
STD_PHP_INI_ENTRY("exif.decode_unicode_intel", "UCS-2LE", PHP_INI_ALL, OnUpdateDecode, decode_unicode_le, zend_exif_globals, exif_globals)
STD_PHP_INI_ENTRY("exif.encode_jis", "", PHP_INI_ALL, OnUpdateEncode, encode_jis, zend_exif_globals, exif_globals)
STD_PHP_INI_ENTRY("exif.decode_jis_motorola", "JIS", PHP_INI_ALL, OnUpdateDecode, decode_jis_be, zend_exif_globals, exif_globals)
STD_PHP_INI_ENTRY("exif.decode_jis_intel", "JIS", PHP_INI_ALL, OnUpdateDecode, decode_jis_le, zend_exif_globals, exif_globals)
PHP_INI_END()
/* }}} */
/* {{{ PHP_GINIT_FUNCTION
*/
static PHP_GINIT_FUNCTION(exif)
{
exif_globals->encode_unicode = NULL;
exif_globals->decode_unicode_be = NULL;
exif_globals->decode_unicode_le = NULL;
exif_globals->encode_jis = NULL;
exif_globals->decode_jis_be = NULL;
exif_globals->decode_jis_le = NULL;
}
/* }}} */
/* {{{ PHP_MINIT_FUNCTION(exif)
Get the size of an image as 4-element array */
PHP_MINIT_FUNCTION(exif)
{
REGISTER_INI_ENTRIES();
if (zend_hash_exists(&module_registry, "mbstring", sizeof("mbstring"))) {
REGISTER_LONG_CONSTANT("EXIF_USE_MBSTRING", 1, CONST_CS | CONST_PERSISTENT);
} else {
REGISTER_LONG_CONSTANT("EXIF_USE_MBSTRING", 0, CONST_CS | CONST_PERSISTENT);
}
return SUCCESS;
}
/* }}} */
/* {{{ PHP_MSHUTDOWN_FUNCTION
*/
PHP_MSHUTDOWN_FUNCTION(exif)
{
UNREGISTER_INI_ENTRIES();
return SUCCESS;
}
/* }}} */
/* {{{ exif dependencies */
static const zend_module_dep exif_module_deps[] = {
ZEND_MOD_REQUIRED("standard")
ZEND_MOD_OPTIONAL("mbstring")
ZEND_MOD_END
};
/* }}} */
/* {{{ exif_module_entry
*/
zend_module_entry exif_module_entry = {
STANDARD_MODULE_HEADER_EX, NULL,
exif_module_deps,
"exif",
exif_functions,
PHP_MINIT(exif),
PHP_MSHUTDOWN(exif),
NULL, NULL,
PHP_MINFO(exif),
#if ZEND_MODULE_API_NO >= 20010901
EXIF_VERSION,
#endif
#if ZEND_MODULE_API_NO >= 20060613
PHP_MODULE_GLOBALS(exif),
PHP_GINIT(exif),
NULL,
NULL,
STANDARD_MODULE_PROPERTIES_EX
#else
STANDARD_MODULE_PROPERTIES
#endif
};
/* }}} */
#ifdef COMPILE_DL_EXIF
ZEND_GET_MODULE(exif)
#endif
/* {{{ php_strnlen
 * return the length of the string, but never more than maxlen (strnlen semantics) */
static size_t php_strnlen(char* str, size_t maxlen) {
size_t len = 0;
if (str && maxlen && *str) {
do {
len++;
} while (--maxlen && *(++str));
}
return len;
}
/* }}} */
/* {{{ error messages
*/
static const char * EXIF_ERROR_FILEEOF = "Unexpected end of file reached";
static const char * EXIF_ERROR_CORRUPT = "File structure corrupted";
static const char * EXIF_ERROR_THUMBEOF = "Thumbnail goes IFD boundary or end of file reached";
static const char * EXIF_ERROR_FSREALLOC = "Illegal reallocating of undefined file section";
#define EXIF_ERRLOG_FILEEOF(ImageInfo) exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "%s", EXIF_ERROR_FILEEOF);
#define EXIF_ERRLOG_CORRUPT(ImageInfo) exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "%s", EXIF_ERROR_CORRUPT);
#define EXIF_ERRLOG_THUMBEOF(ImageInfo) exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "%s", EXIF_ERROR_THUMBEOF);
#define EXIF_ERRLOG_FSREALLOC(ImageInfo) exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "%s", EXIF_ERROR_FSREALLOC);
/* }}} */
/* {{{ format description defines
Describes format descriptor
*/
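/* Bytes per component for each TAG_FMT_* value defined below; index 0 is
 * unused. */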
static int php_tiff_bytes_per_format[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8, 1};
#define NUM_FORMATS 13
#define TAG_FMT_BYTE 1
#define TAG_FMT_STRING 2
#define TAG_FMT_USHORT 3
#define TAG_FMT_ULONG 4
#define TAG_FMT_URATIONAL 5
#define TAG_FMT_SBYTE 6
#define TAG_FMT_UNDEFINED 7
#define TAG_FMT_SSHORT 8
#define TAG_FMT_SLONG 9
#define TAG_FMT_SRATIONAL 10
#define TAG_FMT_SINGLE 11
#define TAG_FMT_DOUBLE 12
#define TAG_FMT_IFD 13
#ifdef EXIF_DEBUG
static char *exif_get_tagformat(int format)
{
switch(format) {
case TAG_FMT_BYTE: return "BYTE";
case TAG_FMT_STRING: return "STRING";
case TAG_FMT_USHORT: return "USHORT";
case TAG_FMT_ULONG: return "ULONG";
case TAG_FMT_URATIONAL: return "URATIONAL";
case TAG_FMT_SBYTE: return "SBYTE";
case TAG_FMT_UNDEFINED: return "UNDEFINED";
case TAG_FMT_SSHORT: return "SSHORT";
case TAG_FMT_SLONG: return "SLONG";
case TAG_FMT_SRATIONAL: return "SRATIONAL";
case TAG_FMT_SINGLE: return "SINGLE";
case TAG_FMT_DOUBLE: return "DOUBLE";
case TAG_FMT_IFD: return "IFD";
}
return "*Illegal";
}
#endif
/* Describes tag values */
#define TAG_GPS_VERSION_ID 0x0000
#define TAG_GPS_LATITUDE_REF 0x0001
#define TAG_GPS_LATITUDE 0x0002
#define TAG_GPS_LONGITUDE_REF 0x0003
#define TAG_GPS_LONGITUDE 0x0004
#define TAG_GPS_ALTITUDE_REF 0x0005
#define TAG_GPS_ALTITUDE 0x0006
#define TAG_GPS_TIME_STAMP 0x0007
#define TAG_GPS_SATELLITES 0x0008
#define TAG_GPS_STATUS 0x0009
#define TAG_GPS_MEASURE_MODE 0x000A
#define TAG_GPS_DOP 0x000B
#define TAG_GPS_SPEED_REF 0x000C
#define TAG_GPS_SPEED 0x000D
#define TAG_GPS_TRACK_REF 0x000E
#define TAG_GPS_TRACK 0x000F
#define TAG_GPS_IMG_DIRECTION_REF 0x0010
#define TAG_GPS_IMG_DIRECTION 0x0011
#define TAG_GPS_MAP_DATUM 0x0012
#define TAG_GPS_DEST_LATITUDE_REF 0x0013
#define TAG_GPS_DEST_LATITUDE 0x0014
#define TAG_GPS_DEST_LONGITUDE_REF 0x0015
#define TAG_GPS_DEST_LONGITUDE 0x0016
#define TAG_GPS_DEST_BEARING_REF 0x0017
#define TAG_GPS_DEST_BEARING 0x0018
#define TAG_GPS_DEST_DISTANCE_REF 0x0019
#define TAG_GPS_DEST_DISTANCE 0x001A
#define TAG_GPS_PROCESSING_METHOD 0x001B
#define TAG_GPS_AREA_INFORMATION 0x001C
#define TAG_GPS_DATE_STAMP 0x001D
#define TAG_GPS_DIFFERENTIAL 0x001E
#define TAG_TIFF_COMMENT                0x00FE /* SHOULDN'T HAPPEN */
#define TAG_NEW_SUBFILE 0x00FE /* New version of subfile tag */
#define TAG_SUBFILE_TYPE 0x00FF /* Old version of subfile tag */
#define TAG_IMAGEWIDTH 0x0100
#define TAG_IMAGEHEIGHT 0x0101
#define TAG_BITS_PER_SAMPLE 0x0102
#define TAG_COMPRESSION 0x0103
#define TAG_PHOTOMETRIC_INTERPRETATION 0x0106
#define TAG_TRESHHOLDING 0x0107
#define TAG_CELL_WIDTH 0x0108
#define TAG_CELL_HEIGHT 0x0109
#define TAG_FILL_ORDER 0x010A
#define TAG_DOCUMENT_NAME 0x010D
#define TAG_IMAGE_DESCRIPTION 0x010E
#define TAG_MAKE 0x010F
#define TAG_MODEL 0x0110
#define TAG_STRIP_OFFSETS 0x0111
#define TAG_ORIENTATION 0x0112
#define TAG_SAMPLES_PER_PIXEL 0x0115
#define TAG_ROWS_PER_STRIP 0x0116
#define TAG_STRIP_BYTE_COUNTS 0x0117
#define TAG_MIN_SAMPPLE_VALUE 0x0118
#define TAG_MAX_SAMPLE_VALUE 0x0119
#define TAG_X_RESOLUTION 0x011A
#define TAG_Y_RESOLUTION 0x011B
#define TAG_PLANAR_CONFIGURATION 0x011C
#define TAG_PAGE_NAME 0x011D
#define TAG_X_POSITION 0x011E
#define TAG_Y_POSITION 0x011F
#define TAG_FREE_OFFSETS 0x0120
#define TAG_FREE_BYTE_COUNTS 0x0121
#define TAG_GRAY_RESPONSE_UNIT 0x0122
#define TAG_GRAY_RESPONSE_CURVE 0x0123
#define TAG_RESOLUTION_UNIT 0x0128
#define TAG_PAGE_NUMBER 0x0129
#define TAG_TRANSFER_FUNCTION 0x012D
#define TAG_SOFTWARE 0x0131
#define TAG_DATETIME 0x0132
#define TAG_ARTIST 0x013B
#define TAG_HOST_COMPUTER 0x013C
#define TAG_PREDICTOR 0x013D
#define TAG_WHITE_POINT 0x013E
#define TAG_PRIMARY_CHROMATICITIES 0x013F
#define TAG_COLOR_MAP 0x0140
#define TAG_HALFTONE_HINTS 0x0141
#define TAG_TILE_WIDTH 0x0142
#define TAG_TILE_LENGTH 0x0143
#define TAG_TILE_OFFSETS 0x0144
#define TAG_TILE_BYTE_COUNTS 0x0145
#define TAG_SUB_IFD 0x014A
#define TAG_INK_SETMPUTER 0x014C
#define TAG_INK_NAMES 0x014D
#define TAG_NUMBER_OF_INKS 0x014E
#define TAG_DOT_RANGE 0x0150
#define TAG_TARGET_PRINTER 0x0151
#define TAG_EXTRA_SAMPLE 0x0152
#define TAG_SAMPLE_FORMAT 0x0153
#define TAG_S_MIN_SAMPLE_VALUE 0x0154
#define TAG_S_MAX_SAMPLE_VALUE 0x0155
#define TAG_TRANSFER_RANGE 0x0156
#define TAG_JPEG_TABLES 0x015B
#define TAG_JPEG_PROC 0x0200
#define TAG_JPEG_INTERCHANGE_FORMAT 0x0201
#define TAG_JPEG_INTERCHANGE_FORMAT_LEN 0x0202
#define TAG_JPEG_RESTART_INTERVAL 0x0203
#define TAG_JPEG_LOSSLESS_PREDICTOR 0x0205
#define TAG_JPEG_POINT_TRANSFORMS 0x0206
#define TAG_JPEG_Q_TABLES 0x0207
#define TAG_JPEG_DC_TABLES 0x0208
#define TAG_JPEG_AC_TABLES 0x0209
#define TAG_YCC_COEFFICIENTS 0x0211
#define TAG_YCC_SUB_SAMPLING 0x0212
#define TAG_YCC_POSITIONING 0x0213
#define TAG_REFERENCE_BLACK_WHITE 0x0214
/* 0x0301 - 0x0302 */
/* 0x0320 */
/* 0x0343 */
/* 0x5001 - 0x501B */
/* 0x5021 - 0x503B */
/* 0x5090 - 0x5091 */
/* 0x5100 - 0x5101 */
/* 0x5110 - 0x5113 */
/* 0x80E3 - 0x80E6 */
/* 0x828d - 0x828F */
#define TAG_COPYRIGHT 0x8298
#define TAG_EXPOSURETIME 0x829A
#define TAG_FNUMBER 0x829D
#define TAG_EXIF_IFD_POINTER 0x8769
#define TAG_ICC_PROFILE 0x8773
#define TAG_EXPOSURE_PROGRAM 0x8822
#define TAG_SPECTRAL_SENSITY 0x8824
#define TAG_GPS_IFD_POINTER 0x8825
#define TAG_ISOSPEED 0x8827
#define TAG_OPTOELECTRIC_CONVERSION_F 0x8828
/* 0x8829 - 0x882b */
#define TAG_EXIFVERSION 0x9000
#define TAG_DATE_TIME_ORIGINAL 0x9003
#define TAG_DATE_TIME_DIGITIZED 0x9004
#define TAG_COMPONENT_CONFIG 0x9101
#define TAG_COMPRESSED_BITS_PER_PIXEL 0x9102
#define TAG_SHUTTERSPEED 0x9201
#define TAG_APERTURE 0x9202
#define TAG_BRIGHTNESS_VALUE 0x9203
#define TAG_EXPOSURE_BIAS_VALUE 0x9204
#define TAG_MAX_APERTURE 0x9205
#define TAG_SUBJECT_DISTANCE 0x9206
#define TAG_METRIC_MODULE 0x9207
#define TAG_LIGHT_SOURCE 0x9208
#define TAG_FLASH 0x9209
#define TAG_FOCAL_LENGTH 0x920A
/* 0x920B - 0x920D */
/* 0x9211 - 0x9216 */
#define TAG_SUBJECT_AREA 0x9214
#define TAG_MAKER_NOTE 0x927C
#define TAG_USERCOMMENT 0x9286
#define TAG_SUB_SEC_TIME 0x9290
#define TAG_SUB_SEC_TIME_ORIGINAL 0x9291
#define TAG_SUB_SEC_TIME_DIGITIZED 0x9292
/* 0x923F */
/* 0x935C */
#define TAG_XP_TITLE 0x9C9B
#define TAG_XP_COMMENTS 0x9C9C
#define TAG_XP_AUTHOR 0x9C9D
#define TAG_XP_KEYWORDS 0x9C9E
#define TAG_XP_SUBJECT 0x9C9F
#define TAG_FLASH_PIX_VERSION 0xA000
#define TAG_COLOR_SPACE 0xA001
#define TAG_COMP_IMAGE_WIDTH 0xA002 /* compressed images only */
#define TAG_COMP_IMAGE_HEIGHT 0xA003
#define TAG_RELATED_SOUND_FILE 0xA004
#define TAG_INTEROP_IFD_POINTER 0xA005 /* IFD pointer */
#define TAG_FLASH_ENERGY 0xA20B
#define TAG_SPATIAL_FREQUENCY_RESPONSE 0xA20C
#define TAG_FOCALPLANE_X_RES 0xA20E
#define TAG_FOCALPLANE_Y_RES 0xA20F
#define TAG_FOCALPLANE_RESOLUTION_UNIT 0xA210
#define TAG_SUBJECT_LOCATION 0xA214
#define TAG_EXPOSURE_INDEX 0xA215
#define TAG_SENSING_METHOD 0xA217
#define TAG_FILE_SOURCE 0xA300
#define TAG_SCENE_TYPE 0xA301
#define TAG_CFA_PATTERN 0xA302
#define TAG_CUSTOM_RENDERED 0xA401
#define TAG_EXPOSURE_MODE 0xA402
#define TAG_WHITE_BALANCE 0xA403
#define TAG_DIGITAL_ZOOM_RATIO 0xA404
#define TAG_FOCAL_LENGTH_IN_35_MM_FILM 0xA405
#define TAG_SCENE_CAPTURE_TYPE 0xA406
#define TAG_GAIN_CONTROL 0xA407
#define TAG_CONTRAST 0xA408
#define TAG_SATURATION 0xA409
#define TAG_SHARPNESS 0xA40A
#define TAG_DEVICE_SETTING_DESCRIPTION 0xA40B
#define TAG_SUBJECT_DISTANCE_RANGE 0xA40C
#define TAG_IMAGE_UNIQUE_ID 0xA420
/* Olympus specific tags */
#define TAG_OLYMPUS_SPECIALMODE 0x0200
#define TAG_OLYMPUS_JPEGQUAL 0x0201
#define TAG_OLYMPUS_MACRO 0x0202
#define TAG_OLYMPUS_DIGIZOOM 0x0204
#define TAG_OLYMPUS_SOFTWARERELEASE 0x0207
#define TAG_OLYMPUS_PICTINFO 0x0208
#define TAG_OLYMPUS_CAMERAID 0x0209
/* end Olympus specific tags */
/* Internal */
#define TAG_NONE -1 /* note that -1 <> 0xFFFF */
#define TAG_COMPUTED_VALUE -2
#define TAG_END_OF_LIST 0xFFFD
/* Values for TAG_PHOTOMETRIC_INTERPRETATION */
#define PMI_BLACK_IS_ZERO 0
#define PMI_WHITE_IS_ZERO 1
#define PMI_RGB 2
#define PMI_PALETTE_COLOR 3
#define PMI_TRANSPARENCY_MASK 4
#define PMI_SEPARATED 5
#define PMI_YCBCR 6
#define PMI_CIELAB 8
/* }}} */
/* {{{ TabTable[]
*/
typedef const struct {
unsigned short Tag;
char *Desc;
} tag_info_type;
typedef tag_info_type tag_info_array[];
typedef tag_info_type *tag_table_type;
#define TAG_TABLE_END \
{TAG_NONE, "No tag value"},\
{TAG_COMPUTED_VALUE, "Computed value"},\
{TAG_END_OF_LIST, ""} /* Important for exif_get_tagname() IF value != "" function result is != false */
static tag_info_array tag_table_IFD = {
{ 0x000B, "ACDComment"},
{ 0x00FE, "NewSubFile"}, /* better name it 'ImageType' ? */
{ 0x00FF, "SubFile"},
{ 0x0100, "ImageWidth"},
{ 0x0101, "ImageLength"},
{ 0x0102, "BitsPerSample"},
{ 0x0103, "Compression"},
{ 0x0106, "PhotometricInterpretation"},
{ 0x010A, "FillOrder"},
{ 0x010D, "DocumentName"},
{ 0x010E, "ImageDescription"},
{ 0x010F, "Make"},
{ 0x0110, "Model"},
{ 0x0111, "StripOffsets"},
{ 0x0112, "Orientation"},
{ 0x0115, "SamplesPerPixel"},
{ 0x0116, "RowsPerStrip"},
{ 0x0117, "StripByteCounts"},
{ 0x0118, "MinSampleValue"},
{ 0x0119, "MaxSampleValue"},
{ 0x011A, "XResolution"},
{ 0x011B, "YResolution"},
{ 0x011C, "PlanarConfiguration"},
{ 0x011D, "PageName"},
{ 0x011E, "XPosition"},
{ 0x011F, "YPosition"},
{ 0x0120, "FreeOffsets"},
{ 0x0121, "FreeByteCounts"},
{ 0x0122, "GrayResponseUnit"},
{ 0x0123, "GrayResponseCurve"},
{ 0x0124, "T4Options"},
{ 0x0125, "T6Options"},
{ 0x0128, "ResolutionUnit"},
{ 0x0129, "PageNumber"},
{ 0x012D, "TransferFunction"},
{ 0x0131, "Software"},
{ 0x0132, "DateTime"},
{ 0x013B, "Artist"},
{ 0x013C, "HostComputer"},
{ 0x013D, "Predictor"},
{ 0x013E, "WhitePoint"},
{ 0x013F, "PrimaryChromaticities"},
{ 0x0140, "ColorMap"},
{ 0x0141, "HalfToneHints"},
{ 0x0142, "TileWidth"},
{ 0x0143, "TileLength"},
{ 0x0144, "TileOffsets"},
{ 0x0145, "TileByteCounts"},
{ 0x014A, "SubIFD"},
{ 0x014C, "InkSet"},
{ 0x014D, "InkNames"},
{ 0x014E, "NumberOfInks"},
{ 0x0150, "DotRange"},
{ 0x0151, "TargetPrinter"},
{ 0x0152, "ExtraSample"},
{ 0x0153, "SampleFormat"},
{ 0x0154, "SMinSampleValue"},
{ 0x0155, "SMaxSampleValue"},
{ 0x0156, "TransferRange"},
{ 0x0157, "ClipPath"},
{ 0x0158, "XClipPathUnits"},
{ 0x0159, "YClipPathUnits"},
{ 0x015A, "Indexed"},
{ 0x015B, "JPEGTables"},
{ 0x015F, "OPIProxy"},
{ 0x0200, "JPEGProc"},
{ 0x0201, "JPEGInterchangeFormat"},
{ 0x0202, "JPEGInterchangeFormatLength"},
{ 0x0203, "JPEGRestartInterval"},
{ 0x0205, "JPEGLosslessPredictors"},
{ 0x0206, "JPEGPointTransforms"},
{ 0x0207, "JPEGQTables"},
{ 0x0208, "JPEGDCTables"},
{ 0x0209, "JPEGACTables"},
{ 0x0211, "YCbCrCoefficients"},
{ 0x0212, "YCbCrSubSampling"},
{ 0x0213, "YCbCrPositioning"},
{ 0x0214, "ReferenceBlackWhite"},
{ 0x02BC, "ExtensibleMetadataPlatform"}, /* XAP: Extensible Authoring Publishing, obsoleted by XMP: Extensible Metadata Platform */
{ 0x0301, "Gamma"},
{ 0x0302, "ICCProfileDescriptor"},
{ 0x0303, "SRGBRenderingIntent"},
{ 0x0320, "ImageTitle"},
{ 0x5001, "ResolutionXUnit"},
{ 0x5002, "ResolutionYUnit"},
{ 0x5003, "ResolutionXLengthUnit"},
{ 0x5004, "ResolutionYLengthUnit"},
{ 0x5005, "PrintFlags"},
{ 0x5006, "PrintFlagsVersion"},
{ 0x5007, "PrintFlagsCrop"},
{ 0x5008, "PrintFlagsBleedWidth"},
{ 0x5009, "PrintFlagsBleedWidthScale"},
{ 0x500A, "HalftoneLPI"},
{ 0x500B, "HalftoneLPIUnit"},
{ 0x500C, "HalftoneDegree"},
{ 0x500D, "HalftoneShape"},
{ 0x500E, "HalftoneMisc"},
{ 0x500F, "HalftoneScreen"},
{ 0x5010, "JPEGQuality"},
{ 0x5011, "GridSize"},
{ 0x5012, "ThumbnailFormat"},
{ 0x5013, "ThumbnailWidth"},
{ 0x5014, "ThumbnailHeight"},
{ 0x5015, "ThumbnailColorDepth"},
{ 0x5016, "ThumbnailPlanes"},
{ 0x5017, "ThumbnailRawBytes"},
{ 0x5018, "ThumbnailSize"},
{ 0x5019, "ThumbnailCompressedSize"},
{ 0x501A, "ColorTransferFunction"},
{ 0x501B, "ThumbnailData"},
{ 0x5020, "ThumbnailImageWidth"},
{ 0x5021, "ThumbnailImageHeight"},
{ 0x5022, "ThumbnailBitsPerSample"},
{ 0x5023, "ThumbnailCompression"},
{ 0x5024, "ThumbnailPhotometricInterp"},
{ 0x5025, "ThumbnailImageDescription"},
{ 0x5026, "ThumbnailEquipMake"},
{ 0x5027, "ThumbnailEquipModel"},
{ 0x5028, "ThumbnailStripOffsets"},
{ 0x5029, "ThumbnailOrientation"},
{ 0x502A, "ThumbnailSamplesPerPixel"},
{ 0x502B, "ThumbnailRowsPerStrip"},
{ 0x502C, "ThumbnailStripBytesCount"},
{ 0x502D, "ThumbnailResolutionX"},
{ 0x502E, "ThumbnailResolutionY"},
{ 0x502F, "ThumbnailPlanarConfig"},
{ 0x5030, "ThumbnailResolutionUnit"},
{ 0x5031, "ThumbnailTransferFunction"},
{ 0x5032, "ThumbnailSoftwareUsed"},
{ 0x5033, "ThumbnailDateTime"},
{ 0x5034, "ThumbnailArtist"},
{ 0x5035, "ThumbnailWhitePoint"},
{ 0x5036, "ThumbnailPrimaryChromaticities"},
{ 0x5037, "ThumbnailYCbCrCoefficients"},
{ 0x5038, "ThumbnailYCbCrSubsampling"},
{ 0x5039, "ThumbnailYCbCrPositioning"},
{ 0x503A, "ThumbnailRefBlackWhite"},
{ 0x503B, "ThumbnailCopyRight"},
{ 0x5090, "LuminanceTable"},
{ 0x5091, "ChrominanceTable"},
{ 0x5100, "FrameDelay"},
{ 0x5101, "LoopCount"},
{ 0x5110, "PixelUnit"},
{ 0x5111, "PixelPerUnitX"},
{ 0x5112, "PixelPerUnitY"},
{ 0x5113, "PaletteHistogram"},
{ 0x1000, "RelatedImageFileFormat"},
{ 0x800D, "ImageID"},
{ 0x80E3, "Matteing"}, /* obsoleted by ExtraSamples */
{ 0x80E4, "DataType"}, /* obsoleted by SampleFormat */
{ 0x80E5, "ImageDepth"},
{ 0x80E6, "TileDepth"},
{ 0x828D, "CFARepeatPatternDim"},
{ 0x828E, "CFAPattern"},
{ 0x828F, "BatteryLevel"},
{ 0x8298, "Copyright"},
{ 0x829A, "ExposureTime"},
{ 0x829D, "FNumber"},
{ 0x83BB, "IPTC/NAA"},
{ 0x84E3, "IT8RasterPadding"},
{ 0x84E5, "IT8ColorTable"},
{ 0x8649, "ImageResourceInformation"}, /* PhotoShop */
{ 0x8769, "Exif_IFD_Pointer"},
{ 0x8773, "ICC_Profile"},
{ 0x8822, "ExposureProgram"},
{ 0x8824, "SpectralSensity"},
{ 0x8828, "OECF"},
{ 0x8825, "GPS_IFD_Pointer"},
{ 0x8827, "ISOSpeedRatings"},
{ 0x8828, "OECF"},
{ 0x9000, "ExifVersion"},
{ 0x9003, "DateTimeOriginal"},
{ 0x9004, "DateTimeDigitized"},
{ 0x9101, "ComponentsConfiguration"},
{ 0x9102, "CompressedBitsPerPixel"},
{ 0x9201, "ShutterSpeedValue"},
{ 0x9202, "ApertureValue"},
{ 0x9203, "BrightnessValue"},
{ 0x9204, "ExposureBiasValue"},
{ 0x9205, "MaxApertureValue"},
{ 0x9206, "SubjectDistance"},
{ 0x9207, "MeteringMode"},
{ 0x9208, "LightSource"},
{ 0x9209, "Flash"},
{ 0x920A, "FocalLength"},
{ 0x920B, "FlashEnergy"}, /* 0xA20B in JPEG */
{ 0x920C, "SpatialFrequencyResponse"}, /* 0xA20C - - */
{ 0x920D, "Noise"},
{ 0x920E, "FocalPlaneXResolution"}, /* 0xA20E - - */
{ 0x920F, "FocalPlaneYResolution"}, /* 0xA20F - - */
{ 0x9210, "FocalPlaneResolutionUnit"}, /* 0xA210 - - */
{ 0x9211, "ImageNumber"},
{ 0x9212, "SecurityClassification"},
{ 0x9213, "ImageHistory"},
{ 0x9214, "SubjectLocation"}, /* 0xA214 - - */
{ 0x9215, "ExposureIndex"}, /* 0xA215 - - */
{ 0x9216, "TIFF/EPStandardID"},
{ 0x9217, "SensingMethod"}, /* 0xA217 - - */
{ 0x923F, "StoNits"},
{ 0x927C, "MakerNote"},
{ 0x9286, "UserComment"},
{ 0x9290, "SubSecTime"},
{ 0x9291, "SubSecTimeOriginal"},
{ 0x9292, "SubSecTimeDigitized"},
{ 0x935C, "ImageSourceData"}, /* "Adobe Photoshop Document Data Block": 8BIM... */
{ 0x9c9b, "Title" }, /* Win XP specific, Unicode */
{ 0x9c9c, "Comments" }, /* Win XP specific, Unicode */
{ 0x9c9d, "Author" }, /* Win XP specific, Unicode */
{ 0x9c9e, "Keywords" }, /* Win XP specific, Unicode */
{ 0x9c9f, "Subject" }, /* Win XP specific, Unicode, not to be confused with SubjectDistance and SubjectLocation */
{ 0xA000, "FlashPixVersion"},
{ 0xA001, "ColorSpace"},
{ 0xA002, "ExifImageWidth"},
{ 0xA003, "ExifImageLength"},
{ 0xA004, "RelatedSoundFile"},
{ 0xA005, "InteroperabilityOffset"},
{ 0xA20B, "FlashEnergy"}, /* 0x920B in TIFF/EP */
{ 0xA20C, "SpatialFrequencyResponse"}, /* 0x920C - - */
{ 0xA20D, "Noise"},
{ 0xA20E, "FocalPlaneXResolution"}, /* 0x920E - - */
{ 0xA20F, "FocalPlaneYResolution"}, /* 0x920F - - */
{ 0xA210, "FocalPlaneResolutionUnit"}, /* 0x9210 - - */
{ 0xA211, "ImageNumber"},
{ 0xA212, "SecurityClassification"},
{ 0xA213, "ImageHistory"},
{ 0xA214, "SubjectLocation"}, /* 0x9214 - - */
{ 0xA215, "ExposureIndex"}, /* 0x9215 - - */
{ 0xA216, "TIFF/EPStandardID"},
{ 0xA217, "SensingMethod"}, /* 0x9217 - - */
{ 0xA300, "FileSource"},
{ 0xA301, "SceneType"},
{ 0xA302, "CFAPattern"},
{ 0xA401, "CustomRendered"},
{ 0xA402, "ExposureMode"},
{ 0xA403, "WhiteBalance"},
{ 0xA404, "DigitalZoomRatio"},
{ 0xA405, "FocalLengthIn35mmFilm"},
{ 0xA406, "SceneCaptureType"},
{ 0xA407, "GainControl"},
{ 0xA408, "Contrast"},
{ 0xA409, "Saturation"},
{ 0xA40A, "Sharpness"},
{ 0xA40B, "DeviceSettingDescription"},
{ 0xA40C, "SubjectDistanceRange"},
{ 0xA420, "ImageUniqueID"},
TAG_TABLE_END
} ;
static tag_info_array tag_table_GPS = {
{ 0x0000, "GPSVersion"},
{ 0x0001, "GPSLatitudeRef"},
{ 0x0002, "GPSLatitude"},
{ 0x0003, "GPSLongitudeRef"},
{ 0x0004, "GPSLongitude"},
{ 0x0005, "GPSAltitudeRef"},
{ 0x0006, "GPSAltitude"},
{ 0x0007, "GPSTimeStamp"},
{ 0x0008, "GPSSatellites"},
{ 0x0009, "GPSStatus"},
{ 0x000A, "GPSMeasureMode"},
{ 0x000B, "GPSDOP"},
{ 0x000C, "GPSSpeedRef"},
{ 0x000D, "GPSSpeed"},
{ 0x000E, "GPSTrackRef"},
{ 0x000F, "GPSTrack"},
{ 0x0010, "GPSImgDirectionRef"},
{ 0x0011, "GPSImgDirection"},
{ 0x0012, "GPSMapDatum"},
{ 0x0013, "GPSDestLatitudeRef"},
{ 0x0014, "GPSDestLatitude"},
{ 0x0015, "GPSDestLongitudeRef"},
{ 0x0016, "GPSDestLongitude"},
{ 0x0017, "GPSDestBearingRef"},
{ 0x0018, "GPSDestBearing"},
{ 0x0019, "GPSDestDistanceRef"},
{ 0x001A, "GPSDestDistance"},
{ 0x001B, "GPSProcessingMode"},
{ 0x001C, "GPSAreaInformation"},
{ 0x001D, "GPSDateStamp"},
{ 0x001E, "GPSDifferential"},
TAG_TABLE_END
};
static tag_info_array tag_table_IOP = {
{ 0x0001, "InterOperabilityIndex"}, /* should be 'R98' or 'THM' */
{ 0x0002, "InterOperabilityVersion"},
{ 0x1000, "RelatedFileFormat"},
{ 0x1001, "RelatedImageWidth"},
{ 0x1002, "RelatedImageHeight"},
TAG_TABLE_END
};
static tag_info_array tag_table_VND_CANON = {
{ 0x0001, "ModeArray"}, /* guess */
{ 0x0004, "ImageInfo"}, /* guess */
{ 0x0006, "ImageType"},
{ 0x0007, "FirmwareVersion"},
{ 0x0008, "ImageNumber"},
{ 0x0009, "OwnerName"},
{ 0x000C, "Camera"},
{ 0x000F, "CustomFunctions"},
TAG_TABLE_END
};
static tag_info_array tag_table_VND_CASIO = {
{ 0x0001, "RecordingMode"},
{ 0x0002, "Quality"},
{ 0x0003, "FocusingMode"},
{ 0x0004, "FlashMode"},
{ 0x0005, "FlashIntensity"},
{ 0x0006, "ObjectDistance"},
{ 0x0007, "WhiteBalance"},
{ 0x000A, "DigitalZoom"},
{ 0x000B, "Sharpness"},
{ 0x000C, "Contrast"},
{ 0x000D, "Saturation"},
{ 0x0014, "CCDSensitivity"},
TAG_TABLE_END
};
static tag_info_array tag_table_VND_FUJI = {
{ 0x0000, "Version"},
{ 0x1000, "Quality"},
{ 0x1001, "Sharpness"},
{ 0x1002, "WhiteBalance"},
{ 0x1003, "Color"},
{ 0x1004, "Tone"},
{ 0x1010, "FlashMode"},
{ 0x1011, "FlashStrength"},
{ 0x1020, "Macro"},
{ 0x1021, "FocusMode"},
{ 0x1030, "SlowSync"},
{ 0x1031, "PictureMode"},
{ 0x1100, "ContTake"},
{ 0x1300, "BlurWarning"},
{ 0x1301, "FocusWarning"},
{ 0x1302, "AEWarning "},
TAG_TABLE_END
};
static tag_info_array tag_table_VND_NIKON = {
{ 0x0003, "Quality"},
{ 0x0004, "ColorMode"},
{ 0x0005, "ImageAdjustment"},
{ 0x0006, "CCDSensitivity"},
{ 0x0007, "WhiteBalance"},
{ 0x0008, "Focus"},
{ 0x000a, "DigitalZoom"},
{ 0x000b, "Converter"},
TAG_TABLE_END
};
static tag_info_array tag_table_VND_NIKON_990 = {
{ 0x0001, "Version"},
{ 0x0002, "ISOSetting"},
{ 0x0003, "ColorMode"},
{ 0x0004, "Quality"},
{ 0x0005, "WhiteBalance"},
{ 0x0006, "ImageSharpening"},
{ 0x0007, "FocusMode"},
{ 0x0008, "FlashSetting"},
{ 0x000F, "ISOSelection"},
{ 0x0080, "ImageAdjustment"},
{ 0x0082, "AuxiliaryLens"},
{ 0x0085, "ManualFocusDistance"},
{ 0x0086, "DigitalZoom"},
{ 0x0088, "AFFocusPosition"},
{ 0x0010, "DataDump"},
TAG_TABLE_END
};
static tag_info_array tag_table_VND_OLYMPUS = {
{ 0x0200, "SpecialMode"},
{ 0x0201, "JPEGQuality"},
{ 0x0202, "Macro"},
{ 0x0204, "DigitalZoom"},
{ 0x0207, "SoftwareRelease"},
{ 0x0208, "PictureInfo"},
{ 0x0209, "CameraId"},
{ 0x0F00, "DataDump"},
TAG_TABLE_END
};
typedef enum mn_byte_order_t {
MN_ORDER_INTEL = 0,
MN_ORDER_MOTOROLA = 1,
MN_ORDER_NORMAL
} mn_byte_order_t;
typedef enum mn_offset_mode_t {
MN_OFFSET_NORMAL,
MN_OFFSET_MAKER,
MN_OFFSET_GUESS
} mn_offset_mode_t;
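/* How MakerNote data offsets are based, roughly: relative to the TIFF
 * header as usual (NORMAL), relative to the MakerNote blob itself (MAKER),
 * or guessed by probing (GUESS); interpreted by the MakerNote IFD parser
 * elsewhere in this file. */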
typedef struct {
tag_table_type tag_table;
char * make;
char * model;
char * id_string;
int id_string_len;
int offset;
mn_byte_order_t byte_order;
mn_offset_mode_t offset_mode;
} maker_note_type;
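/* Vendor table for MakerNote parsing: an entry is selected by matching the
 * image's Make/Model tags and, when id_string is set, a byte prefix of the
 * MakerNote data; offset, byte_order and offset_mode then describe how the
 * embedded IFD is laid out for that vendor. */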
static const maker_note_type maker_note_array[] = {
{ tag_table_VND_CANON, "Canon", NULL, NULL, 0, 0, MN_ORDER_INTEL, MN_OFFSET_GUESS},
/* { tag_table_VND_CANON, "Canon", NULL, NULL, 0, 0, MN_ORDER_NORMAL, MN_OFFSET_NORMAL},*/
{ tag_table_VND_CASIO, "CASIO", NULL, NULL, 0, 0, MN_ORDER_MOTOROLA, MN_OFFSET_NORMAL},
{ tag_table_VND_FUJI, "FUJIFILM", NULL, "FUJIFILM\x0C\x00\x00\x00", 12, 12, MN_ORDER_INTEL, MN_OFFSET_MAKER},
{ tag_table_VND_NIKON, "NIKON", NULL, "Nikon\x00\x01\x00", 8, 8, MN_ORDER_NORMAL, MN_OFFSET_NORMAL},
{ tag_table_VND_NIKON_990, "NIKON", NULL, NULL, 0, 0, MN_ORDER_NORMAL, MN_OFFSET_NORMAL},
{ tag_table_VND_OLYMPUS, "OLYMPUS OPTICAL CO.,LTD", NULL, "OLYMP\x00\x01\x00", 8, 8, MN_ORDER_NORMAL, MN_OFFSET_NORMAL},
};
/* }}} */
/* {{{ exif_get_tagname
	Get the tag name for tag_num; unknown tags are formatted as
	"UndefinedTag:0x%04X" into ret (when given), otherwise "" is returned.
	A negative len pads the result with spaces to exactly -len-1 characters */
static char * exif_get_tagname(int tag_num, char *ret, int len, tag_table_type tag_table TSRMLS_DC)
{
int i, t;
char tmp[32];
for (i = 0; (t = tag_table[i].Tag) != TAG_END_OF_LIST; i++) {
if (t == tag_num) {
if (ret && len) {
strlcpy(ret, tag_table[i].Desc, abs(len));
if (len < 0) {
memset(ret + strlen(ret), ' ', -len - strlen(ret) - 1);
ret[-len - 1] = '\0';
}
return ret;
}
return tag_table[i].Desc;
}
}
if (ret && len) {
snprintf(tmp, sizeof(tmp), "UndefinedTag:0x%04X", tag_num);
strlcpy(ret, tmp, abs(len));
if (len < 0) {
memset(ret + strlen(ret), ' ', -len - strlen(ret) - 1);
ret[-len - 1] = '\0';
}
return ret;
}
return "";
}
/* }}} */
/* {{{ exif_char_dump
* Do not use! This is a debug function... */
#ifdef EXIF_DEBUG
static unsigned char* exif_char_dump(unsigned char * addr, int len, int offset)
{
static unsigned char buf[4096+1];
static unsigned char tmp[20];
int c, i, p=0, n = 5+31;
p += slprintf(buf+p, sizeof(buf)-p, "\nDump Len: %08X (%d)", len, len);
if (len) {
for(i=0; i<len+15 && p+n<=sizeof(buf); i++) {
if (i%16==0) {
p += slprintf(buf+p, sizeof(buf)-p, "\n%08X: ", i+offset);
}
if (i<len) {
c = *addr++;
p += slprintf(buf+p, sizeof(buf)-p, "%02X ", c);
tmp[i%16] = c>=32 ? c : '.';
tmp[(i%16)+1] = '\0';
} else {
p += slprintf(buf+p, sizeof(buf)-p, " ");
}
if (i%16==15) {
p += slprintf(buf+p, sizeof(buf)-p, " %s", tmp);
if (i>=len) {
break;
}
}
}
}
buf[sizeof(buf)-1] = '\0';
return buf;
}
#endif
/* }}} */
/* {{{ php_jpg_get16
   Get 16 bits in Motorola (big-endian) order; JPEG header fields are always big-endian.
*/
static int php_jpg_get16(void *value)
{
return (((uchar *)value)[0] << 8) | ((uchar *)value)[1];
}
/* }}} */
/* {{{ php_ifd_get16u
* Convert a 16 bit unsigned value from file's native byte order */
static int php_ifd_get16u(void *value, int motorola_intel)
{
if (motorola_intel) {
return (((uchar *)value)[0] << 8) | ((uchar *)value)[1];
} else {
return (((uchar *)value)[1] << 8) | ((uchar *)value)[0];
}
}
/* }}} */
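/* Throughout this file motorola_intel == 1 means big-endian ("MM",
 * Motorola) and 0 means little-endian ("II", Intel), matching the TIFF
 * byte-order marker. */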
/* {{{ php_ifd_get16s
* Convert a 16 bit signed value from file's native byte order */
static signed short php_ifd_get16s(void *value, int motorola_intel)
{
return (signed short)php_ifd_get16u(value, motorola_intel);
}
/* }}} */
/* {{{ php_ifd_get32s
* Convert a 32 bit signed value from file's native byte order */
static int php_ifd_get32s(void *value, int motorola_intel)
{
if (motorola_intel) {
return (((char *)value)[0] << 24)
| (((uchar *)value)[1] << 16)
| (((uchar *)value)[2] << 8 )
| (((uchar *)value)[3] );
} else {
return (((char *)value)[3] << 24)
| (((uchar *)value)[2] << 16)
| (((uchar *)value)[1] << 8 )
| (((uchar *)value)[0] );
}
}
/* }}} */
/* {{{ php_ifd_get32u
 * Convert a 32 bit unsigned value from file's native byte order */
static unsigned php_ifd_get32u(void *value, int motorola_intel)
{
return (unsigned)php_ifd_get32s(value, motorola_intel) & 0xffffffff;
}
/* }}} */
/* {{{ php_ifd_set16u
* Write 16 bit unsigned value to data */
static void php_ifd_set16u(char *data, unsigned int value, int motorola_intel)
{
if (motorola_intel) {
data[0] = (value & 0xFF00) >> 8;
data[1] = (value & 0x00FF);
} else {
data[1] = (value & 0xFF00) >> 8;
data[0] = (value & 0x00FF);
}
}
/* }}} */
/* {{{ php_ifd_set32u
 * Write a 32 bit unsigned value to data */
static void php_ifd_set32u(char *data, size_t value, int motorola_intel)
{
if (motorola_intel) {
data[0] = (value & 0xFF000000) >> 24;
data[1] = (value & 0x00FF0000) >> 16;
data[2] = (value & 0x0000FF00) >> 8;
data[3] = (value & 0x000000FF);
} else {
data[3] = (value & 0xFF000000) >> 24;
data[2] = (value & 0x00FF0000) >> 16;
data[1] = (value & 0x0000FF00) >> 8;
data[0] = (value & 0x000000FF);
}
}
/* }}} */
#ifdef EXIF_DEBUG
char * exif_dump_data(int *dump_free, int format, int components, int length, int motorola_intel, char *value_ptr TSRMLS_DC) /* {{{ */
{
char *dump;
int len;
*dump_free = 0;
if (format == TAG_FMT_STRING) {
return value_ptr ? value_ptr : "<no data>";
}
if (format == TAG_FMT_UNDEFINED) {
return "<undefined>\n";
}
if (format == TAG_FMT_IFD) {
return "";
}
if (format == TAG_FMT_SINGLE || format == TAG_FMT_DOUBLE) {
return "<not implemented>";
}
*dump_free = 1;
if (components > 1) {
len = spprintf(&dump, 0, "(%d,%d) {", components, length);
} else {
len = spprintf(&dump, 0, "{");
}
while(components > 0) {
switch(format) {
case TAG_FMT_BYTE:
case TAG_FMT_UNDEFINED:
case TAG_FMT_STRING:
case TAG_FMT_SBYTE:
dump = erealloc(dump, len + 4 + 1);
snprintf(dump + len, 4 + 1, "0x%02X", *value_ptr);
len += 4;
value_ptr++;
break;
case TAG_FMT_USHORT:
case TAG_FMT_SSHORT:
dump = erealloc(dump, len + 6 + 1);
snprintf(dump + len, 6 + 1, "0x%04X", php_ifd_get16s(value_ptr, motorola_intel));
len += 6;
value_ptr += 2;
break;
case TAG_FMT_ULONG:
case TAG_FMT_SLONG:
dump = erealloc(dump, len + 6 + 1);
snprintf(dump + len, 6 + 1, "0x%04X", php_ifd_get32s(value_ptr, motorola_intel));
len += 6;
value_ptr += 4;
break;
case TAG_FMT_URATIONAL:
case TAG_FMT_SRATIONAL:
dump = erealloc(dump, len + 13 + 1);
snprintf(dump + len, 13 + 1, "0x%04X/0x%04X", php_ifd_get32s(value_ptr, motorola_intel), php_ifd_get32s(value_ptr+4, motorola_intel));
len += 13;
value_ptr += 8;
break;
}
		/* decrement first so no trailing ", " is appended after the last element */
		components--;
		if (components > 0) {
			dump = erealloc(dump, len + 2 + 1);
			snprintf(dump + len, 2 + 1, ", ");
			len += 2;
		}
}
dump = erealloc(dump, len + 1 + 1);
snprintf(dump + len, 1 + 1, "}");
return dump;
}
/* }}} */
#endif
/* {{{ exif_convert_any_format
* Evaluate number, be it int, rational, or float from directory. */
static double exif_convert_any_format(void *value, int format, int motorola_intel TSRMLS_DC)
{
int s_den;
unsigned u_den;
switch(format) {
case TAG_FMT_SBYTE: return *(signed char *)value;
case TAG_FMT_BYTE: return *(uchar *)value;
case TAG_FMT_USHORT: return php_ifd_get16u(value, motorola_intel);
case TAG_FMT_ULONG: return php_ifd_get32u(value, motorola_intel);
case TAG_FMT_URATIONAL:
u_den = php_ifd_get32u(4+(char *)value, motorola_intel);
if (u_den == 0) {
return 0;
} else {
return (double)php_ifd_get32u(value, motorola_intel) / u_den;
}
case TAG_FMT_SRATIONAL:
s_den = php_ifd_get32s(4+(char *)value, motorola_intel);
if (s_den == 0) {
return 0;
} else {
return (double)php_ifd_get32s(value, motorola_intel) / s_den;
}
case TAG_FMT_SSHORT: return (signed short)php_ifd_get16u(value, motorola_intel);
case TAG_FMT_SLONG: return php_ifd_get32s(value, motorola_intel);
/* Not sure if this is correct (never seen float used in Exif format) */
case TAG_FMT_SINGLE:
#ifdef EXIF_DEBUG
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Found value of type single");
#endif
return (double)*(float *)value;
case TAG_FMT_DOUBLE:
#ifdef EXIF_DEBUG
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Found value of type double");
#endif
return *(double *)value;
}
return 0;
}
/* }}} */
/* {{{ exif_convert_any_to_int
* Evaluate number, be it int, rational, or float from directory. */
static size_t exif_convert_any_to_int(void *value, int format, int motorola_intel TSRMLS_DC)
{
int s_den;
unsigned u_den;
switch(format) {
case TAG_FMT_SBYTE: return *(signed char *)value;
case TAG_FMT_BYTE: return *(uchar *)value;
case TAG_FMT_USHORT: return php_ifd_get16u(value, motorola_intel);
case TAG_FMT_ULONG: return php_ifd_get32u(value, motorola_intel);
case TAG_FMT_URATIONAL:
u_den = php_ifd_get32u(4+(char *)value, motorola_intel);
if (u_den == 0) {
return 0;
} else {
return php_ifd_get32u(value, motorola_intel) / u_den;
}
case TAG_FMT_SRATIONAL:
s_den = php_ifd_get32s(4+(char *)value, motorola_intel);
if (s_den == 0) {
return 0;
} else {
return php_ifd_get32s(value, motorola_intel) / s_den;
}
case TAG_FMT_SSHORT: return php_ifd_get16u(value, motorola_intel);
case TAG_FMT_SLONG: return php_ifd_get32s(value, motorola_intel);
/* Not sure if this is correct (never seen float used in Exif format) */
case TAG_FMT_SINGLE:
#ifdef EXIF_DEBUG
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Found value of type single");
#endif
return (size_t)*(float *)value;
case TAG_FMT_DOUBLE:
#ifdef EXIF_DEBUG
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Found value of type double");
#endif
return (size_t)*(double *)value;
}
return 0;
}
/* }}} */
/* {{{ struct image_info_value, image_info_list
*/
#ifndef WORD
#define WORD unsigned short
#endif
#ifndef DWORD
#define DWORD unsigned int
#endif
typedef struct {
int num;
int den;
} signed_rational;
typedef struct {
unsigned int num;
unsigned int den;
} unsigned_rational;
typedef union _image_info_value {
char *s;
unsigned u;
int i;
float f;
double d;
signed_rational sr;
unsigned_rational ur;
union _image_info_value *list;
} image_info_value;
typedef struct {
WORD tag;
WORD format;
DWORD length;
DWORD dummy; /* value ptr of tiff directory entry */
char *name;
image_info_value value;
} image_info_data;
typedef struct {
int count;
image_info_data *list;
} image_info_list;
/* }}} */
/* {{{ exif_get_sectionname
Returns the name of a section
*/
#define SECTION_FILE 0
#define SECTION_COMPUTED 1
#define SECTION_ANY_TAG 2
#define SECTION_IFD0 3
#define SECTION_THUMBNAIL 4
#define SECTION_COMMENT 5
#define SECTION_APP0 6
#define SECTION_EXIF 7
#define SECTION_FPIX 8
#define SECTION_GPS 9
#define SECTION_INTEROP 10
#define SECTION_APP12 11
#define SECTION_WINXP 12
#define SECTION_MAKERNOTE 13
#define SECTION_COUNT 14
#define FOUND_FILE (1<<SECTION_FILE)
#define FOUND_COMPUTED (1<<SECTION_COMPUTED)
#define FOUND_ANY_TAG (1<<SECTION_ANY_TAG)
#define FOUND_IFD0 (1<<SECTION_IFD0)
#define FOUND_THUMBNAIL (1<<SECTION_THUMBNAIL)
#define FOUND_COMMENT (1<<SECTION_COMMENT)
#define FOUND_APP0 (1<<SECTION_APP0)
#define FOUND_EXIF (1<<SECTION_EXIF)
#define FOUND_FPIX (1<<SECTION_FPIX)
#define FOUND_GPS (1<<SECTION_GPS)
#define FOUND_INTEROP (1<<SECTION_INTEROP)
#define FOUND_APP12 (1<<SECTION_APP12)
#define FOUND_WINXP (1<<SECTION_WINXP)
#define FOUND_MAKERNOTE (1<<SECTION_MAKERNOTE)
static char *exif_get_sectionname(int section)
{
switch(section) {
case SECTION_FILE: return "FILE";
case SECTION_COMPUTED: return "COMPUTED";
case SECTION_ANY_TAG: return "ANY_TAG";
case SECTION_IFD0: return "IFD0";
case SECTION_THUMBNAIL: return "THUMBNAIL";
case SECTION_COMMENT: return "COMMENT";
case SECTION_APP0: return "APP0";
case SECTION_EXIF: return "EXIF";
case SECTION_FPIX: return "FPIX";
case SECTION_GPS: return "GPS";
case SECTION_INTEROP: return "INTEROP";
case SECTION_APP12: return "APP12";
case SECTION_WINXP: return "WINXP";
case SECTION_MAKERNOTE: return "MAKERNOTE";
}
return "";
}
static tag_table_type exif_get_tag_table(int section)
{
switch(section) {
case SECTION_FILE: return &tag_table_IFD[0];
case SECTION_COMPUTED: return &tag_table_IFD[0];
case SECTION_ANY_TAG: return &tag_table_IFD[0];
case SECTION_IFD0: return &tag_table_IFD[0];
case SECTION_THUMBNAIL: return &tag_table_IFD[0];
case SECTION_COMMENT: return &tag_table_IFD[0];
case SECTION_APP0: return &tag_table_IFD[0];
case SECTION_EXIF: return &tag_table_IFD[0];
case SECTION_FPIX: return &tag_table_IFD[0];
case SECTION_GPS: return &tag_table_GPS[0];
case SECTION_INTEROP: return &tag_table_IOP[0];
case SECTION_APP12: return &tag_table_IFD[0];
case SECTION_WINXP: return &tag_table_IFD[0];
}
return &tag_table_IFD[0];
}
/* }}} */
/* {{{ exif_get_sectionlist
   Return a comma-separated list of the section names selected by sectionlist. The returned string must be efree()d
*/
static char *exif_get_sectionlist(int sectionlist TSRMLS_DC)
{
int i, len, ml = 0;
char *sections;
for(i=0; i<SECTION_COUNT; i++) {
ml += strlen(exif_get_sectionname(i))+2;
}
sections = safe_emalloc(ml, 1, 1);
sections[0] = '\0';
len = 0;
for(i=0; i<SECTION_COUNT; i++) {
if (sectionlist&(1<<i)) {
snprintf(sections+len, ml-len, "%s, ", exif_get_sectionname(i));
len = strlen(sections);
}
}
if (len>2)
sections[len-2] = '\0';
return sections;
}
/* }}} */
/* {{{ struct image_info_type
   This structure stores Exif header image elements in a simple manner.
   It is used to store camera data as extracted from the various ways that
   it can be stored in an Exif header
*/
typedef struct {
int type;
size_t size;
uchar *data;
} file_section;
typedef struct {
int count;
file_section *list;
} file_section_list;
typedef struct {
image_filetype filetype;
size_t width, height;
size_t size;
size_t offset;
char *data;
} thumbnail_data;
typedef struct {
char *value;
size_t size;
int tag;
} xp_field_type;
typedef struct {
int count;
xp_field_type *list;
} xp_field_list;
/* This structure holds everything parsed from one image file. */
typedef struct {
php_stream *infile;
char *FileName;
time_t FileDateTime;
size_t FileSize;
image_filetype FileType;
int Height, Width;
int IsColor;
char *make;
char *model;
float ApertureFNumber;
float ExposureTime;
double FocalplaneUnits;
float CCDWidth;
double FocalplaneXRes;
size_t ExifImageWidth;
float FocalLength;
float Distance;
int motorola_intel; /* 1 Motorola; 0 Intel */
char *UserComment;
int UserCommentLength;
char *UserCommentEncoding;
char *encode_unicode;
char *decode_unicode_be;
char *decode_unicode_le;
char *encode_jis;
char *decode_jis_be;
char *decode_jis_le;
char *Copyright;/* EXIF standard defines Copyright as "<Photographer> [ '\0' <Editor> ] ['\0']" */
char *CopyrightPhotographer;
char *CopyrightEditor;
xp_field_list xp_fields;
thumbnail_data Thumbnail;
/* other */
int sections_found; /* FOUND_<marker> */
image_info_list info_list[SECTION_COUNT];
/* for parsing */
int read_thumbnail;
int read_all;
int ifd_nesting_level;
/* internal */
file_section_list file;
} image_info_type;
/* }}} */
/* {{{ exif_error_docref */
static void exif_error_docref(const char *docref EXIFERR_DC, const image_info_type *ImageInfo, int type, const char *format, ...)
{
va_list args;
va_start(args, format);
#ifdef EXIF_DEBUG
{
char *buf;
spprintf(&buf, 0, "%s(%d): %s", _file, _line, format);
php_verror(docref, ImageInfo->FileName?ImageInfo->FileName:"", type, buf, args TSRMLS_CC);
efree(buf);
}
#else
php_verror(docref, ImageInfo->FileName?ImageInfo->FileName:"", type, format, args TSRMLS_CC);
#endif
va_end(args);
}
/* }}} */
/* {{{ jpeg_sof_info
*/
typedef struct {
int bits_per_sample;
size_t width;
size_t height;
int num_components;
} jpeg_sof_info;
/* }}} */
/* {{{ exif_file_sections_add
   Add a file_section to image_info.
   Returns the index of the newly used block; if size>0 and data == NULL, a buffer of the given size is allocated
*/
static int exif_file_sections_add(image_info_type *ImageInfo, int type, size_t size, uchar *data)
{
file_section *tmp;
int count = ImageInfo->file.count;
tmp = safe_erealloc(ImageInfo->file.list, (count+1), sizeof(file_section), 0);
ImageInfo->file.list = tmp;
ImageInfo->file.list[count].type = 0xFFFF;
ImageInfo->file.list[count].data = NULL;
ImageInfo->file.list[count].size = 0;
ImageInfo->file.count = count+1;
if (!size) {
data = NULL;
} else if (data == NULL) {
data = safe_emalloc(size, 1, 0);
}
ImageInfo->file.list[count].type = type;
ImageInfo->file.list[count].data = data;
ImageInfo->file.list[count].size = size;
return count;
}
/* }}} */
/* {{{ exif_file_sections_realloc
Reallocate a file section; returns 0 on success and -1 on failure
*/
static int exif_file_sections_realloc(image_info_type *ImageInfo, int section_index, size_t size TSRMLS_DC)
{
void *tmp;
/* This is not a malloc/realloc check. It is a plausibility check for the
* function parameters (requirements engineering).
*/
if (section_index >= ImageInfo->file.count) {
EXIF_ERRLOG_FSREALLOC(ImageInfo)
return -1;
}
tmp = safe_erealloc(ImageInfo->file.list[section_index].data, 1, size, 0);
ImageInfo->file.list[section_index].data = tmp;
ImageInfo->file.list[section_index].size = size;
return 0;
}
/* }}} */
/* {{{ exif_file_sections_free
Discard all file_sections in ImageInfo
*/
static int exif_file_sections_free(image_info_type *ImageInfo)
{
int i;
if (ImageInfo->file.count) {
for (i=0; i<ImageInfo->file.count; i++) {
EFREE_IF(ImageInfo->file.list[i].data);
}
}
EFREE_IF(ImageInfo->file.list);
ImageInfo->file.count = 0;
return TRUE;
}
/* }}} */
/* {{{ exif_iif_add_value
Add a value to image_info
*/
static void exif_iif_add_value(image_info_type *image_info, int section_index, char *name, int tag, int format, int length, void* value, int motorola_intel TSRMLS_DC)
{
size_t idex;
void *vptr;
image_info_value *info_value;
image_info_data *info_data;
image_info_data *list;
if (length < 0) {
return;
}
list = safe_erealloc(image_info->info_list[section_index].list, (image_info->info_list[section_index].count+1), sizeof(image_info_data), 0);
image_info->info_list[section_index].list = list;
info_data = &image_info->info_list[section_index].list[image_info->info_list[section_index].count];
memset(info_data, 0, sizeof(image_info_data));
info_data->tag = tag;
info_data->format = format;
info_data->length = length;
info_data->name = estrdup(name);
info_value = &info_data->value;
switch (format) {
case TAG_FMT_STRING:
if (value) {
length = php_strnlen(value, length);
info_value->s = estrndup(value, length);
info_data->length = length;
} else {
info_data->length = 0;
info_value->s = estrdup("");
}
break;
default:
/* Standard says more types possible but skip them...
* but allow users to handle data if they know how to
* So not return but use type UNDEFINED
* return;
*/
info_data->format = TAG_FMT_UNDEFINED;/* otherwise not freed from memory */
case TAG_FMT_SBYTE:
case TAG_FMT_BYTE:
/* in contrast to strings bytes do not need to allocate buffer for NULL if length==0 */
if (!length)
break;
case TAG_FMT_UNDEFINED:
if (value) {
if (tag == TAG_MAKER_NOTE) {
length = MIN(length, strlen(value));
}
/* do not recompute length here */
info_value->s = estrndup(value, length);
info_data->length = length;
} else {
info_data->length = 0;
info_value->s = estrdup("");
}
break;
case TAG_FMT_USHORT:
case TAG_FMT_ULONG:
case TAG_FMT_URATIONAL:
case TAG_FMT_SSHORT:
case TAG_FMT_SLONG:
case TAG_FMT_SRATIONAL:
case TAG_FMT_SINGLE:
case TAG_FMT_DOUBLE:
if (length==0) {
break;
} else
if (length>1) {
info_value->list = safe_emalloc(length, sizeof(image_info_value), 0);
} else {
info_value = &info_data->value;
}
for (idex=0,vptr=value; idex<(size_t)length; idex++,vptr=(char *) vptr + php_tiff_bytes_per_format[format]) {
if (length>1) {
info_value = &info_data->value.list[idex];
}
switch (format) {
case TAG_FMT_USHORT:
info_value->u = php_ifd_get16u(vptr, motorola_intel);
break;
case TAG_FMT_ULONG:
info_value->u = php_ifd_get32u(vptr, motorola_intel);
break;
case TAG_FMT_URATIONAL:
info_value->ur.num = php_ifd_get32u(vptr, motorola_intel);
info_value->ur.den = php_ifd_get32u(4+(char *)vptr, motorola_intel);
break;
case TAG_FMT_SSHORT:
info_value->i = php_ifd_get16s(vptr, motorola_intel);
break;
case TAG_FMT_SLONG:
info_value->i = php_ifd_get32s(vptr, motorola_intel);
break;
case TAG_FMT_SRATIONAL:
info_value->sr.num = php_ifd_get32u(vptr, motorola_intel);
info_value->sr.den = php_ifd_get32u(4+(char *)vptr, motorola_intel);
break;
case TAG_FMT_SINGLE:
#ifdef EXIF_DEBUG
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Found value of type single");
#endif
info_value->f = *(float *)vptr; /* read the current element, not the buffer start */
break;
case TAG_FMT_DOUBLE:
#ifdef EXIF_DEBUG
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Found value of type double");
#endif
info_value->d = *(double *)vptr;
break;
}
}
}
image_info->sections_found |= 1<<section_index;
image_info->info_list[section_index].count++;
}
/* }}} */
/* {{{ exif_iif_add_tag
Add a tag from IFD to image_info
*/
static void exif_iif_add_tag(image_info_type *image_info, int section_index, char *name, int tag, int format, size_t length, void* value TSRMLS_DC)
{
exif_iif_add_value(image_info, section_index, name, tag, format, (int)length, value, image_info->motorola_intel TSRMLS_CC);
}
/* }}} */
/* {{{ exif_iif_add_int
Add an int value to image_info
*/
static void exif_iif_add_int(image_info_type *image_info, int section_index, char *name, int value TSRMLS_DC)
{
image_info_data *info_data;
image_info_data *list;
list = safe_erealloc(image_info->info_list[section_index].list, (image_info->info_list[section_index].count+1), sizeof(image_info_data), 0);
image_info->info_list[section_index].list = list;
info_data = &image_info->info_list[section_index].list[image_info->info_list[section_index].count];
info_data->tag = TAG_NONE;
info_data->format = TAG_FMT_SLONG;
info_data->length = 1;
info_data->name = estrdup(name);
info_data->value.i = value;
image_info->sections_found |= 1<<section_index;
image_info->info_list[section_index].count++;
}
/* }}} */
/* {{{ exif_iif_add_str
Add a string value to image_info MUST BE NUL TERMINATED
*/
static void exif_iif_add_str(image_info_type *image_info, int section_index, char *name, char *value TSRMLS_DC)
{
image_info_data *info_data;
image_info_data *list;
if (value) {
list = safe_erealloc(image_info->info_list[section_index].list, (image_info->info_list[section_index].count+1), sizeof(image_info_data), 0);
image_info->info_list[section_index].list = list;
info_data = &image_info->info_list[section_index].list[image_info->info_list[section_index].count];
info_data->tag = TAG_NONE;
info_data->format = TAG_FMT_STRING;
info_data->length = 1;
info_data->name = estrdup(name);
info_data->value.s = estrdup(value);
image_info->sections_found |= 1<<section_index;
image_info->info_list[section_index].count++;
}
}
/* }}} */
/* {{{ exif_iif_add_fmt
Add a format string value to image_info MUST BE NUL TERMINATED
*/
static void exif_iif_add_fmt(image_info_type *image_info, int section_index, char *name TSRMLS_DC, char *value, ...)
{
char *tmp;
va_list arglist;
va_start(arglist, value);
if (value) {
vspprintf(&tmp, 0, value, arglist);
exif_iif_add_str(image_info, section_index, name, tmp TSRMLS_CC);
efree(tmp);
}
va_end(arglist);
}
/* }}} */
/* {{{ exif_iif_add_buffer
Add a buffer of the given length to image_info; a terminating NUL is appended
*/
static void exif_iif_add_buffer(image_info_type *image_info, int section_index, char *name, int length, char *value TSRMLS_DC)
{
image_info_data *info_data;
image_info_data *list;
if (value) {
list = safe_erealloc(image_info->info_list[section_index].list, (image_info->info_list[section_index].count+1), sizeof(image_info_data), 0);
image_info->info_list[section_index].list = list;
info_data = &image_info->info_list[section_index].list[image_info->info_list[section_index].count];
info_data->tag = TAG_NONE;
info_data->format = TAG_FMT_UNDEFINED;
info_data->length = length;
info_data->name = estrdup(name);
info_data->value.s = safe_emalloc(length, 1, 1);
memcpy(info_data->value.s, value, length);
info_data->value.s[length] = 0;
image_info->sections_found |= 1<<section_index;
image_info->info_list[section_index].count++;
}
}
/* }}} */
/* {{{ exif_iif_free
Free memory allocated for image_info
*/
static void exif_iif_free(image_info_type *image_info, int section_index) {
int i;
void *f; /* faster */
if (image_info->info_list[section_index].count) {
for (i=0; i < image_info->info_list[section_index].count; i++) {
if ((f=image_info->info_list[section_index].list[i].name) != NULL) {
efree(f);
}
switch(image_info->info_list[section_index].list[i].format) {
case TAG_FMT_SBYTE:
case TAG_FMT_BYTE:
/* in contrast to strings bytes do not need to allocate buffer for NULL if length==0 */
if (image_info->info_list[section_index].list[i].length<1)
break;
default:
case TAG_FMT_UNDEFINED:
case TAG_FMT_STRING:
if ((f=image_info->info_list[section_index].list[i].value.s) != NULL) {
efree(f);
}
break;
case TAG_FMT_USHORT:
case TAG_FMT_ULONG:
case TAG_FMT_URATIONAL:
case TAG_FMT_SSHORT:
case TAG_FMT_SLONG:
case TAG_FMT_SRATIONAL:
case TAG_FMT_SINGLE:
case TAG_FMT_DOUBLE:
/* nothing to do here */
if (image_info->info_list[section_index].list[i].length > 1) {
if ((f=image_info->info_list[section_index].list[i].value.list) != NULL) {
efree(f);
}
}
break;
}
}
}
EFREE_IF(image_info->info_list[section_index].list);
}
/* }}} */
/* {{{ add_assoc_image_info
* Add image_info to associative array value. */
static void add_assoc_image_info(zval *value, int sub_array, image_info_type *image_info, int section_index TSRMLS_DC)
{
char buffer[64], *val, *name, uname[64];
int i, ap, l, b, idx=0, unknown=0;
#ifdef EXIF_DEBUG
int info_tag;
#endif
image_info_value *info_value;
image_info_data *info_data;
zval *tmpi, *array = NULL;
#ifdef EXIF_DEBUG
/* php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Adding %d infos from section %s", image_info->info_list[section_index].count, exif_get_sectionname(section_index));*/
#endif
if (image_info->info_list[section_index].count) {
if (sub_array) {
MAKE_STD_ZVAL(tmpi);
array_init(tmpi);
} else {
tmpi = value;
}
for(i=0; i<image_info->info_list[section_index].count; i++) {
info_data = &image_info->info_list[section_index].list[i];
#ifdef EXIF_DEBUG
info_tag = info_data->tag; /* conversion */
#endif
info_value = &info_data->value;
if (!(name = info_data->name)) {
snprintf(uname, sizeof(uname), "%d", unknown++);
name = uname;
}
#ifdef EXIF_DEBUG
/* php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Adding infos: tag(0x%04X,%12s,L=0x%04X): %s", info_tag, exif_get_tagname(info_tag, buffer, -12, exif_get_tag_table(section_index) TSRMLS_CC), info_data->length, info_data->format==TAG_FMT_STRING?(info_value&&info_value->s?info_value->s:"<no data>"):exif_get_tagformat(info_data->format));*/
#endif
if (info_data->length==0) {
add_assoc_null(tmpi, name);
} else {
switch (info_data->format) {
default:
/* Standard says more types possible but skip them...
* but allow users to handle data if they know how to
* So not return but use type UNDEFINED
* return;
*/
case TAG_FMT_BYTE:
case TAG_FMT_SBYTE:
case TAG_FMT_UNDEFINED:
if (!info_value->s) {
add_assoc_stringl(tmpi, name, "", 0, 1);
} else {
add_assoc_stringl(tmpi, name, info_value->s, info_data->length, 1);
}
break;
case TAG_FMT_STRING:
if (!(val = info_value->s)) {
val = "";
}
if (section_index==SECTION_COMMENT) {
add_index_string(tmpi, idx++, val, 1);
} else {
add_assoc_string(tmpi, name, val, 1);
}
break;
case TAG_FMT_URATIONAL:
case TAG_FMT_SRATIONAL:
/*case TAG_FMT_BYTE:
case TAG_FMT_SBYTE:*/
case TAG_FMT_USHORT:
case TAG_FMT_SSHORT:
case TAG_FMT_SINGLE:
case TAG_FMT_DOUBLE:
case TAG_FMT_ULONG:
case TAG_FMT_SLONG:
/* now the rest, first see if it becomes an array */
if ((l = info_data->length) > 1) {
array = NULL;
MAKE_STD_ZVAL(array);
array_init(array);
}
for(ap=0; ap<l; ap++) {
if (l>1) {
info_value = &info_data->value.list[ap];
}
switch (info_data->format) {
case TAG_FMT_BYTE:
if (l>1) {
info_value = &info_data->value;
for (b=0;b<l;b++) {
add_index_long(array, b, (int)(info_value->s[b]));
}
break;
}
case TAG_FMT_USHORT:
case TAG_FMT_ULONG:
if (l==1) {
add_assoc_long(tmpi, name, (int)info_value->u);
} else {
add_index_long(array, ap, (int)info_value->u);
}
break;
case TAG_FMT_URATIONAL:
snprintf(buffer, sizeof(buffer), "%i/%i", info_value->ur.num, info_value->ur.den);
if (l==1) {
add_assoc_string(tmpi, name, buffer, 1);
} else {
add_index_string(array, ap, buffer, 1);
}
break;
case TAG_FMT_SBYTE:
if (l>1) {
info_value = &info_data->value;
for (b=0;b<l;b++) {
add_index_long(array, b, (int)info_value->s[b]);
}
break;
}
case TAG_FMT_SSHORT:
case TAG_FMT_SLONG:
if (l==1) {
add_assoc_long(tmpi, name, info_value->i);
} else {
add_index_long(array, ap, info_value->i);
}
break;
case TAG_FMT_SRATIONAL:
snprintf(buffer, sizeof(buffer), "%i/%i", info_value->sr.num, info_value->sr.den);
if (l==1) {
add_assoc_string(tmpi, name, buffer, 1);
} else {
add_index_string(array, ap, buffer, 1);
}
break;
case TAG_FMT_SINGLE:
if (l==1) {
add_assoc_double(tmpi, name, info_value->f);
} else {
add_index_double(array, ap, info_value->f);
}
break;
case TAG_FMT_DOUBLE:
if (l==1) {
add_assoc_double(tmpi, name, info_value->d);
} else {
add_index_double(array, ap, info_value->d);
}
break;
}
info_value = &info_data->value.list[ap];
}
if (l>1) {
add_assoc_zval(tmpi, name, array);
}
break;
}
}
}
if (sub_array) {
add_assoc_zval(value, exif_get_sectionname(section_index), tmpi);
}
}
}
/* }}} */
/* {{{ Markers
JPEG markers consist of one or more 0xFF bytes, followed by a marker
code byte (which is not an FF). Here are the marker codes of interest
in this program. (See jdmarker.c for a more complete list.)
*/
#define M_TEM 0x01 /* temp for arithmetic coding */
#define M_RES 0x02 /* reserved */
#define M_SOF0 0xC0 /* Start Of Frame N */
#define M_SOF1 0xC1 /* N indicates which compression process */
#define M_SOF2 0xC2 /* Only SOF0-SOF2 are now in common use */
#define M_SOF3 0xC3
#define M_DHT 0xC4
#define M_SOF5 0xC5 /* NB: codes C4 and CC are NOT SOF markers */
#define M_SOF6 0xC6
#define M_SOF7 0xC7
#define M_JPEG 0xC8 /* reserved for extensions */
#define M_SOF9 0xC9
#define M_SOF10 0xCA
#define M_SOF11 0xCB
#define M_DAC 0xCC /* arithmetic table */
#define M_SOF13 0xCD
#define M_SOF14 0xCE
#define M_SOF15 0xCF
#define M_RST0 0xD0 /* restart segment */
#define M_RST1 0xD1
#define M_RST2 0xD2
#define M_RST3 0xD3
#define M_RST4 0xD4
#define M_RST5 0xD5
#define M_RST6 0xD6
#define M_RST7 0xD7
#define M_SOI 0xD8 /* Start Of Image (beginning of datastream) */
#define M_EOI 0xD9 /* End Of Image (end of datastream) */
#define M_SOS 0xDA /* Start Of Scan (begins compressed data) */
#define M_DQT 0xDB
#define M_DNL 0xDC
#define M_DRI 0xDD
#define M_DHP 0xDE
#define M_EXP 0xDF
#define M_APP0 0xE0 /* JPEG: 'JFIF' and (additional) 'JFXX' */
#define M_EXIF 0xE1 /* Exif Attribute Information */
#define M_APP2 0xE2 /* Flash Pix Extension Data? */
#define M_APP3 0xE3
#define M_APP4 0xE4
#define M_APP5 0xE5
#define M_APP6 0xE6
#define M_APP7 0xE7
#define M_APP8 0xE8
#define M_APP9 0xE9
#define M_APP10 0xEA
#define M_APP11 0xEB
#define M_APP12 0xEC
#define M_APP13 0xED /* IPTC International Press Telecommunications Council */
#define M_APP14 0xEE /* Software, Copyright? */
#define M_APP15 0xEF
#define M_JPG0 0xF0
#define M_JPG1 0xF1
#define M_JPG2 0xF2
#define M_JPG3 0xF3
#define M_JPG4 0xF4
#define M_JPG5 0xF5
#define M_JPG6 0xF6
#define M_JPG7 0xF7
#define M_JPG8 0xF8
#define M_JPG9 0xF9
#define M_JPG10 0xFA
#define M_JPG11 0xFB
#define M_JPG12 0xFC
#define M_JPG13 0xFD
#define M_COM 0xFE /* COMment */
#define M_PSEUDO 0x123 /* Extra value. */
/* }}} */
/* {{{ jpeg2000 markers
*/
/* Markers x30 - x3F do not have a segment */
/* Markers x00, x01, xFE, xC0 - xDF ISO/IEC 10918-1 -> M_<xx> */
/* Markers xF0 - xF7 ISO/IEC 10918-3 */
/* Markers xF7 - xF8 ISO/IEC 14495-1 */
/* XY=Main/Tile-header:(R:required, N:not_allowed, O:optional, L:last_marker) */
#define JC_SOC 0x4F /* NN, Start of codestream */
#define JC_SIZ 0x51 /* RN, Image and tile size */
#define JC_COD 0x52 /* RO, Coding style default */
#define JC_COC 0x53 /* OO, Coding style component */
#define JC_TLM 0x55 /* ON, Tile part length main header */
#define JC_PLM 0x57 /* ON, Packet length main header */
#define JC_PLT 0x58 /* NO, Packet length tile part header */
#define JC_QCD 0x5C /* RO, Quantization default */
#define JC_QCC 0x5D /* OO, Quantization component */
#define JC_RGN 0x5E /* OO, Region of interest */
#define JC_POD 0x5F /* OO, Progression order default */
#define JC_PPM 0x60 /* ON, Packed packet headers main header */
#define JC_PPT 0x61 /* NO, Packed packet headers tile part header */
#define JC_CME 0x64 /* OO, Comment: "LL E <text>" E=0:binary, E=1:ascii */
#define JC_SOT 0x90 /* NR, Start of tile */
#define JC_SOP 0x91 /* NO, Start of packet */
#define JC_EPH 0x92 /* NO, End of packet header */
#define JC_SOD 0x93 /* NL, Start of data */
#define JC_EOC 0xD9 /* NN, End of codestream */
/* }}} */
/* {{{ exif_process_COM
Process a COM marker.
We want to print out the marker contents as legible text;
we must guard against random junk and varying newline representations.
*/
static void exif_process_COM (image_info_type *image_info, char *value, size_t length TSRMLS_DC)
{
exif_iif_add_tag(image_info, SECTION_COMMENT, "Comment", TAG_COMPUTED_VALUE, TAG_FMT_STRING, length-2, value+2 TSRMLS_CC);
}
/* }}} */
/* {{{ exif_process_CME
Process a CME marker.
We want to print out the marker contents as legible text;
we must guard against random junk and varying newline representations.
*/
#ifdef EXIF_JPEG2000
static void exif_process_CME (image_info_type *image_info, char *value, size_t length TSRMLS_DC)
{
if (length>3) {
switch(value[2]) {
case 0:
exif_iif_add_tag(image_info, SECTION_COMMENT, "Comment", TAG_COMPUTED_VALUE, TAG_FMT_UNDEFINED, length, value TSRMLS_CC);
break;
case 1:
exif_iif_add_tag(image_info, SECTION_COMMENT, "Comment", TAG_COMPUTED_VALUE, TAG_FMT_STRING, length, value);
break;
default:
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Undefined JPEG2000 comment encoding");
break;
}
} else {
exif_iif_add_tag(image_info, SECTION_COMMENT, "Comment", TAG_COMPUTED_VALUE, TAG_FMT_UNDEFINED, 0, NULL);
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "JPEG2000 comment section too small");
}
}
#endif
/* }}} */
/* {{{ exif_process_SOFn
* Process a SOFn marker. This is useful for the image dimensions */
static void exif_process_SOFn (uchar *Data, int marker, jpeg_sof_info *result)
{
/* 0xFF SOFn SectLen(2) Bits(1) Height(2) Width(2) Channels(1) 3*Channels (1) */
result->bits_per_sample = Data[2];
result->height = php_jpg_get16(Data+3);
result->width = php_jpg_get16(Data+5);
result->num_components = Data[7];
/* switch (marker) {
case M_SOF0: process = "Baseline"; break;
case M_SOF1: process = "Extended sequential"; break;
case M_SOF2: process = "Progressive"; break;
case M_SOF3: process = "Lossless"; break;
case M_SOF5: process = "Differential sequential"; break;
case M_SOF6: process = "Differential progressive"; break;
case M_SOF7: process = "Differential lossless"; break;
case M_SOF9: process = "Extended sequential, arithmetic coding"; break;
case M_SOF10: process = "Progressive, arithmetic coding"; break;
case M_SOF11: process = "Lossless, arithmetic coding"; break;
case M_SOF13: process = "Differential sequential, arithmetic coding"; break;
case M_SOF14: process = "Differential progressive, arithmetic coding"; break;
case M_SOF15: process = "Differential lossless, arithmetic coding"; break;
default: process = "Unknown"; break;
}*/
}
/* }}} */
/* forward declarations */
static int exif_process_IFD_in_JPEG(image_info_type *ImageInfo, char *dir_start, char *offset_base, size_t IFDlength, size_t displacement, int section_index TSRMLS_DC);
static int exif_process_IFD_TAG( image_info_type *ImageInfo, char *dir_entry, char *offset_base, size_t IFDlength, size_t displacement, int section_index, int ReadNextIFD, tag_table_type tag_table TSRMLS_DC);
/* {{{ exif_get_markername
Get name of marker */
#ifdef EXIF_DEBUG
static char * exif_get_markername(int marker)
{
switch(marker) {
case 0xC0: return "SOF0";
case 0xC1: return "SOF1";
case 0xC2: return "SOF2";
case 0xC3: return "SOF3";
case 0xC4: return "DHT";
case 0xC5: return "SOF5";
case 0xC6: return "SOF6";
case 0xC7: return "SOF7";
case 0xC9: return "SOF9";
case 0xCA: return "SOF10";
case 0xCB: return "SOF11";
case 0xCD: return "SOF13";
case 0xCE: return "SOF14";
case 0xCF: return "SOF15";
case 0xD8: return "SOI";
case 0xD9: return "EOI";
case 0xDA: return "SOS";
case 0xDB: return "DQT";
case 0xDC: return "DNL";
case 0xDD: return "DRI";
case 0xDE: return "DHP";
case 0xDF: return "EXP";
case 0xE0: return "APP0";
case 0xE1: return "EXIF";
case 0xE2: return "FPIX";
case 0xE3: return "APP3";
case 0xE4: return "APP4";
case 0xE5: return "APP5";
case 0xE6: return "APP6";
case 0xE7: return "APP7";
case 0xE8: return "APP8";
case 0xE9: return "APP9";
case 0xEA: return "APP10";
case 0xEB: return "APP11";
case 0xEC: return "APP12";
case 0xED: return "APP13";
case 0xEE: return "APP14";
case 0xEF: return "APP15";
case 0xF0: return "JPG0";
case 0xFD: return "JPG13";
case 0xFE: return "COM";
case 0x01: return "TEM";
}
return "Unknown";
}
#endif
/* }}} */
/* {{{ proto string exif_tagname(index)
Get headername for index or false if not defined */
PHP_FUNCTION(exif_tagname)
{
long tag;
char *szTemp;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &tag) == FAILURE) {
return;
}
szTemp = exif_get_tagname(tag, NULL, 0, tag_table_IFD TSRMLS_CC);
if (tag < 0 || !szTemp || !szTemp[0]) {
RETURN_FALSE;
}
RETURN_STRING(szTemp, 1)
}
/* }}} */
/* {{{ exif_ifd_make_value
* Create a value for an ifd from an info_data pointer */
static void* exif_ifd_make_value(image_info_data *info_data, int motorola_intel TSRMLS_DC) {
size_t byte_count;
char *value_ptr, *data_ptr;
size_t i;
image_info_value *info_value;
byte_count = php_tiff_bytes_per_format[info_data->format] * info_data->length;
value_ptr = safe_emalloc(max(byte_count, 4), 1, 0);
memset(value_ptr, 0, 4);
if (!info_data->length) {
return value_ptr;
}
if (info_data->format == TAG_FMT_UNDEFINED || info_data->format == TAG_FMT_STRING
|| (byte_count>1 && (info_data->format == TAG_FMT_BYTE || info_data->format == TAG_FMT_SBYTE))
) {
memmove(value_ptr, info_data->value.s, byte_count);
return value_ptr;
} else if (info_data->format == TAG_FMT_BYTE) {
*value_ptr = info_data->value.u;
return value_ptr;
} else if (info_data->format == TAG_FMT_SBYTE) {
*value_ptr = info_data->value.i;
return value_ptr;
} else {
data_ptr = value_ptr;
for(i=0; i<info_data->length; i++) {
if (info_data->length==1) {
info_value = &info_data->value;
} else {
info_value = &info_data->value.list[i];
}
switch(info_data->format) {
case TAG_FMT_USHORT:
php_ifd_set16u(data_ptr, info_value->u, motorola_intel);
data_ptr += 2;
break;
case TAG_FMT_ULONG:
php_ifd_set32u(data_ptr, info_value->u, motorola_intel);
data_ptr += 4;
break;
case TAG_FMT_SSHORT:
php_ifd_set16u(data_ptr, info_value->i, motorola_intel);
data_ptr += 2;
break;
case TAG_FMT_SLONG:
php_ifd_set32u(data_ptr, info_value->i, motorola_intel);
data_ptr += 4;
break;
case TAG_FMT_URATIONAL:
php_ifd_set32u(data_ptr, info_value->ur.num, motorola_intel);
php_ifd_set32u(data_ptr+4, info_value->ur.den, motorola_intel);
data_ptr += 8;
break;
case TAG_FMT_SRATIONAL:
php_ifd_set32u(data_ptr, info_value->sr.num, motorola_intel);
php_ifd_set32u(data_ptr+4, info_value->sr.den, motorola_intel);
data_ptr += 8;
break;
case TAG_FMT_SINGLE:
memmove(data_ptr, &info_value->f, 4);
data_ptr += 4;
break;
case TAG_FMT_DOUBLE:
memmove(data_ptr, &info_value->d, 8);
data_ptr += 8;
break;
}
}
}
return value_ptr;
}
/* }}} */
/* {{{ exif_thumbnail_build
* Check and build thumbnail */
static void exif_thumbnail_build(image_info_type *ImageInfo TSRMLS_DC) {
size_t new_size, new_move, new_value;
char *new_data;
void *value_ptr;
int i, byte_count;
image_info_list *info_list;
image_info_data *info_data;
#ifdef EXIF_DEBUG
char tagname[64];
#endif
if (!ImageInfo->read_thumbnail || !ImageInfo->Thumbnail.offset || !ImageInfo->Thumbnail.size) {
return; /* ignore this call */
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail: filetype = %d", ImageInfo->Thumbnail.filetype);
#endif
switch(ImageInfo->Thumbnail.filetype) {
default:
case IMAGE_FILETYPE_JPEG:
/* done */
break;
case IMAGE_FILETYPE_TIFF_II:
case IMAGE_FILETYPE_TIFF_MM:
info_list = &ImageInfo->info_list[SECTION_THUMBNAIL];
new_size = 8 + 2 + info_list->count * 12 + 4;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail: size of signature + directory(%d): 0x%02X", info_list->count, new_size);
#endif
new_value= new_size; /* offset for ifd values outside ifd directory */
for (i=0; i<info_list->count; i++) {
info_data = &info_list->list[i];
byte_count = php_tiff_bytes_per_format[info_data->format] * info_data->length;
if (byte_count > 4) {
new_size += byte_count;
}
}
new_move = new_size;
new_data = safe_erealloc(ImageInfo->Thumbnail.data, 1, ImageInfo->Thumbnail.size, new_size);
ImageInfo->Thumbnail.data = new_data;
memmove(ImageInfo->Thumbnail.data + new_move, ImageInfo->Thumbnail.data, ImageInfo->Thumbnail.size);
ImageInfo->Thumbnail.size += new_size;
/* fill in data */
if (ImageInfo->motorola_intel) {
memmove(new_data, "MM\x00\x2a\x00\x00\x00\x08", 8);
} else {
memmove(new_data, "II\x2a\x00\x08\x00\x00\x00", 8);
}
new_data += 8;
php_ifd_set16u(new_data, info_list->count, ImageInfo->motorola_intel);
new_data += 2;
for (i=0; i<info_list->count; i++) {
info_data = &info_list->list[i];
byte_count = php_tiff_bytes_per_format[info_data->format] * info_data->length;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail: process tag(x%04X=%s): %s%s (%d bytes)", info_data->tag, exif_get_tagname(info_data->tag, tagname, -12, tag_table_IFD TSRMLS_CC), (info_data->length>1)&&info_data->format!=TAG_FMT_UNDEFINED&&info_data->format!=TAG_FMT_STRING?"ARRAY OF ":"", exif_get_tagformat(info_data->format), byte_count);
#endif
if (info_data->tag==TAG_STRIP_OFFSETS || info_data->tag==TAG_JPEG_INTERCHANGE_FORMAT) {
php_ifd_set16u(new_data + 0, info_data->tag, ImageInfo->motorola_intel);
php_ifd_set16u(new_data + 2, TAG_FMT_ULONG, ImageInfo->motorola_intel);
php_ifd_set32u(new_data + 4, 1, ImageInfo->motorola_intel);
php_ifd_set32u(new_data + 8, new_move, ImageInfo->motorola_intel);
} else {
php_ifd_set16u(new_data + 0, info_data->tag, ImageInfo->motorola_intel);
php_ifd_set16u(new_data + 2, info_data->format, ImageInfo->motorola_intel);
php_ifd_set32u(new_data + 4, info_data->length, ImageInfo->motorola_intel);
value_ptr = exif_ifd_make_value(info_data, ImageInfo->motorola_intel TSRMLS_CC);
if (byte_count <= 4) {
memmove(new_data+8, value_ptr, 4);
} else {
php_ifd_set32u(new_data+8, new_value, ImageInfo->motorola_intel);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail: writing with value offset: 0x%04X + 0x%02X", new_value, byte_count);
#endif
memmove(ImageInfo->Thumbnail.data+new_value, value_ptr, byte_count);
new_value += byte_count;
}
efree(value_ptr);
}
new_data += 12;
}
memset(new_data, 0, 4); /* next ifd pointer */
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail: created");
#endif
break;
}
}
/* }}} */
/* {{{ exif_thumbnail_extract
* Grab the thumbnail, corrected */
static void exif_thumbnail_extract(image_info_type *ImageInfo, char *offset, size_t length TSRMLS_DC) {
if (ImageInfo->Thumbnail.data) {
exif_error_docref("exif_read_data#error_mult_thumb" EXIFERR_CC, ImageInfo, E_WARNING, "Multiple possible thumbnails");
return; /* Should not happen */
}
if (!ImageInfo->read_thumbnail) {
return; /* ignore this call */
}
/* according to EXIF 2.1, the thumbnail is not supposed to be greater than 64K */
if (ImageInfo->Thumbnail.size >= 65536
|| ImageInfo->Thumbnail.size <= 0
|| ImageInfo->Thumbnail.offset <= 0
) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Illegal thumbnail size/offset");
return;
}
/* Check to make sure we are not going to go past the ExifLength */
if ((ImageInfo->Thumbnail.offset + ImageInfo->Thumbnail.size) > length) {
EXIF_ERRLOG_THUMBEOF(ImageInfo)
return;
}
ImageInfo->Thumbnail.data = estrndup(offset + ImageInfo->Thumbnail.offset, ImageInfo->Thumbnail.size);
exif_thumbnail_build(ImageInfo TSRMLS_CC);
}
/* }}} */
/* {{{ exif_process_undefined
* Copy a string/buffer in Exif header to a character string and return length of allocated buffer if any. */
static int exif_process_undefined(char **result, char *value, size_t byte_count TSRMLS_DC) {
/* we cannot use strlcpy - here the problem is that we have to copy NUL
* chars up to byte_count, we also have to add a single NUL character to
* force end of string.
* estrndup does not return length
*/
if (byte_count) {
(*result) = estrndup(value, byte_count); /* writes the NUL at byte_count */
return byte_count+1;
}
return 0;
}
/* }}} */
/* {{{ exif_process_string_raw
* Copy a string in Exif header to a character string returns length of allocated buffer if any. */
static int exif_process_string_raw(char **result, char *value, size_t byte_count) {
/* we cannot use strlcpy - here the problem is that we have to copy NUL
* chars up to byte_count, we also have to add a single NUL character to
* force end of string.
*/
if (byte_count) {
(*result) = safe_emalloc(byte_count, 1, 1);
memcpy(*result, value, byte_count);
(*result)[byte_count] = '\0';
return byte_count+1;
}
return 0;
}
/* }}} */
/* {{{ exif_process_string
* Copy a string in Exif header to a character string and return length of allocated buffer if any.
* In contrast to exif_process_undefined this function does always return a string buffer */
static int exif_process_string(char **result, char *value, size_t byte_count TSRMLS_DC) {
/* we cannot use strlcpy - here the problem is that we cannot use strlen to
* determine the length of the string and we cannot use strlcpy with len=byte_count+1
* because then we might get into an EXCEPTION if we exceed an allocated
* memory page...so we use php_strnlen in conjunction with memcpy and add the NUL
* char.
* estrdup would sometimes allocate more memory and does not return length
*/
if ((byte_count=php_strnlen(value, byte_count)) > 0) {
return exif_process_undefined(result, value, byte_count TSRMLS_CC);
}
(*result) = estrndup("", 1); /* force empty string */
return byte_count+1;
}
/* }}} */
/* {{{ exif_process_user_comment
* Process UserComment in IFD. */
static int exif_process_user_comment(image_info_type *ImageInfo, char **pszInfoPtr, char **pszEncoding, char *szValuePtr, int ByteCount TSRMLS_DC)
{
int a;
char *decode;
size_t len;
*pszEncoding = NULL;
/* Copy the comment */
if (ByteCount>=8) {
const zend_encoding *from, *to;
if (!memcmp(szValuePtr, "UNICODE\0", 8)) {
*pszEncoding = estrdup((const char*)szValuePtr);
szValuePtr = szValuePtr+8;
ByteCount -= 8;
/* First try to detect BOM: ZERO WIDTH NO-BREAK SPACE (U+FEFF)
* since we have no encoding support for the BOM yet we skip that.
*/
if (!memcmp(szValuePtr, "\xFE\xFF", 2)) {
decode = "UCS-2BE";
szValuePtr = szValuePtr+2;
ByteCount -= 2;
} else if (!memcmp(szValuePtr, "\xFF\xFE", 2)) {
decode = "UCS-2LE";
szValuePtr = szValuePtr+2;
ByteCount -= 2;
} else if (ImageInfo->motorola_intel) {
decode = ImageInfo->decode_unicode_be;
} else {
decode = ImageInfo->decode_unicode_le;
}
to = zend_multibyte_fetch_encoding(ImageInfo->encode_unicode TSRMLS_CC);
from = zend_multibyte_fetch_encoding(decode TSRMLS_CC);
/* XXX this will fail again if encoding_converter returns on error something different than SIZE_MAX */
if (!to || !from || zend_multibyte_encoding_converter(
(unsigned char**)pszInfoPtr,
&len,
(unsigned char*)szValuePtr,
ByteCount,
to,
from
TSRMLS_CC) == (size_t)-1) {
len = exif_process_string_raw(pszInfoPtr, szValuePtr, ByteCount);
}
return len;
} else if (!memcmp(szValuePtr, "ASCII\0\0\0", 8)) {
*pszEncoding = estrdup((const char*)szValuePtr);
szValuePtr = szValuePtr+8;
ByteCount -= 8;
} else if (!memcmp(szValuePtr, "JIS\0\0\0\0\0", 8)) {
/* JIS should be translated to MB or we leave it to the user - leave it to the user */
*pszEncoding = estrdup((const char*)szValuePtr);
szValuePtr = szValuePtr+8;
ByteCount -= 8;
/* XXX this will fail again if encoding_converter returns on error something different than SIZE_MAX */
to = zend_multibyte_fetch_encoding(ImageInfo->encode_jis TSRMLS_CC);
from = zend_multibyte_fetch_encoding(ImageInfo->motorola_intel ? ImageInfo->decode_jis_be : ImageInfo->decode_jis_le TSRMLS_CC);
if (!to || !from || zend_multibyte_encoding_converter(
(unsigned char**)pszInfoPtr,
&len,
(unsigned char*)szValuePtr,
ByteCount,
to,
from
TSRMLS_CC) == (size_t)-1) {
len = exif_process_string_raw(pszInfoPtr, szValuePtr, ByteCount);
}
return len;
} else if (!memcmp(szValuePtr, "\0\0\0\0\0\0\0\0", 8)) {
/* 8 NUL bytes mean an undefined encoding and should be treated as ASCII... */
*pszEncoding = estrdup("UNDEFINED");
szValuePtr = szValuePtr+8;
ByteCount -= 8;
}
}
/* Olympus has this padded with trailing spaces. Remove these first. */
if (ByteCount>0) {
for (a=ByteCount-1;a && szValuePtr[a]==' ';a--) {
(szValuePtr)[a] = '\0';
}
}
/* normal text without encoding */
exif_process_string(pszInfoPtr, szValuePtr, ByteCount TSRMLS_CC);
return strlen(*pszInfoPtr);
}
/* }}} */
/* {{{ exif_process_unicode
* Process unicode field in IFD. */
static int exif_process_unicode(image_info_type *ImageInfo, xp_field_type *xp_field, int tag, char *szValuePtr, int ByteCount TSRMLS_DC)
{
xp_field->tag = tag;
xp_field->value = NULL;
/* XXX this will fail again if encoding_converter returns on error something different than SIZE_MAX */
if (zend_multibyte_encoding_converter(
(unsigned char**)&xp_field->value,
&xp_field->size,
(unsigned char*)szValuePtr,
ByteCount,
zend_multibyte_fetch_encoding(ImageInfo->encode_unicode TSRMLS_CC),
zend_multibyte_fetch_encoding(ImageInfo->motorola_intel ? ImageInfo->decode_unicode_be : ImageInfo->decode_unicode_le TSRMLS_CC)
TSRMLS_CC) == (size_t)-1) {
xp_field->size = exif_process_string_raw(&xp_field->value, szValuePtr, ByteCount);
}
return xp_field->size;
}
/* }}} */
/* {{{ exif_process_IFD_in_MAKERNOTE
* Process nested IFDs directories in Maker Note. */
static int exif_process_IFD_in_MAKERNOTE(image_info_type *ImageInfo, char * value_ptr, int value_len, char *offset_base, size_t IFDlength, size_t displacement TSRMLS_DC)
{
int de, i=0, section_index = SECTION_MAKERNOTE;
int NumDirEntries, old_motorola_intel, offset_diff;
const maker_note_type *maker_note;
char *dir_start;
for (i=0; i<=sizeof(maker_note_array)/sizeof(maker_note_type); i++) {
if (i==sizeof(maker_note_array)/sizeof(maker_note_type)) {
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "No maker note data found. Detected maker: %s (length = %d)", ImageInfo->make, strlen(ImageInfo->make));
#endif
/* unknown manufacturer, not an error, use it as a string */
return TRUE;
}
maker_note = maker_note_array+i;
/*exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "check (%s,%s)", maker_note->make?maker_note->make:"", maker_note->model?maker_note->model:"");*/
if (maker_note->make && (!ImageInfo->make || strcmp(maker_note->make, ImageInfo->make)))
continue;
if (maker_note->model && (!ImageInfo->model || strcmp(maker_note->model, ImageInfo->model)))
continue;
if (maker_note->id_string && strncmp(maker_note->id_string, value_ptr, maker_note->id_string_len))
continue;
break;
}
if (maker_note->offset >= value_len) {
/* Do not go past the value end */
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "IFD data too short: 0x%04X offset 0x%04X", value_len, maker_note->offset);
return FALSE;
}
dir_start = value_ptr + maker_note->offset;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Process %s @x%04X + 0x%04X=%d: %s", exif_get_sectionname(section_index), (int)dir_start-(int)offset_base+maker_note->offset+displacement, value_len, value_len, exif_char_dump(value_ptr, value_len, (int)dir_start-(int)offset_base+maker_note->offset+displacement));
#endif
ImageInfo->sections_found |= FOUND_MAKERNOTE;
old_motorola_intel = ImageInfo->motorola_intel;
switch (maker_note->byte_order) {
case MN_ORDER_INTEL:
ImageInfo->motorola_intel = 0;
break;
case MN_ORDER_MOTOROLA:
ImageInfo->motorola_intel = 1;
break;
default:
case MN_ORDER_NORMAL:
break;
}
NumDirEntries = php_ifd_get16u(dir_start, ImageInfo->motorola_intel);
switch (maker_note->offset_mode) {
case MN_OFFSET_MAKER:
offset_base = value_ptr;
break;
case MN_OFFSET_GUESS:
if (maker_note->offset + 10 + 4 >= value_len) {
/* Can not read dir_start+10 since it's beyond value end */
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "IFD data too short: 0x%04X", value_len);
return FALSE;
}
offset_diff = 2 + NumDirEntries*12 + 4 - php_ifd_get32u(dir_start+10, ImageInfo->motorola_intel);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Using automatic offset correction: 0x%04X", ((int)dir_start-(int)offset_base+maker_note->offset+displacement) + offset_diff);
#endif
if (offset_diff < 0 || offset_diff >= value_len ) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "IFD data bad offset: 0x%04X length 0x%04X", offset_diff, value_len);
return FALSE;
}
offset_base = value_ptr + offset_diff;
break;
default:
case MN_OFFSET_NORMAL:
break;
}
if ((2+NumDirEntries*12) > value_len) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Illegal IFD size: 2 + 0x%04X*12 = 0x%04X > 0x%04X", NumDirEntries, 2+NumDirEntries*12, value_len);
return FALSE;
}
for (de=0;de<NumDirEntries;de++) {
if (!exif_process_IFD_TAG(ImageInfo, dir_start + 2 + 12 * de,
offset_base, IFDlength, displacement, section_index, 0, maker_note->tag_table TSRMLS_CC)) {
return FALSE;
}
}
ImageInfo->motorola_intel = old_motorola_intel;
/* NextDirOffset (must be NULL) = php_ifd_get32u(dir_start+2+12*de, ImageInfo->motorola_intel);*/
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Subsection %s done", exif_get_sectionname(SECTION_MAKERNOTE));
#endif
return TRUE;
}
/* }}} */
/* {{{ exif_process_IFD_TAG
* Process one of the nested IFDs directories. */
static int exif_process_IFD_TAG(image_info_type *ImageInfo, char *dir_entry, char *offset_base, size_t IFDlength, size_t displacement, int section_index, int ReadNextIFD, tag_table_type tag_table TSRMLS_DC)
{
size_t length;
int tag, format, components;
char *value_ptr, tagname[64], cbuf[32], *outside=NULL;
size_t byte_count, offset_val, fpos, fgot;
int64_t byte_count_signed;
xp_field_type *tmp_xp;
#ifdef EXIF_DEBUG
char *dump_data;
int dump_free;
#endif /* EXIF_DEBUG */
/* Protect against corrupt headers */
if (ImageInfo->ifd_nesting_level > MAX_IFD_NESTING_LEVEL) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "corrupt EXIF header: maximum directory nesting level reached");
return FALSE;
}
ImageInfo->ifd_nesting_level++;
tag = php_ifd_get16u(dir_entry, ImageInfo->motorola_intel);
format = php_ifd_get16u(dir_entry+2, ImageInfo->motorola_intel);
components = php_ifd_get32u(dir_entry+4, ImageInfo->motorola_intel);
if (!format || format > NUM_FORMATS) {
/* valid format codes run from 1 to NUM_FORMATS; 0 and anything larger are illegal */
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Process tag(x%04X=%s): Illegal format code 0x%04X, suppose BYTE", tag, exif_get_tagname(tag, tagname, -12, tag_table TSRMLS_CC), format);
format = TAG_FMT_BYTE;
/*return TRUE;*/
}
if (components < 0) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Process tag(x%04X=%s): Illegal components(%ld)", tag, exif_get_tagname(tag, tagname, -12, tag_table TSRMLS_CC), components);
return FALSE;
}
byte_count_signed = (int64_t)components * php_tiff_bytes_per_format[format];
if (byte_count_signed < 0 || (byte_count_signed > INT32_MAX)) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Process tag(x%04X=%s): Illegal byte_count", tag, exif_get_tagname(tag, tagname, -12, tag_table TSRMLS_CC));
return FALSE;
}
byte_count = (size_t)byte_count_signed;
if (byte_count > 4) {
offset_val = php_ifd_get32u(dir_entry+8, ImageInfo->motorola_intel);
/* If its bigger than 4 bytes, the dir entry contains an offset. */
value_ptr = offset_base+offset_val;
/*
dir_entry is ImageInfo->file.list[sn].data+2+i*12
offset_base is ImageInfo->file.list[sn].data-dir_offset
dir_entry - offset_base is dir_offset+2+i*12
*/
if (byte_count > IFDlength || offset_val > IFDlength-byte_count || value_ptr < dir_entry || offset_val < (size_t)(dir_entry-offset_base)) {
/* It is important to check for IMAGE_FILETYPE_TIFF
* JPEG does not use absolute pointers; instead, its pointers are
* relative to the start of the TIFF header in APP1 section. */
if (byte_count > ImageInfo->FileSize || offset_val>ImageInfo->FileSize-byte_count || (ImageInfo->FileType!=IMAGE_FILETYPE_TIFF_II && ImageInfo->FileType!=IMAGE_FILETYPE_TIFF_MM && ImageInfo->FileType!=IMAGE_FILETYPE_JPEG)) {
if (value_ptr < dir_entry) {
/* we can read this if offset_val > 0 */
/* some files have their values in other parts of the file */
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Process tag(x%04X=%s): Illegal pointer offset(x%04X < x%04X)", tag, exif_get_tagname(tag, tagname, -12, tag_table TSRMLS_CC), offset_val, dir_entry);
} else {
/* this is for sure not allowed */
/* exception are IFD pointers */
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Process tag(x%04X=%s): Illegal pointer offset(x%04X + x%04X = x%04X > x%04X)", tag, exif_get_tagname(tag, tagname, -12, tag_table TSRMLS_CC), offset_val, byte_count, offset_val+byte_count, IFDlength);
}
return FALSE;
}
if (byte_count>sizeof(cbuf)) {
/* mark as outside range and get buffer */
value_ptr = safe_emalloc(byte_count, 1, 0);
outside = value_ptr;
} else {
/* In most cases we only access a small range, so
* it is faster to use a static buffer there.
* BUT it also offers the possibility to have
* pointers read without the need to free them
* explicitly before returning. */
memset(&cbuf, 0, sizeof(cbuf));
value_ptr = cbuf;
}
fpos = php_stream_tell(ImageInfo->infile);
php_stream_seek(ImageInfo->infile, displacement+offset_val, SEEK_SET);
fgot = php_stream_tell(ImageInfo->infile);
if (fgot!=displacement+offset_val) {
EFREE_IF(outside);
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Wrong file pointer: 0x%08X != 0x%08X", fgot, displacement+offset_val);
return FALSE;
}
fgot = php_stream_read(ImageInfo->infile, value_ptr, byte_count);
php_stream_seek(ImageInfo->infile, fpos, SEEK_SET);
if (fgot<byte_count) {
EFREE_IF(outside);
EXIF_ERRLOG_FILEEOF(ImageInfo)
return FALSE;
}
}
} else {
/* 4 bytes or less and value is in the dir entry itself */
value_ptr = dir_entry+8;
offset_val= value_ptr-offset_base;
}
ImageInfo->sections_found |= FOUND_ANY_TAG;
#ifdef EXIF_DEBUG
dump_data = exif_dump_data(&dump_free, format, components, length, ImageInfo->motorola_intel, value_ptr TSRMLS_CC);
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Process tag(x%04X=%s,@x%04X + x%04X(=%d)): %s%s %s", tag, exif_get_tagname(tag, tagname, -12, tag_table TSRMLS_CC), offset_val+displacement, byte_count, byte_count, (components>1)&&format!=TAG_FMT_UNDEFINED&&format!=TAG_FMT_STRING?"ARRAY OF ":"", exif_get_tagformat(format), dump_data);
if (dump_free) {
efree(dump_data);
}
#endif
if (section_index==SECTION_THUMBNAIL) {
if (!ImageInfo->Thumbnail.data) {
switch(tag) {
case TAG_IMAGEWIDTH:
case TAG_COMP_IMAGE_WIDTH:
ImageInfo->Thumbnail.width = exif_convert_any_to_int(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_IMAGEHEIGHT:
case TAG_COMP_IMAGE_HEIGHT:
ImageInfo->Thumbnail.height = exif_convert_any_to_int(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_STRIP_OFFSETS:
case TAG_JPEG_INTERCHANGE_FORMAT:
/* accept both formats */
ImageInfo->Thumbnail.offset = exif_convert_any_to_int(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_STRIP_BYTE_COUNTS:
if (ImageInfo->FileType == IMAGE_FILETYPE_TIFF_II || ImageInfo->FileType == IMAGE_FILETYPE_TIFF_MM) {
ImageInfo->Thumbnail.filetype = ImageInfo->FileType;
} else {
/* motorola is easier to read */
ImageInfo->Thumbnail.filetype = IMAGE_FILETYPE_TIFF_MM;
}
ImageInfo->Thumbnail.size = exif_convert_any_to_int(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_JPEG_INTERCHANGE_FORMAT_LEN:
if (ImageInfo->Thumbnail.filetype == IMAGE_FILETYPE_UNKNOWN) {
ImageInfo->Thumbnail.filetype = IMAGE_FILETYPE_JPEG;
ImageInfo->Thumbnail.size = exif_convert_any_to_int(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
}
break;
}
}
} else {
if (section_index==SECTION_IFD0 || section_index==SECTION_EXIF)
switch(tag) {
case TAG_COPYRIGHT:
/* check for "<photographer> NUL <editor> NUL" */
if (byte_count>1 && (length=php_strnlen(value_ptr, byte_count)) > 0) {
if (length<byte_count-1) {
/* When there are any characters after the first NUL */
ImageInfo->CopyrightPhotographer = estrdup(value_ptr);
ImageInfo->CopyrightEditor = estrndup(value_ptr+length+1, byte_count-length-1);
spprintf(&ImageInfo->Copyright, 0, "%s, %s", ImageInfo->CopyrightPhotographer, ImageInfo->CopyrightEditor);
/* format = TAG_FMT_UNDEFINED; this mustn't be ASCII */
/* but we are not supposed to change this */
/* keep in mind that image_info does not store editor value */
} else {
ImageInfo->Copyright = estrndup(value_ptr, byte_count);
}
}
break;
case TAG_USERCOMMENT:
ImageInfo->UserCommentLength = exif_process_user_comment(ImageInfo, &(ImageInfo->UserComment), &(ImageInfo->UserCommentEncoding), value_ptr, byte_count TSRMLS_CC);
break;
case TAG_XP_TITLE:
case TAG_XP_COMMENTS:
case TAG_XP_AUTHOR:
case TAG_XP_KEYWORDS:
case TAG_XP_SUBJECT:
tmp_xp = (xp_field_type*)safe_erealloc(ImageInfo->xp_fields.list, (ImageInfo->xp_fields.count+1), sizeof(xp_field_type), 0);
ImageInfo->sections_found |= FOUND_WINXP;
ImageInfo->xp_fields.list = tmp_xp;
ImageInfo->xp_fields.count++;
exif_process_unicode(ImageInfo, &(ImageInfo->xp_fields.list[ImageInfo->xp_fields.count-1]), tag, value_ptr, byte_count TSRMLS_CC);
break;
case TAG_FNUMBER:
/* Simplest way of expressing aperture, so I trust it the most.
(overwrite previously computed value if there is one) */
ImageInfo->ApertureFNumber = (float)exif_convert_any_format(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_APERTURE:
case TAG_MAX_APERTURE:
/* More relevant info always comes earlier, so only use this field if we don't
have appropriate aperture information yet. */
if (ImageInfo->ApertureFNumber == 0) {
ImageInfo->ApertureFNumber
= (float)exp(exif_convert_any_format(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC)*log(2)*0.5);
}
break;
case TAG_SHUTTERSPEED:
/* More complicated way of expressing exposure time, so only use
this value if we don't already have it from somewhere else.
SHUTTERSPEED comes after EXPOSURE TIME
*/
if (ImageInfo->ExposureTime == 0) {
ImageInfo->ExposureTime
= (float)(1/exp(exif_convert_any_format(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC)*log(2)));
}
break;
case TAG_EXPOSURETIME:
ImageInfo->ExposureTime = -1;
break;
case TAG_COMP_IMAGE_WIDTH:
ImageInfo->ExifImageWidth = exif_convert_any_to_int(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_FOCALPLANE_X_RES:
ImageInfo->FocalplaneXRes = exif_convert_any_format(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_SUBJECT_DISTANCE:
/* Indicates the distance the autofocus camera is focused to.
Tends to be less accurate as distance increases. */
ImageInfo->Distance = (float)exif_convert_any_format(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_FOCALPLANE_RESOLUTION_UNIT:
switch((int)exif_convert_any_format(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC)) {
case 1: ImageInfo->FocalplaneUnits = 25.4; break; /* inch */
case 2:
/* According to the information I was using, 2 means meters.
But looking at the Canon PowerShot's files, inches is the only
sensible value. */
ImageInfo->FocalplaneUnits = 25.4;
break;
case 3: ImageInfo->FocalplaneUnits = 10; break; /* centimeter */
case 4: ImageInfo->FocalplaneUnits = 1; break; /* millimeter */
case 5: ImageInfo->FocalplaneUnits = .001; break; /* micrometer */
}
break;
case TAG_SUB_IFD:
if (format==TAG_FMT_IFD) {
/* If this is called we are either in a TIFF's thumbnail or in a JPEG where we cannot handle it */
/* TIFF thumbnail: our data structure cannot store a thumbnail of a thumbnail */
/* JPEG: do we have the data area and what would we do with it? */
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Skip SUB IFD");
}
break;
case TAG_MAKE:
ImageInfo->make = estrndup(value_ptr, byte_count);
break;
case TAG_MODEL:
ImageInfo->model = estrndup(value_ptr, byte_count);
break;
case TAG_MAKER_NOTE:
if (!exif_process_IFD_in_MAKERNOTE(ImageInfo, value_ptr, byte_count, offset_base, IFDlength, displacement TSRMLS_CC)) {
EFREE_IF(outside);
return FALSE;
}
break;
case TAG_EXIF_IFD_POINTER:
case TAG_GPS_IFD_POINTER:
case TAG_INTEROP_IFD_POINTER:
if (ReadNextIFD) {
char *Subdir_start;
int sub_section_index = 0;
switch(tag) {
case TAG_EXIF_IFD_POINTER:
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Found EXIF");
#endif
ImageInfo->sections_found |= FOUND_EXIF;
sub_section_index = SECTION_EXIF;
break;
case TAG_GPS_IFD_POINTER:
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Found GPS");
#endif
ImageInfo->sections_found |= FOUND_GPS;
sub_section_index = SECTION_GPS;
break;
case TAG_INTEROP_IFD_POINTER:
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Found INTEROPERABILITY");
#endif
ImageInfo->sections_found |= FOUND_INTEROP;
sub_section_index = SECTION_INTEROP;
break;
}
Subdir_start = offset_base + php_ifd_get32u(value_ptr, ImageInfo->motorola_intel);
if (Subdir_start < offset_base || Subdir_start > offset_base+IFDlength) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Illegal IFD Pointer");
EFREE_IF(outside); /* do not leak an out-of-entry value buffer */
return FALSE;
}
if (!exif_process_IFD_in_JPEG(ImageInfo, Subdir_start, offset_base, IFDlength, displacement, sub_section_index TSRMLS_CC)) {
EFREE_IF(outside);
return FALSE;
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Subsection %s done", exif_get_sectionname(sub_section_index));
#endif
}
}
}
exif_iif_add_tag(ImageInfo, section_index, exif_get_tagname(tag, tagname, sizeof(tagname), tag_table TSRMLS_CC), tag, format, components, value_ptr TSRMLS_CC);
EFREE_IF(outside);
return TRUE;
}
/* }}} */
/* {{{ exif_process_IFD_in_JPEG
* Process one of the nested IFDs directories. */
static int exif_process_IFD_in_JPEG(image_info_type *ImageInfo, char *dir_start, char *offset_base, size_t IFDlength, size_t displacement, int section_index TSRMLS_DC)
{
int de;
int NumDirEntries;
int NextDirOffset;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Process %s (x%04X(=%d))", exif_get_sectionname(section_index), IFDlength, IFDlength);
#endif
ImageInfo->sections_found |= FOUND_IFD0;
if ((dir_start + 2) >= (offset_base+IFDlength)) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Illegal IFD size");
return FALSE;
}
NumDirEntries = php_ifd_get16u(dir_start, ImageInfo->motorola_intel);
if ((dir_start+2+NumDirEntries*12) > (offset_base+IFDlength)) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Illegal IFD size: x%04X + 2 + x%04X*12 = x%04X > x%04X", (int)((size_t)dir_start+2-(size_t)offset_base), NumDirEntries, (int)((size_t)dir_start+2+NumDirEntries*12-(size_t)offset_base), IFDlength);
return FALSE;
}
for (de=0;de<NumDirEntries;de++) {
if (!exif_process_IFD_TAG(ImageInfo, dir_start + 2 + 12 * de,
offset_base, IFDlength, displacement, section_index, 1, exif_get_tag_table(section_index) TSRMLS_CC)) {
return FALSE;
}
}
/*
* Ignore IFD2 if it purportedly exists
*/
if (section_index == SECTION_THUMBNAIL) {
return TRUE;
}
/*
* Hack to make it process IFD1, I hope
* There are 2 IFDs; the second one holds the keys (0x0201 and 0x0202) to the thumbnail
*/
if ((dir_start+2+12*de + 4) >= (offset_base+IFDlength)) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Illegal IFD size");
return FALSE;
}
NextDirOffset = php_ifd_get32u(dir_start+2+12*de, ImageInfo->motorola_intel);
if (NextDirOffset) {
/* the next check may look wrong at first glance, but here IFDlength means the length of all IFDs */
if (offset_base + NextDirOffset < offset_base || offset_base + NextDirOffset > offset_base+IFDlength) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Illegal IFD offset");
return FALSE;
}
/* That is the IFD for the first thumbnail */
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Expect next IFD to be thumbnail");
#endif
if (exif_process_IFD_in_JPEG(ImageInfo, offset_base + NextDirOffset, offset_base, IFDlength, displacement, SECTION_THUMBNAIL TSRMLS_CC)) {
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail size: 0x%04X", ImageInfo->Thumbnail.size);
#endif
if (ImageInfo->Thumbnail.filetype != IMAGE_FILETYPE_UNKNOWN
&& ImageInfo->Thumbnail.size
&& ImageInfo->Thumbnail.offset
&& ImageInfo->read_thumbnail
) {
exif_thumbnail_extract(ImageInfo, offset_base, IFDlength TSRMLS_CC);
}
return TRUE;
} else {
return FALSE;
}
}
return TRUE;
}
/* }}} */
/* {{{ exif_process_TIFF_in_JPEG
Process a TIFF header in a JPEG file
*/
static void exif_process_TIFF_in_JPEG(image_info_type *ImageInfo, char *CharBuf, size_t length, size_t displacement TSRMLS_DC)
{
unsigned exif_value_2a, offset_of_ifd;
/* set the thumbnail stuff to nothing so we can test to see if they get set up */
if (memcmp(CharBuf, "II", 2) == 0) {
ImageInfo->motorola_intel = 0;
} else if (memcmp(CharBuf, "MM", 2) == 0) {
ImageInfo->motorola_intel = 1;
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Invalid TIFF alignment marker");
return;
}
/* Check the next two values for correctness. */
if (length < 8) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Invalid TIFF start (1)");
return;
}
exif_value_2a = php_ifd_get16u(CharBuf+2, ImageInfo->motorola_intel);
offset_of_ifd = php_ifd_get32u(CharBuf+4, ImageInfo->motorola_intel);
if (exif_value_2a != 0x2a || offset_of_ifd < 0x08) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Invalid TIFF start (1)");
return;
}
if (offset_of_ifd > length) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Invalid IFD start");
return;
}
ImageInfo->sections_found |= FOUND_IFD0;
/* First directory starts at offset 8. Offsets starts at 0. */
exif_process_IFD_in_JPEG(ImageInfo, CharBuf+offset_of_ifd, CharBuf, length/*-14*/, displacement, SECTION_IFD0 TSRMLS_CC);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Process TIFF in JPEG done");
#endif
/* Compute the CCD width, in millimeters. */
if (ImageInfo->FocalplaneXRes != 0) {
ImageInfo->CCDWidth = (float)(ImageInfo->ExifImageWidth * ImageInfo->FocalplaneUnits / ImageInfo->FocalplaneXRes);
}
}
/* }}} */
/* {{{ exif_process_APP1
Process a JPEG APP1 block marker
Describes all the drivel that most digital cameras include...
*/
static void exif_process_APP1(image_info_type *ImageInfo, char *CharBuf, size_t length, size_t displacement TSRMLS_DC)
{
/* Check the APP1 for Exif Identifier Code */
static const uchar ExifHeader[] = {0x45, 0x78, 0x69, 0x66, 0x00, 0x00};
if (length <= 8 || memcmp(CharBuf+2, ExifHeader, 6)) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Incorrect APP1 Exif Identifier Code");
return;
}
exif_process_TIFF_in_JPEG(ImageInfo, CharBuf + 8, length - 8, displacement+8 TSRMLS_CC);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Process APP1/EXIF done");
#endif
}
/* }}} */
/* {{{ exif_process_APP12
Process a JPEG APP12 block marker used by OLYMPUS
*/
static void exif_process_APP12(image_info_type *ImageInfo, char *buffer, size_t length TSRMLS_DC)
{
size_t l1, l2=0;
if ((l1 = php_strnlen(buffer+2, length-2)) > 0) {
exif_iif_add_tag(ImageInfo, SECTION_APP12, "Company", TAG_NONE, TAG_FMT_STRING, l1, buffer+2 TSRMLS_CC);
if (length > 2+l1+1) {
l2 = php_strnlen(buffer+2+l1+1, length-2-l1-1);
exif_iif_add_tag(ImageInfo, SECTION_APP12, "Info", TAG_NONE, TAG_FMT_STRING, l2, buffer+2+l1+1 TSRMLS_CC);
}
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Process section APP12 with l1=%d, l2=%d done", l1, l2);
#endif
}
/* }}} */
/* {{{ exif_scan_JPEG_header
* Parse the marker stream until SOS or EOI is seen; */
static int exif_scan_JPEG_header(image_info_type *ImageInfo TSRMLS_DC)
{
int section, sn;
int marker = 0, last_marker = M_PSEUDO, comment_correction=1;
unsigned int ll, lh;
uchar *Data;
size_t fpos, size, got, itemlen;
jpeg_sof_info sof_info;
for(section=0;;section++) {
#ifdef EXIF_DEBUG
fpos = php_stream_tell(ImageInfo->infile);
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Needing section %d @ 0x%08X", ImageInfo->file.count, fpos);
#endif
/* get marker byte, swallowing possible padding */
/* some software does not count the length bytes of the COM section */
/* one company doing so is very much involved in JPEG... so we accept this too */
if (last_marker==M_COM && comment_correction) {
comment_correction = 2;
}
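/* comment_correction swallows up to two non-0xFF bytes directly after a COM
   segment, covering writers that excluded the two length bytes from the
   COM length field. */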
do {
if ((marker = php_stream_getc(ImageInfo->infile)) == EOF) {
EXIF_ERRLOG_CORRUPT(ImageInfo)
return FALSE;
}
if (last_marker==M_COM && comment_correction>0) {
if (marker!=0xFF) {
marker = 0xff;
comment_correction--;
} else {
last_marker = M_PSEUDO; /* stop skipping 0 for M_COM */
}
}
} while (marker == 0xff);
if (last_marker==M_COM && !comment_correction) {
exif_error_docref("exif_read_data#error_mcom" EXIFERR_CC, ImageInfo, E_NOTICE, "Image has corrupt COM section: some software set wrong length information");
}
if (last_marker==M_COM && comment_correction)
return M_EOI; /* illegal: the byte after the COM section was not 0xFF */
fpos = php_stream_tell(ImageInfo->infile);
if (marker == 0xff) {
/* 0xff is legal padding, but if we get that many, something's wrong. */
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Too many padding bytes");
return FALSE;
}
/* Read the length of the section. */
if ((lh = php_stream_getc(ImageInfo->infile)) == EOF) {
EXIF_ERRLOG_CORRUPT(ImageInfo)
return FALSE;
}
if ((ll = php_stream_getc(ImageInfo->infile)) == EOF) {
EXIF_ERRLOG_CORRUPT(ImageInfo)
return FALSE;
}
itemlen = (lh << 8) | ll;
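/* JPEG stores segment lengths big-endian, and the length includes the two
   length bytes themselves, so any value below 2 is necessarily corrupt. */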
if (itemlen < 2) {
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "%s, Section length: 0x%02X%02X", EXIF_ERROR_CORRUPT, lh, ll);
#else
EXIF_ERRLOG_CORRUPT(ImageInfo)
#endif
return FALSE;
}
sn = exif_file_sections_add(ImageInfo, marker, itemlen+1, NULL);
Data = ImageInfo->file.list[sn].data;
/* Store first two pre-read bytes. */
Data[0] = (uchar)lh;
Data[1] = (uchar)ll;
got = php_stream_read(ImageInfo->infile, (char*)(Data+2), itemlen-2); /* Read the whole section. */
if (got != itemlen-2) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Error reading from file: got=x%04X(=%d) != itemlen-2=x%04X(=%d)", got, got, itemlen-2, itemlen-2);
return FALSE;
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Process section(x%02X=%s) @ x%04X + x%04X(=%d)", marker, exif_get_markername(marker), fpos, itemlen, itemlen);
#endif
switch(marker) {
case M_SOS: /* stop before hitting compressed data */
/* If reading entire image is requested, read the rest of the data. */
if (ImageInfo->read_all) {
/* Determine how much file is left. */
fpos = php_stream_tell(ImageInfo->infile);
size = ImageInfo->FileSize - fpos;
sn = exif_file_sections_add(ImageInfo, M_PSEUDO, size, NULL);
Data = ImageInfo->file.list[sn].data;
got = php_stream_read(ImageInfo->infile, (char*)Data, size);
if (got != size) {
EXIF_ERRLOG_FILEEOF(ImageInfo)
return FALSE;
}
}
return TRUE;
case M_EOI: /* in case it's a tables-only JPEG stream */
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "No image in jpeg!");
return (ImageInfo->sections_found&(~FOUND_COMPUTED)) ? TRUE : FALSE;
case M_COM: /* Comment section */
exif_process_COM(ImageInfo, (char *)Data, itemlen TSRMLS_CC);
break;
case M_EXIF:
if (!(ImageInfo->sections_found&FOUND_IFD0)) {
/*ImageInfo->sections_found |= FOUND_EXIF;*/
/* Seen files from some 'U-lead' software with Vivitar scanner
that uses marker 31 later in the file (no clue what for!) */
exif_process_APP1(ImageInfo, (char *)Data, itemlen, fpos TSRMLS_CC);
}
break;
case M_APP12:
exif_process_APP12(ImageInfo, (char *)Data, itemlen TSRMLS_CC);
break;
case M_SOF0:
case M_SOF1:
case M_SOF2:
case M_SOF3:
case M_SOF5:
case M_SOF6:
case M_SOF7:
case M_SOF9:
case M_SOF10:
case M_SOF11:
case M_SOF13:
case M_SOF14:
case M_SOF15:
if ((itemlen - 2) < 6) {
return FALSE;
}
exif_process_SOFn(Data, marker, &sof_info);
ImageInfo->Width = sof_info.width;
ImageInfo->Height = sof_info.height;
if (sof_info.num_components == 3) {
ImageInfo->IsColor = 1;
} else {
ImageInfo->IsColor = 0;
}
break;
default:
/* skip any other marker silently. */
break;
}
/* keep track of last marker */
last_marker = marker;
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Done");
#endif
return TRUE;
}
/* }}} */
/* {{{ exif_scan_thumbnail
* scan JPEG in thumbnail (memory) */
static int exif_scan_thumbnail(image_info_type *ImageInfo TSRMLS_DC)
{
uchar c, *data = (uchar*)ImageInfo->Thumbnail.data;
int n, marker;
size_t length=2, pos=0;
jpeg_sof_info sof_info;
if (!data) {
return FALSE; /* nothing to do here */
}
if (memcmp(data, "\xFF\xD8\xFF", 3)) {
if (!ImageInfo->Thumbnail.width && !ImageInfo->Thumbnail.height) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Thumbnail is not a JPEG image");
}
return FALSE;
}
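/* Walk the marker stream of the embedded JPEG: skip 0xFF fill bytes, take
   the next byte as the marker, then read the 16-bit big-endian segment
   length (which counts the length field itself). */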
for (;;) {
pos += length;
if (pos>=ImageInfo->Thumbnail.size)
return FALSE;
c = data[pos++];
if (pos>=ImageInfo->Thumbnail.size)
return FALSE;
if (c != 0xFF) {
return FALSE;
}
n = 8;
while ((c = data[pos++]) == 0xFF && n--) {
if (pos+3>=ImageInfo->Thumbnail.size)
return FALSE;
/* +3 = pos++ of next check when reaching marker + 2 bytes for length */
}
if (c == 0xFF)
return FALSE;
marker = c;
length = php_jpg_get16(data+pos);
if (pos+length>=ImageInfo->Thumbnail.size) {
return FALSE;
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail: process section(x%02X=%s) @ x%04X + x%04X", marker, exif_get_markername(marker), pos, length);
#endif
switch (marker) {
case M_SOF0:
case M_SOF1:
case M_SOF2:
case M_SOF3:
case M_SOF5:
case M_SOF6:
case M_SOF7:
case M_SOF9:
case M_SOF10:
case M_SOF11:
case M_SOF13:
case M_SOF14:
case M_SOF15:
/* handle SOFn block */
exif_process_SOFn(data+pos, marker, &sof_info);
ImageInfo->Thumbnail.height = sof_info.height;
ImageInfo->Thumbnail.width = sof_info.width;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail: size: %d * %d", sof_info.width, sof_info.height);
#endif
return TRUE;
case M_SOS:
case M_EOI:
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Could not compute size of thumbnail");
return FALSE;
break;
default:
/* just skip */
break;
}
}
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Could not compute size of thumbnail");
return FALSE;
}
/* }}} */
/* {{{ exif_process_IFD_in_TIFF
* Parse the TIFF header; */
static int exif_process_IFD_in_TIFF(image_info_type *ImageInfo, size_t dir_offset, int section_index TSRMLS_DC)
{
int i, sn, num_entries, sub_section_index = 0;
unsigned char *dir_entry;
char tagname[64];
size_t ifd_size, dir_size, entry_offset, next_offset, entry_length, entry_value=0, fgot;
int entry_tag , entry_type;
tag_table_type tag_table = exif_get_tag_table(section_index);
if (ImageInfo->ifd_nesting_level > MAX_IFD_NESTING_LEVEL) {
return FALSE;
}
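/* A TIFF IFD is a 2-byte entry count, then num_entries 12-byte entries
   (tag, type, count, value/offset), then a 4-byte offset to the next IFD;
   the dir_size computation below mirrors exactly this layout. */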
if (ImageInfo->FileSize >= dir_offset+2) {
sn = exif_file_sections_add(ImageInfo, M_PSEUDO, 2, NULL);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read from TIFF: filesize(x%04X), IFD dir(x%04X + x%04X)", ImageInfo->FileSize, dir_offset, 2);
#endif
php_stream_seek(ImageInfo->infile, dir_offset, SEEK_SET); /* we do not know the order of sections */
php_stream_read(ImageInfo->infile, (char*)ImageInfo->file.list[sn].data, 2);
num_entries = php_ifd_get16u(ImageInfo->file.list[sn].data, ImageInfo->motorola_intel);
dir_size = 2/*num dir entries*/ +12/*length of entry*/*num_entries +4/* offset to next ifd (points to thumbnail or NULL)*/;
if (ImageInfo->FileSize >= dir_offset+dir_size) {
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read from TIFF: filesize(x%04X), IFD dir(x%04X + x%04X), IFD entries(%d)", ImageInfo->FileSize, dir_offset+2, dir_size-2, num_entries);
#endif
if (exif_file_sections_realloc(ImageInfo, sn, dir_size TSRMLS_CC)) {
return FALSE;
}
php_stream_read(ImageInfo->infile, (char*)(ImageInfo->file.list[sn].data+2), dir_size-2);
/*exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Dump: %s", exif_char_dump(ImageInfo->file.list[sn].data, dir_size, 0));*/
next_offset = php_ifd_get32u(ImageInfo->file.list[sn].data + dir_size - 4, ImageInfo->motorola_intel);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read from TIFF done, next offset x%04X", next_offset);
#endif
/* now we have the directory we can look how long it should be */
ifd_size = dir_size;
for(i=0;i<num_entries;i++) {
dir_entry = ImageInfo->file.list[sn].data+2+i*12;
entry_tag = php_ifd_get16u(dir_entry+0, ImageInfo->motorola_intel);
entry_type = php_ifd_get16u(dir_entry+2, ImageInfo->motorola_intel);
if (entry_type > NUM_FORMATS) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read from TIFF: tag(0x%04X,%12s): Illegal format code 0x%04X, switching to BYTE", entry_tag, exif_get_tagname(entry_tag, tagname, -12, tag_table TSRMLS_CC), entry_type);
/* Since this is repeated in exif_process_IFD_TAG make it a notice here */
/* and make it a warning in the exif_process_IFD_TAG which is called */
/* elsewhere. */
entry_type = TAG_FMT_BYTE;
/*The next line would break the image on writeback: */
/* php_ifd_set16u(dir_entry+2, entry_type, ImageInfo->motorola_intel);*/
}
entry_length = php_ifd_get32u(dir_entry+4, ImageInfo->motorola_intel) * php_tiff_bytes_per_format[entry_type];
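/* Per the TIFF spec, values of four bytes or fewer are stored inline in
   the entry's value field; larger values live elsewhere in the file, at
   the 32-bit offset held in that field. */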
if (entry_length <= 4) {
switch(entry_type) {
case TAG_FMT_USHORT:
entry_value = php_ifd_get16u(dir_entry+8, ImageInfo->motorola_intel);
break;
case TAG_FMT_SSHORT:
entry_value = php_ifd_get16s(dir_entry+8, ImageInfo->motorola_intel);
break;
case TAG_FMT_ULONG:
entry_value = php_ifd_get32u(dir_entry+8, ImageInfo->motorola_intel);
break;
case TAG_FMT_SLONG:
entry_value = php_ifd_get32s(dir_entry+8, ImageInfo->motorola_intel);
break;
}
switch(entry_tag) {
case TAG_IMAGEWIDTH:
case TAG_COMP_IMAGE_WIDTH:
ImageInfo->Width = entry_value;
break;
case TAG_IMAGEHEIGHT:
case TAG_COMP_IMAGE_HEIGHT:
ImageInfo->Height = entry_value;
break;
case TAG_PHOTOMETRIC_INTERPRETATION:
switch (entry_value) {
case PMI_BLACK_IS_ZERO:
case PMI_WHITE_IS_ZERO:
case PMI_TRANSPARENCY_MASK:
ImageInfo->IsColor = 0;
break;
case PMI_RGB:
case PMI_PALETTE_COLOR:
case PMI_SEPARATED:
case PMI_YCBCR:
case PMI_CIELAB:
ImageInfo->IsColor = 1;
break;
}
break;
}
} else {
entry_offset = php_ifd_get32u(dir_entry+8, ImageInfo->motorola_intel);
/* only expand the ifd cache when the entry sits exactly at the end of the current cache; */
/* otherwise there may be huge holes between two entries */
if (entry_offset + entry_length > dir_offset + ifd_size
&& entry_offset == dir_offset + ifd_size) {
ifd_size = entry_offset + entry_length - dir_offset;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Resize struct: x%04X + x%04X - x%04X = x%04X", entry_offset, entry_length, dir_offset, ifd_size);
#endif
}
}
}
if (ImageInfo->FileSize >= dir_offset + ImageInfo->file.list[sn].size) {
if (ifd_size > dir_size) {
if (dir_offset + ifd_size > ImageInfo->FileSize) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Error in TIFF: filesize(x%04X) less than size of IFD(x%04X + x%04X)", ImageInfo->FileSize, dir_offset, ifd_size);
return FALSE;
}
if (exif_file_sections_realloc(ImageInfo, sn, ifd_size TSRMLS_CC)) {
return FALSE;
}
/* read values not stored in directory itself */
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read from TIFF: filesize(x%04X), IFD(x%04X + x%04X)", ImageInfo->FileSize, dir_offset, ifd_size);
#endif
php_stream_read(ImageInfo->infile, (char*)(ImageInfo->file.list[sn].data+dir_size), ifd_size-dir_size);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read from TIFF, done");
#endif
}
/* now process the tags */
for(i=0;i<num_entries;i++) {
dir_entry = ImageInfo->file.list[sn].data+2+i*12;
entry_tag = php_ifd_get16u(dir_entry+0, ImageInfo->motorola_intel);
entry_type = php_ifd_get16u(dir_entry+2, ImageInfo->motorola_intel);
/*entry_length = php_ifd_get32u(dir_entry+4, ImageInfo->motorola_intel);*/
if (entry_tag == TAG_EXIF_IFD_POINTER ||
entry_tag == TAG_INTEROP_IFD_POINTER ||
entry_tag == TAG_GPS_IFD_POINTER ||
entry_tag == TAG_SUB_IFD
) {
switch(entry_tag) {
case TAG_EXIF_IFD_POINTER:
ImageInfo->sections_found |= FOUND_EXIF;
sub_section_index = SECTION_EXIF;
break;
case TAG_GPS_IFD_POINTER:
ImageInfo->sections_found |= FOUND_GPS;
sub_section_index = SECTION_GPS;
break;
case TAG_INTEROP_IFD_POINTER:
ImageInfo->sections_found |= FOUND_INTEROP;
sub_section_index = SECTION_INTEROP;
break;
case TAG_SUB_IFD:
ImageInfo->sections_found |= FOUND_THUMBNAIL;
sub_section_index = SECTION_THUMBNAIL;
break;
}
entry_offset = php_ifd_get32u(dir_entry+8, ImageInfo->motorola_intel);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Next IFD: %s @x%04X", exif_get_sectionname(sub_section_index), entry_offset);
#endif
ImageInfo->ifd_nesting_level++;
exif_process_IFD_in_TIFF(ImageInfo, entry_offset, sub_section_index TSRMLS_CC);
if (section_index!=SECTION_THUMBNAIL && entry_tag==TAG_SUB_IFD) {
if (ImageInfo->Thumbnail.filetype != IMAGE_FILETYPE_UNKNOWN
&& ImageInfo->Thumbnail.size
&& ImageInfo->Thumbnail.offset
&& ImageInfo->read_thumbnail
) {
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "%s THUMBNAIL @0x%04X + 0x%04X", ImageInfo->Thumbnail.data ? "Ignore" : "Read", ImageInfo->Thumbnail.offset, ImageInfo->Thumbnail.size);
#endif
if (!ImageInfo->Thumbnail.data) {
ImageInfo->Thumbnail.data = safe_emalloc(ImageInfo->Thumbnail.size, 1, 0);
php_stream_seek(ImageInfo->infile, ImageInfo->Thumbnail.offset, SEEK_SET);
fgot = php_stream_read(ImageInfo->infile, ImageInfo->Thumbnail.data, ImageInfo->Thumbnail.size);
if (fgot < ImageInfo->Thumbnail.size) {
EXIF_ERRLOG_THUMBEOF(ImageInfo)
}
exif_thumbnail_build(ImageInfo TSRMLS_CC);
}
}
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Next IFD: %s done", exif_get_sectionname(sub_section_index));
#endif
} else {
if (!exif_process_IFD_TAG(ImageInfo, (char*)dir_entry,
(char*)(ImageInfo->file.list[sn].data-dir_offset),
ifd_size, 0, section_index, 0, tag_table TSRMLS_CC)) {
return FALSE;
}
}
}
/* If we had a thumbnail in a SUB_IFD we have ANOTHER image in NEXT IFD */
if (next_offset && section_index != SECTION_THUMBNAIL) {
/* this should be a thumbnail IFD */
/* the thumbnail itself is stored at Tag=StripOffsets */
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read next IFD (THUMBNAIL) at x%04X", next_offset);
#endif
ImageInfo->ifd_nesting_level++;
exif_process_IFD_in_TIFF(ImageInfo, next_offset, SECTION_THUMBNAIL TSRMLS_CC);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "%s THUMBNAIL @0x%04X + 0x%04X", ImageInfo->Thumbnail.data ? "Ignore" : "Read", ImageInfo->Thumbnail.offset, ImageInfo->Thumbnail.size);
#endif
if (!ImageInfo->Thumbnail.data && ImageInfo->Thumbnail.offset && ImageInfo->Thumbnail.size && ImageInfo->read_thumbnail) {
ImageInfo->Thumbnail.data = safe_emalloc(ImageInfo->Thumbnail.size, 1, 0);
php_stream_seek(ImageInfo->infile, ImageInfo->Thumbnail.offset, SEEK_SET);
fgot = php_stream_read(ImageInfo->infile, ImageInfo->Thumbnail.data, ImageInfo->Thumbnail.size);
if (fgot < ImageInfo->Thumbnail.size) {
EXIF_ERRLOG_THUMBEOF(ImageInfo)
}
exif_thumbnail_build(ImageInfo TSRMLS_CC);
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read next IFD (THUMBNAIL) done");
#endif
}
return TRUE;
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Error in TIFF: filesize(x%04X) less than size of IFD(x%04X)", ImageInfo->FileSize, dir_offset+ImageInfo->file.list[sn].size);
return FALSE;
}
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Error in TIFF: filesize(x%04X) less than size of IFD dir(x%04X)", ImageInfo->FileSize, dir_offset+dir_size);
return FALSE;
}
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Error in TIFF: filesize(x%04X) less than start of IFD dir(x%04X)", ImageInfo->FileSize, dir_offset+2);
return FALSE;
}
}
/* }}} */
/* {{{ exif_scan_FILE_header
* Parse the marker stream until SOS or EOI is seen; */
static int exif_scan_FILE_header(image_info_type *ImageInfo TSRMLS_DC)
{
unsigned char file_header[8];
int ret = FALSE;
ImageInfo->FileType = IMAGE_FILETYPE_UNKNOWN;
if (ImageInfo->FileSize >= 2) {
php_stream_seek(ImageInfo->infile, 0, SEEK_SET);
if (php_stream_read(ImageInfo->infile, (char*)file_header, 2) != 2) {
return FALSE;
}
if ((file_header[0]==0xff) && (file_header[1]==M_SOI)) {
ImageInfo->FileType = IMAGE_FILETYPE_JPEG;
if (exif_scan_JPEG_header(ImageInfo TSRMLS_CC)) {
ret = TRUE;
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Invalid JPEG file");
}
} else if (ImageInfo->FileSize >= 8) {
if (php_stream_read(ImageInfo->infile, (char*)(file_header+2), 6) != 6) {
return FALSE;
}
if (!memcmp(file_header, "II\x2A\x00", 4)) {
ImageInfo->FileType = IMAGE_FILETYPE_TIFF_II;
ImageInfo->motorola_intel = 0;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "File has TIFF/II format");
#endif
ImageInfo->sections_found |= FOUND_IFD0;
if (exif_process_IFD_in_TIFF(ImageInfo,
php_ifd_get32u(file_header + 4, ImageInfo->motorola_intel),
SECTION_IFD0 TSRMLS_CC)) {
ret = TRUE;
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Invalid TIFF file");
}
} else if (!memcmp(file_header, "MM\x00\x2a", 4)) {
ImageInfo->FileType = IMAGE_FILETYPE_TIFF_MM;
ImageInfo->motorola_intel = 1;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "File has TIFF/MM format");
#endif
ImageInfo->sections_found |= FOUND_IFD0;
if (exif_process_IFD_in_TIFF(ImageInfo,
php_ifd_get32u(file_header + 4, ImageInfo->motorola_intel),
SECTION_IFD0 TSRMLS_CC)) {
ret = TRUE;
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Invalid TIFF file");
}
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "File not supported");
return FALSE;
}
}
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "File too small (%d)", ImageInfo->FileSize);
}
return ret;
}
/* }}} */
/* {{{ exif_discard_imageinfo
Discard data scanned by exif_read_file.
*/
static int exif_discard_imageinfo(image_info_type *ImageInfo)
{
int i;
EFREE_IF(ImageInfo->FileName);
EFREE_IF(ImageInfo->UserComment);
EFREE_IF(ImageInfo->UserCommentEncoding);
EFREE_IF(ImageInfo->Copyright);
EFREE_IF(ImageInfo->CopyrightPhotographer);
EFREE_IF(ImageInfo->CopyrightEditor);
EFREE_IF(ImageInfo->Thumbnail.data);
EFREE_IF(ImageInfo->encode_unicode);
EFREE_IF(ImageInfo->decode_unicode_be);
EFREE_IF(ImageInfo->decode_unicode_le);
EFREE_IF(ImageInfo->encode_jis);
EFREE_IF(ImageInfo->decode_jis_be);
EFREE_IF(ImageInfo->decode_jis_le);
EFREE_IF(ImageInfo->make);
EFREE_IF(ImageInfo->model);
for (i=0; i<ImageInfo->xp_fields.count; i++) {
EFREE_IF(ImageInfo->xp_fields.list[i].value);
}
EFREE_IF(ImageInfo->xp_fields.list);
for (i=0; i<SECTION_COUNT; i++) {
exif_iif_free(ImageInfo, i);
}
exif_file_sections_free(ImageInfo);
memset(ImageInfo, 0, sizeof(*ImageInfo));
return TRUE;
}
/* }}} */
/* {{{ exif_read_file
*/
static int exif_read_file(image_info_type *ImageInfo, char *FileName, int read_thumbnail, int read_all TSRMLS_DC)
{
int ret;
struct stat st;
/* Start with an empty image information structure. */
memset(ImageInfo, 0, sizeof(*ImageInfo));
ImageInfo->motorola_intel = -1; /* flag as unknown */
ImageInfo->infile = php_stream_open_wrapper(FileName, "rb", STREAM_MUST_SEEK|IGNORE_PATH, NULL);
if (!ImageInfo->infile) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Unable to open file");
return FALSE;
}
if (php_stream_is(ImageInfo->infile, PHP_STREAM_IS_STDIO)) {
if (VCWD_STAT(FileName, &st) >= 0) {
if ((st.st_mode & S_IFMT) != S_IFREG) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Not a file");
php_stream_close(ImageInfo->infile);
return FALSE;
}
/* Store file date/time. */
ImageInfo->FileDateTime = st.st_mtime;
ImageInfo->FileSize = st.st_size;
/*exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Opened stream is file: %d", ImageInfo->FileSize);*/
}
} else {
if (!ImageInfo->FileSize) {
php_stream_seek(ImageInfo->infile, 0, SEEK_END);
ImageInfo->FileSize = php_stream_tell(ImageInfo->infile);
php_stream_seek(ImageInfo->infile, 0, SEEK_SET);
}
}
php_basename(FileName, strlen(FileName), NULL, 0, &(ImageInfo->FileName), NULL TSRMLS_CC);
ImageInfo->read_thumbnail = read_thumbnail;
ImageInfo->read_all = read_all;
ImageInfo->Thumbnail.filetype = IMAGE_FILETYPE_UNKNOWN;
ImageInfo->encode_unicode = safe_estrdup(EXIF_G(encode_unicode));
ImageInfo->decode_unicode_be = safe_estrdup(EXIF_G(decode_unicode_be));
ImageInfo->decode_unicode_le = safe_estrdup(EXIF_G(decode_unicode_le));
ImageInfo->encode_jis = safe_estrdup(EXIF_G(encode_jis));
ImageInfo->decode_jis_be = safe_estrdup(EXIF_G(decode_jis_be));
ImageInfo->decode_jis_le = safe_estrdup(EXIF_G(decode_jis_le));
ImageInfo->ifd_nesting_level = 0;
/* Scan the JPEG headers. */
ret = exif_scan_FILE_header(ImageInfo TSRMLS_CC);
php_stream_close(ImageInfo->infile);
return ret;
}
/* }}} */
/* {{{ proto array exif_read_data(string filename [, sections_needed [, sub_arrays[, read_thumbnail]]])
Reads header data from the JPEG/TIFF image filename and optionally reads the internal thumbnails */
PHP_FUNCTION(exif_read_data)
{
char *p_name, *p_sections_needed = NULL;
int p_name_len, p_sections_needed_len = 0;
zend_bool sub_arrays=0, read_thumbnail=0, read_all=0;
int i, ret, sections_needed=0;
image_info_type ImageInfo;
char tmp[64], *sections_str, *s;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "p|sbb", &p_name, &p_name_len, &p_sections_needed, &p_sections_needed_len, &sub_arrays, &read_thumbnail) == FAILURE) {
return;
}
memset(&ImageInfo, 0, sizeof(ImageInfo));
if (p_sections_needed) {
spprintf(&sections_str, 0, ",%s,", p_sections_needed);
/* sections_str DOES start with , and SPACES are NOT allowed in names */
s = sections_str;
while (*++s) {
if (*s == ' ') {
*s = ',';
}
}
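/* Wrapping both the request string and every section name in commas makes
   strstr() match whole names only, so a section name that is a prefix of
   another cannot produce a false match. */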
for (i = 0; i < SECTION_COUNT; i++) {
snprintf(tmp, sizeof(tmp), ",%s,", exif_get_sectionname(i));
if (strstr(sections_str, tmp)) {
sections_needed |= 1<<i;
}
}
EFREE_IF(sections_str);
/* now see what we need */
#ifdef EXIF_DEBUG
sections_str = exif_get_sectionlist(sections_needed TSRMLS_CC);
if (!sections_str) {
RETURN_FALSE;
}
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Sections needed: %s", sections_str[0] ? sections_str : "None");
EFREE_IF(sections_str);
#endif
}
ret = exif_read_file(&ImageInfo, p_name, read_thumbnail, read_all TSRMLS_CC);
sections_str = exif_get_sectionlist(ImageInfo.sections_found TSRMLS_CC);
#ifdef EXIF_DEBUG
if (sections_str)
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Sections found: %s", sections_str[0] ? sections_str : "None");
#endif
ImageInfo.sections_found |= FOUND_COMPUTED|FOUND_FILE; /* do not report these in debug output */
if (ret == FALSE || (sections_needed && !(sections_needed&ImageInfo.sections_found))) {
/* array_init must be checked at last! otherwise the array must be freed if a later test fails. */
exif_discard_imageinfo(&ImageInfo);
EFREE_IF(sections_str);
RETURN_FALSE;
}
array_init(return_value);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Generate section FILE");
#endif
/* now we can add our information */
exif_iif_add_str(&ImageInfo, SECTION_FILE, "FileName", ImageInfo.FileName TSRMLS_CC);
exif_iif_add_int(&ImageInfo, SECTION_FILE, "FileDateTime", ImageInfo.FileDateTime TSRMLS_CC);
exif_iif_add_int(&ImageInfo, SECTION_FILE, "FileSize", ImageInfo.FileSize TSRMLS_CC);
exif_iif_add_int(&ImageInfo, SECTION_FILE, "FileType", ImageInfo.FileType TSRMLS_CC);
exif_iif_add_str(&ImageInfo, SECTION_FILE, "MimeType", (char*)php_image_type_to_mime_type(ImageInfo.FileType) TSRMLS_CC);
exif_iif_add_str(&ImageInfo, SECTION_FILE, "SectionsFound", sections_str ? sections_str : "NONE" TSRMLS_CC);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Generate section COMPUTED");
#endif
if (ImageInfo.Width>0 && ImageInfo.Height>0) {
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "html" TSRMLS_CC, "width=\"%d\" height=\"%d\"", ImageInfo.Width, ImageInfo.Height);
exif_iif_add_int(&ImageInfo, SECTION_COMPUTED, "Height", ImageInfo.Height TSRMLS_CC);
exif_iif_add_int(&ImageInfo, SECTION_COMPUTED, "Width", ImageInfo.Width TSRMLS_CC);
}
exif_iif_add_int(&ImageInfo, SECTION_COMPUTED, "IsColor", ImageInfo.IsColor TSRMLS_CC);
if (ImageInfo.motorola_intel != -1) {
exif_iif_add_int(&ImageInfo, SECTION_COMPUTED, "ByteOrderMotorola", ImageInfo.motorola_intel TSRMLS_CC);
}
if (ImageInfo.FocalLength) {
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "FocalLength" TSRMLS_CC, "%4.1Fmm", ImageInfo.FocalLength);
if(ImageInfo.CCDWidth) {
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "35mmFocalLength" TSRMLS_CC, "%dmm", (int)(ImageInfo.FocalLength/ImageInfo.CCDWidth*35+0.5));
}
}
if(ImageInfo.CCDWidth) {
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "CCDWidth" TSRMLS_CC, "%dmm", (int)ImageInfo.CCDWidth);
}
if(ImageInfo.ExposureTime>0) {
if(ImageInfo.ExposureTime <= 0.5) {
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "ExposureTime" TSRMLS_CC, "%0.3F s (1/%d)", ImageInfo.ExposureTime, (int)(0.5 + 1/ImageInfo.ExposureTime));
} else {
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "ExposureTime" TSRMLS_CC, "%0.3F s", ImageInfo.ExposureTime);
}
}
if(ImageInfo.ApertureFNumber) {
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "ApertureFNumber" TSRMLS_CC, "f/%.1F", ImageInfo.ApertureFNumber);
}
if(ImageInfo.Distance) {
if(ImageInfo.Distance<0) {
exif_iif_add_str(&ImageInfo, SECTION_COMPUTED, "FocusDistance", "Infinite" TSRMLS_CC);
} else {
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "FocusDistance" TSRMLS_CC, "%0.2Fm", ImageInfo.Distance);
}
}
if (ImageInfo.UserComment) {
exif_iif_add_buffer(&ImageInfo, SECTION_COMPUTED, "UserComment", ImageInfo.UserCommentLength, ImageInfo.UserComment TSRMLS_CC);
if (ImageInfo.UserCommentEncoding && strlen(ImageInfo.UserCommentEncoding)) {
exif_iif_add_str(&ImageInfo, SECTION_COMPUTED, "UserCommentEncoding", ImageInfo.UserCommentEncoding TSRMLS_CC);
}
}
exif_iif_add_str(&ImageInfo, SECTION_COMPUTED, "Copyright", ImageInfo.Copyright TSRMLS_CC);
exif_iif_add_str(&ImageInfo, SECTION_COMPUTED, "Copyright.Photographer", ImageInfo.CopyrightPhotographer TSRMLS_CC);
exif_iif_add_str(&ImageInfo, SECTION_COMPUTED, "Copyright.Editor", ImageInfo.CopyrightEditor TSRMLS_CC);
for (i=0; i<ImageInfo.xp_fields.count; i++) {
exif_iif_add_str(&ImageInfo, SECTION_WINXP, exif_get_tagname(ImageInfo.xp_fields.list[i].tag, NULL, 0, exif_get_tag_table(SECTION_WINXP) TSRMLS_CC), ImageInfo.xp_fields.list[i].value TSRMLS_CC);
}
if (ImageInfo.Thumbnail.size) {
if (read_thumbnail) {
/* not exif_iif_add_str : this is a buffer */
exif_iif_add_tag(&ImageInfo, SECTION_THUMBNAIL, "THUMBNAIL", TAG_NONE, TAG_FMT_UNDEFINED, ImageInfo.Thumbnail.size, ImageInfo.Thumbnail.data TSRMLS_CC);
}
if (!ImageInfo.Thumbnail.width || !ImageInfo.Thumbnail.height) {
/* try to evaluate if thumbnail data is present */
exif_scan_thumbnail(&ImageInfo TSRMLS_CC);
}
exif_iif_add_int(&ImageInfo, SECTION_COMPUTED, "Thumbnail.FileType", ImageInfo.Thumbnail.filetype TSRMLS_CC);
exif_iif_add_str(&ImageInfo, SECTION_COMPUTED, "Thumbnail.MimeType", (char*)php_image_type_to_mime_type(ImageInfo.Thumbnail.filetype) TSRMLS_CC);
}
if (ImageInfo.Thumbnail.width && ImageInfo.Thumbnail.height) {
exif_iif_add_int(&ImageInfo, SECTION_COMPUTED, "Thumbnail.Height", ImageInfo.Thumbnail.height TSRMLS_CC);
exif_iif_add_int(&ImageInfo, SECTION_COMPUTED, "Thumbnail.Width", ImageInfo.Thumbnail.width TSRMLS_CC);
}
EFREE_IF(sections_str);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Adding image infos");
#endif
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_FILE TSRMLS_CC);
add_assoc_image_info(return_value, 1, &ImageInfo, SECTION_COMPUTED TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_ANY_TAG TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_IFD0 TSRMLS_CC);
add_assoc_image_info(return_value, 1, &ImageInfo, SECTION_THUMBNAIL TSRMLS_CC);
add_assoc_image_info(return_value, 1, &ImageInfo, SECTION_COMMENT TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_EXIF TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_GPS TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_INTEROP TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_FPIX TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_APP12 TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_WINXP TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_MAKERNOTE TSRMLS_CC);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Discarding info");
#endif
exif_discard_imageinfo(&ImageInfo);
#ifdef EXIF_DEBUG
php_error_docref1(NULL TSRMLS_CC, Z_STRVAL_PP(p_name), E_NOTICE, "done");
#endif
}
/* }}} */
/* {{{ proto string exif_thumbnail(string filename [, &width, &height [, &imagetype]])
Reads the embedded thumbnail */
PHP_FUNCTION(exif_thumbnail)
{
zval *p_width = 0, *p_height = 0, *p_imagetype = 0;
char *p_name;
int p_name_len, ret, arg_c = ZEND_NUM_ARGS();
image_info_type ImageInfo;
memset(&ImageInfo, 0, sizeof(ImageInfo));
if (arg_c!=1 && arg_c!=3 && arg_c!=4) {
WRONG_PARAM_COUNT;
}
if (zend_parse_parameters(arg_c TSRMLS_CC, "p|z/z/z/", &p_name, &p_name_len, &p_width, &p_height, &p_imagetype) == FAILURE) {
return;
}
ret = exif_read_file(&ImageInfo, p_name, 1, 0 TSRMLS_CC);
if (ret==FALSE) {
exif_discard_imageinfo(&ImageInfo);
RETURN_FALSE;
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Thumbnail data %d %d %d, %d x %d", ImageInfo.Thumbnail.data, ImageInfo.Thumbnail.size, ImageInfo.Thumbnail.filetype, ImageInfo.Thumbnail.width, ImageInfo.Thumbnail.height);
#endif
if (!ImageInfo.Thumbnail.data || !ImageInfo.Thumbnail.size) {
exif_discard_imageinfo(&ImageInfo);
RETURN_FALSE;
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Returning thumbnail(%d)", ImageInfo.Thumbnail.size);
#endif
ZVAL_STRINGL(return_value, ImageInfo.Thumbnail.data, ImageInfo.Thumbnail.size, 1);
if (arg_c >= 3) {
if (!ImageInfo.Thumbnail.width || !ImageInfo.Thumbnail.height) {
exif_scan_thumbnail(&ImageInfo TSRMLS_CC);
}
zval_dtor(p_width);
zval_dtor(p_height);
ZVAL_LONG(p_width, ImageInfo.Thumbnail.width);
ZVAL_LONG(p_height, ImageInfo.Thumbnail.height);
}
if (arg_c >= 4) {
zval_dtor(p_imagetype);
ZVAL_LONG(p_imagetype, ImageInfo.Thumbnail.filetype);
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Discarding info");
#endif
exif_discard_imageinfo(&ImageInfo);
#ifdef EXIF_DEBUG
php_error_docref1(NULL TSRMLS_CC, p_name, E_NOTICE, "Done");
#endif
}
/* }}} */
/* {{{ proto int exif_imagetype(string imagefile)
Get the type of an image */
PHP_FUNCTION(exif_imagetype)
{
char *imagefile;
int imagefile_len;
php_stream * stream;
int itype = 0;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &imagefile, &imagefile_len) == FAILURE) {
return;
}
stream = php_stream_open_wrapper(imagefile, "rb", IGNORE_PATH|REPORT_ERRORS, NULL);
if (stream == NULL) {
RETURN_FALSE;
}
itype = php_getimagetype(stream, NULL TSRMLS_CC);
php_stream_close(stream);
if (itype == IMAGE_FILETYPE_UNKNOWN) {
RETURN_FALSE;
} else {
ZVAL_LONG(return_value, itype);
}
}
/* }}} */
#endif
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: sw=4 ts=4 tw=78 fdm=marker
* vim<600: sw=4 ts=4 tw=78
*/
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_5258_0 |
crossvul-cpp_data_bad_4936_0 | /*
* Copyright(c) 2006 - 2007 Atheros Corporation. All rights reserved.
* Copyright(c) 2007 - 2008 Chris Snook <csnook@redhat.com>
*
* Derived from Intel e1000 driver
* Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/atomic.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/hardirq.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/irqflags.h>
#include <linux/irqreturn.h>
#include <linux/mii.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/pm.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tcp.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include "atl2.h"
#define ATL2_DRV_VERSION "2.2.3"
static const char atl2_driver_name[] = "atl2";
static const char atl2_driver_string[] = "Atheros(R) L2 Ethernet Driver";
static const char atl2_copyright[] = "Copyright (c) 2007 Atheros Corporation.";
static const char atl2_driver_version[] = ATL2_DRV_VERSION;
static const struct ethtool_ops atl2_ethtool_ops;
MODULE_AUTHOR("Atheros Corporation <xiong.huang@atheros.com>, Chris Snook <csnook@redhat.com>");
MODULE_DESCRIPTION("Atheros Fast Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ATL2_DRV_VERSION);
/*
* atl2_pci_tbl - PCI Device ID Table
*/
static const struct pci_device_id atl2_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2)},
/* required last entry */
{0,}
};
MODULE_DEVICE_TABLE(pci, atl2_pci_tbl);
static void atl2_check_options(struct atl2_adapter *adapter);
/**
* atl2_sw_init - Initialize general software structures (struct atl2_adapter)
* @adapter: board private structure to initialize
*
* atl2_sw_init initializes the Adapter private data structure.
* Fields are initialized based on PCI device information and
* OS network device settings (MTU size).
*/
static int atl2_sw_init(struct atl2_adapter *adapter)
{
struct atl2_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
/* PCI config space info */
hw->vendor_id = pdev->vendor;
hw->device_id = pdev->device;
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_id = pdev->subsystem_device;
hw->revision_id = pdev->revision;
pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
adapter->wol = 0;
adapter->ict = 50000; /* ~100ms */
adapter->link_speed = SPEED_0; /* hardware init */
adapter->link_duplex = FULL_DUPLEX;
hw->phy_configured = false;
hw->preamble_len = 7;
hw->ipgt = 0x60;
hw->min_ifg = 0x50;
hw->ipgr1 = 0x40;
hw->ipgr2 = 0x60;
hw->retry_buf = 2;
hw->max_retry = 0xf;
hw->lcol = 0x37;
hw->jam_ipg = 7;
hw->fc_rxd_hi = 0;
hw->fc_rxd_lo = 0;
hw->max_frame_size = adapter->netdev->mtu;
spin_lock_init(&adapter->stats_lock);
set_bit(__ATL2_DOWN, &adapter->flags);
return 0;
}
/**
* atl2_set_multi - Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
* The set_multi entry point is called whenever the multicast address
* list or the network interface flags are updated. This routine is
* responsible for configuring the hardware for proper multicast,
* promiscuous mode, and all-multi behavior.
*/
static void atl2_set_multi(struct net_device *netdev)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
struct atl2_hw *hw = &adapter->hw;
struct netdev_hw_addr *ha;
u32 rctl;
u32 hash_value;
/* Check for Promiscuous and All Multicast modes */
rctl = ATL2_READ_REG(hw, REG_MAC_CTRL);
if (netdev->flags & IFF_PROMISC) {
rctl |= MAC_CTRL_PROMIS_EN;
} else if (netdev->flags & IFF_ALLMULTI) {
rctl |= MAC_CTRL_MC_ALL_EN;
rctl &= ~MAC_CTRL_PROMIS_EN;
} else
rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
ATL2_WRITE_REG(hw, REG_MAC_CTRL, rctl);
/* clear the old settings from the multicast hash table */
ATL2_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
/* compute mc addresses' hash values, and put them into the hash table */
netdev_for_each_mc_addr(ha, netdev) {
hash_value = atl2_hash_mc_addr(hw, ha->addr);
atl2_hash_set(hw, hash_value);
}
}
static void init_ring_ptrs(struct atl2_adapter *adapter)
{
/* Read / Write Ptr Initialize: */
adapter->txd_write_ptr = 0;
atomic_set(&adapter->txd_read_ptr, 0);
adapter->rxd_read_ptr = 0;
adapter->rxd_write_ptr = 0;
atomic_set(&adapter->txs_write_ptr, 0);
adapter->txs_next_clear = 0;
}
/**
* atl2_configure - Configure Transmit&Receive Unit after Reset
* @adapter: board private structure
*
* Configure the Tx /Rx unit of the MAC after a reset.
*/
static int atl2_configure(struct atl2_adapter *adapter)
{
struct atl2_hw *hw = &adapter->hw;
u32 value;
/* clear interrupt status */
ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0xffffffff);
/* set MAC Address */
value = (((u32)hw->mac_addr[2]) << 24) |
(((u32)hw->mac_addr[3]) << 16) |
(((u32)hw->mac_addr[4]) << 8) |
(((u32)hw->mac_addr[5]));
ATL2_WRITE_REG(hw, REG_MAC_STA_ADDR, value);
value = (((u32)hw->mac_addr[0]) << 8) |
(((u32)hw->mac_addr[1]));
ATL2_WRITE_REG(hw, (REG_MAC_STA_ADDR+4), value);
/* HI base address */
ATL2_WRITE_REG(hw, REG_DESC_BASE_ADDR_HI,
(u32)((adapter->ring_dma & 0xffffffff00000000ULL) >> 32));
/* LO base address */
ATL2_WRITE_REG(hw, REG_TXD_BASE_ADDR_LO,
(u32)(adapter->txd_dma & 0x00000000ffffffffULL));
ATL2_WRITE_REG(hw, REG_TXS_BASE_ADDR_LO,
(u32)(adapter->txs_dma & 0x00000000ffffffffULL));
ATL2_WRITE_REG(hw, REG_RXD_BASE_ADDR_LO,
(u32)(adapter->rxd_dma & 0x00000000ffffffffULL));
/* element count */
ATL2_WRITE_REGW(hw, REG_TXD_MEM_SIZE, (u16)(adapter->txd_ring_size/4));
ATL2_WRITE_REGW(hw, REG_TXS_MEM_SIZE, (u16)adapter->txs_ring_size);
ATL2_WRITE_REGW(hw, REG_RXD_BUF_NUM, (u16)adapter->rxd_ring_size);
/* config Internal SRAM */
/*
ATL2_WRITE_REGW(hw, REG_SRAM_TXRAM_END, sram_tx_end);
ATL2_WRITE_REGW(hw, REG_SRAM_TXRAM_END, sram_rx_end);
*/
/* config IPG/IFG */
value = (((u32)hw->ipgt & MAC_IPG_IFG_IPGT_MASK) <<
MAC_IPG_IFG_IPGT_SHIFT) |
(((u32)hw->min_ifg & MAC_IPG_IFG_MIFG_MASK) <<
MAC_IPG_IFG_MIFG_SHIFT) |
(((u32)hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK) <<
MAC_IPG_IFG_IPGR1_SHIFT)|
(((u32)hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK) <<
MAC_IPG_IFG_IPGR2_SHIFT);
ATL2_WRITE_REG(hw, REG_MAC_IPG_IFG, value);
/* config Half-Duplex Control */
value = ((u32)hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) |
(((u32)hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK) <<
MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
(0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
(((u32)hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK) <<
MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
ATL2_WRITE_REG(hw, REG_MAC_HALF_DUPLX_CTRL, value);
/* set Interrupt Moderator Timer */
ATL2_WRITE_REGW(hw, REG_IRQ_MODU_TIMER_INIT, adapter->imt);
ATL2_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_ITIMER_EN);
/* set Interrupt Clear Timer */
ATL2_WRITE_REGW(hw, REG_CMBDISDMA_TIMER, adapter->ict);
/* set MTU */
ATL2_WRITE_REG(hw, REG_MTU, adapter->netdev->mtu +
ENET_HEADER_SIZE + VLAN_SIZE + ETHERNET_FCS_SIZE);
/* 1590 */
ATL2_WRITE_REG(hw, REG_TX_CUT_THRESH, 0x177);
/* flow control */
ATL2_WRITE_REGW(hw, REG_PAUSE_ON_TH, hw->fc_rxd_hi);
ATL2_WRITE_REGW(hw, REG_PAUSE_OFF_TH, hw->fc_rxd_lo);
/* Init mailbox */
ATL2_WRITE_REGW(hw, REG_MB_TXD_WR_IDX, (u16)adapter->txd_write_ptr);
ATL2_WRITE_REGW(hw, REG_MB_RXD_RD_IDX, (u16)adapter->rxd_read_ptr);
/* enable DMA read/write */
ATL2_WRITE_REGB(hw, REG_DMAR, DMAR_EN);
ATL2_WRITE_REGB(hw, REG_DMAW, DMAW_EN);
value = ATL2_READ_REG(&adapter->hw, REG_ISR);
if ((value & ISR_PHY_LINKDOWN) != 0)
value = 1; /* config failed */
else
value = 0;
/* clear all interrupt status */
ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0x3fffffff);
ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0);
return value;
}
/**
* atl2_setup_ring_resources - allocate Tx / RX descriptor resources
* @adapter: board private structure
*
* Return 0 on success, negative on failure
*/
static s32 atl2_setup_ring_resources(struct atl2_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
int size;
u8 offset = 0;
/* real ring DMA buffer */
adapter->ring_size = size =
adapter->txd_ring_size * 1 + 7 + /* dword align */
adapter->txs_ring_size * 4 + 7 + /* dword align */
adapter->rxd_ring_size * 1536 + 127; /* 128bytes align */
adapter->ring_vir_addr = pci_alloc_consistent(pdev, size,
&adapter->ring_dma);
if (!adapter->ring_vir_addr)
return -ENOMEM;
memset(adapter->ring_vir_addr, 0, adapter->ring_size);
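	/* All three rings live in one DMA buffer; the extra 7 + 7 + 127 bytes
	 * allocated above leave room to align the TXD and TXS rings to 8 bytes
	 * and to place the RXD ring relative to a 128-byte boundary by hand
	 * below. */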
/* Init TXD Ring */
adapter->txd_dma = adapter->ring_dma;
offset = (adapter->txd_dma & 0x7) ? (8 - (adapter->txd_dma & 0x7)) : 0;
adapter->txd_dma += offset;
adapter->txd_ring = adapter->ring_vir_addr + offset;
/* Init TXS Ring */
adapter->txs_dma = adapter->txd_dma + adapter->txd_ring_size;
offset = (adapter->txs_dma & 0x7) ? (8 - (adapter->txs_dma & 0x7)) : 0;
adapter->txs_dma += offset;
adapter->txs_ring = (struct tx_pkt_status *)
(((u8 *)adapter->txd_ring) + (adapter->txd_ring_size + offset));
/* Init RXD Ring */
adapter->rxd_dma = adapter->txs_dma + adapter->txs_ring_size * 4;
offset = (adapter->rxd_dma & 127) ?
(128 - (adapter->rxd_dma & 127)) : 0;
if (offset > 7)
offset -= 8;
else
offset += (128 - 8);
adapter->rxd_dma += offset;
adapter->rxd_ring = (struct rx_desc *) (((u8 *)adapter->txs_ring) +
(adapter->txs_ring_size * 4 + offset));
/*
* Read / Write Ptr Initialize:
* init_ring_ptrs(adapter);
*/
return 0;
}
/**
* atl2_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
*/
static inline void atl2_irq_enable(struct atl2_adapter *adapter)
{
ATL2_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK);
ATL2_WRITE_FLUSH(&adapter->hw);
}
/**
* atl2_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
*/
static inline void atl2_irq_disable(struct atl2_adapter *adapter)
{
ATL2_WRITE_REG(&adapter->hw, REG_IMR, 0);
ATL2_WRITE_FLUSH(&adapter->hw);
synchronize_irq(adapter->pdev->irq);
}
static void __atl2_vlan_mode(netdev_features_t features, u32 *ctrl)
{
if (features & NETIF_F_HW_VLAN_CTAG_RX) {
/* enable VLAN tag insert/strip */
*ctrl |= MAC_CTRL_RMV_VLAN;
} else {
/* disable VLAN tag insert/strip */
*ctrl &= ~MAC_CTRL_RMV_VLAN;
}
}
static void atl2_vlan_mode(struct net_device *netdev,
netdev_features_t features)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
u32 ctrl;
atl2_irq_disable(adapter);
ctrl = ATL2_READ_REG(&adapter->hw, REG_MAC_CTRL);
__atl2_vlan_mode(features, &ctrl);
ATL2_WRITE_REG(&adapter->hw, REG_MAC_CTRL, ctrl);
atl2_irq_enable(adapter);
}
static void atl2_restore_vlan(struct atl2_adapter *adapter)
{
atl2_vlan_mode(adapter->netdev, adapter->netdev->features);
}
static netdev_features_t atl2_fix_features(struct net_device *netdev,
netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
* enable/disable make sure tx flag is always in same state as rx.
*/
if (features & NETIF_F_HW_VLAN_CTAG_RX)
features |= NETIF_F_HW_VLAN_CTAG_TX;
else
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return features;
}
static int atl2_set_features(struct net_device *netdev,
netdev_features_t features)
{
netdev_features_t changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_CTAG_RX)
atl2_vlan_mode(netdev, features);
return 0;
}
static void atl2_intr_rx(struct atl2_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct rx_desc *rxd;
struct sk_buff *skb;
do {
rxd = adapter->rxd_ring+adapter->rxd_write_ptr;
if (!rxd->status.update)
break; /* end of tx */
/* clear this flag at once */
rxd->status.update = 0;
if (rxd->status.ok && rxd->status.pkt_size >= 60) {
int rx_size = (int)(rxd->status.pkt_size - 4);
/* alloc new buffer */
skb = netdev_alloc_skb_ip_align(netdev, rx_size);
if (NULL == skb) {
/*
* Check that some rx space is free. If not,
* free one and mark stats->rx_dropped++.
*/
netdev->stats.rx_dropped++;
break;
}
memcpy(skb->data, rxd->packet, rx_size);
skb_put(skb, rx_size);
skb->protocol = eth_type_trans(skb, netdev);
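			/* The NIC reports the VLAN TCI with its bits permuted;
			 * this shuffle is the exact inverse of the transform
			 * applied on the transmit side in atl2_xmit_frame(). */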
if (rxd->status.vlan) {
u16 vlan_tag = (rxd->status.vtag>>4) |
((rxd->status.vtag&7) << 13) |
((rxd->status.vtag&8) << 9);
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
}
netif_rx(skb);
netdev->stats.rx_bytes += rx_size;
netdev->stats.rx_packets++;
} else {
netdev->stats.rx_errors++;
if (rxd->status.ok && rxd->status.pkt_size <= 60)
netdev->stats.rx_length_errors++;
if (rxd->status.mcast)
netdev->stats.multicast++;
if (rxd->status.crc)
netdev->stats.rx_crc_errors++;
if (rxd->status.align)
netdev->stats.rx_frame_errors++;
}
/* advance write ptr */
if (++adapter->rxd_write_ptr == adapter->rxd_ring_size)
adapter->rxd_write_ptr = 0;
} while (1);
/* update mailbox? */
adapter->rxd_read_ptr = adapter->rxd_write_ptr;
ATL2_WRITE_REGW(&adapter->hw, REG_MB_RXD_RD_IDX, adapter->rxd_read_ptr);
}
static void atl2_intr_tx(struct atl2_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
u32 txd_read_ptr;
u32 txs_write_ptr;
struct tx_pkt_status *txs;
struct tx_pkt_header *txph;
int free_hole = 0;
do {
txs_write_ptr = (u32) atomic_read(&adapter->txs_write_ptr);
txs = adapter->txs_ring + txs_write_ptr;
if (!txs->update)
break; /* tx stop here */
free_hole = 1;
txs->update = 0;
if (++txs_write_ptr == adapter->txs_ring_size)
txs_write_ptr = 0;
atomic_set(&adapter->txs_write_ptr, (int)txs_write_ptr);
txd_read_ptr = (u32) atomic_read(&adapter->txd_read_ptr);
txph = (struct tx_pkt_header *)
(((u8 *)adapter->txd_ring) + txd_read_ptr);
if (txph->pkt_size != txs->pkt_size) {
struct tx_pkt_status *old_txs = txs;
printk(KERN_WARNING
"%s: txs packet size not consistent with txd"
" txd_:0x%08x, txs_:0x%08x!\n",
adapter->netdev->name,
*(u32 *)txph, *(u32 *)txs);
printk(KERN_WARNING
"txd read ptr: 0x%x\n",
txd_read_ptr);
txs = adapter->txs_ring + txs_write_ptr;
printk(KERN_WARNING
"txs-behind:0x%08x\n",
*(u32 *)txs);
if (txs_write_ptr < 2) {
txs = adapter->txs_ring +
(adapter->txs_ring_size +
txs_write_ptr - 2);
} else {
txs = adapter->txs_ring + (txs_write_ptr - 2);
}
printk(KERN_WARNING
"txs-before:0x%08x\n",
*(u32 *)txs);
txs = old_txs;
}
/* 4 bytes for the TPH, plus the packet rounded up to a dword */
txd_read_ptr += (((u32)(txph->pkt_size) + 7) & ~3);
if (txd_read_ptr >= adapter->txd_ring_size)
txd_read_ptr -= adapter->txd_ring_size;
atomic_set(&adapter->txd_read_ptr, (int)txd_read_ptr);
/* tx statistics: */
if (txs->ok) {
netdev->stats.tx_bytes += txs->pkt_size;
netdev->stats.tx_packets++;
} else
	netdev->stats.tx_errors++;
if (txs->defer)
netdev->stats.collisions++;
if (txs->abort_col)
netdev->stats.tx_aborted_errors++;
if (txs->late_col)
netdev->stats.tx_window_errors++;
if (txs->underun)
netdev->stats.tx_fifo_errors++;
} while (1);
if (free_hole) {
if (netif_queue_stopped(adapter->netdev) &&
netif_carrier_ok(adapter->netdev))
netif_wake_queue(adapter->netdev);
}
}
static void atl2_check_for_link(struct atl2_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
u16 phy_data = 0;
spin_lock(&adapter->stats_lock);
atl2_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
atl2_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
spin_unlock(&adapter->stats_lock);
/* notify upper layer link down ASAP */
if (!(phy_data & BMSR_LSTATUS)) { /* Link Down */
if (netif_carrier_ok(netdev)) { /* old link state: Up */
printk(KERN_INFO "%s: %s NIC Link is Down\n",
atl2_driver_name, netdev->name);
adapter->link_speed = SPEED_0;
netif_carrier_off(netdev);
netif_stop_queue(netdev);
}
}
schedule_work(&adapter->link_chg_task);
}
static inline void atl2_clear_phy_int(struct atl2_adapter *adapter)
{
u16 phy_data;
spin_lock(&adapter->stats_lock);
atl2_read_phy_reg(&adapter->hw, 19, &phy_data);
spin_unlock(&adapter->stats_lock);
}
/**
* atl2_intr - Interrupt Handler
* @irq: interrupt number
* @data: pointer to a network interface device structure
*/
static irqreturn_t atl2_intr(int irq, void *data)
{
struct atl2_adapter *adapter = netdev_priv(data);
struct atl2_hw *hw = &adapter->hw;
u32 status;
status = ATL2_READ_REG(hw, REG_ISR);
if (0 == status)
return IRQ_NONE;
/* link event */
if (status & ISR_PHY)
atl2_clear_phy_int(adapter);
/* clear ISR status, and Enable CMB DMA/Disable Interrupt */
ATL2_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);
/* check if PCIE PHY Link down */
if (status & ISR_PHY_LINKDOWN) {
if (netif_running(adapter->netdev)) { /* reset MAC */
ATL2_WRITE_REG(hw, REG_ISR, 0);
ATL2_WRITE_REG(hw, REG_IMR, 0);
ATL2_WRITE_FLUSH(hw);
schedule_work(&adapter->reset_task);
return IRQ_HANDLED;
}
}
/* check if DMA read/write error? */
if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
ATL2_WRITE_REG(hw, REG_ISR, 0);
ATL2_WRITE_REG(hw, REG_IMR, 0);
ATL2_WRITE_FLUSH(hw);
schedule_work(&adapter->reset_task);
return IRQ_HANDLED;
}
/* link event */
if (status & (ISR_PHY | ISR_MANUAL)) {
adapter->netdev->stats.tx_carrier_errors++;
atl2_check_for_link(adapter);
}
/* transmit event */
if (status & ISR_TX_EVENT)
atl2_intr_tx(adapter);
/* rx exception */
if (status & ISR_RX_EVENT)
atl2_intr_rx(adapter);
/* re-enable Interrupt */
ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0);
return IRQ_HANDLED;
}
static int atl2_request_irq(struct atl2_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
int flags, err = 0;
flags = IRQF_SHARED;
adapter->have_msi = true;
err = pci_enable_msi(adapter->pdev);
if (err)
adapter->have_msi = false;
if (adapter->have_msi)
flags &= ~IRQF_SHARED;
return request_irq(adapter->pdev->irq, atl2_intr, flags, netdev->name,
netdev);
}
/**
* atl2_free_ring_resources - Free Tx / RX descriptor Resources
* @adapter: board private structure
*
* Free all transmit software resources
*/
static void atl2_free_ring_resources(struct atl2_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
pci_free_consistent(pdev, adapter->ring_size, adapter->ring_vir_addr,
adapter->ring_dma);
}
/**
* atl2_open - Called when a network interface is made active
* @netdev: network interface device structure
*
* Returns 0 on success, negative value on failure
*
* The open entry point is called when a network interface is made
* active by the system (IFF_UP). At this point all resources needed
* for transmit and receive operations are allocated, the interrupt
* handler is registered with the OS, the watchdog timer is started,
* and the stack is notified that the interface is ready.
*/
static int atl2_open(struct net_device *netdev)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
int err;
u32 val;
/* disallow open during test */
if (test_bit(__ATL2_TESTING, &adapter->flags))
return -EBUSY;
/* allocate transmit descriptors */
err = atl2_setup_ring_resources(adapter);
if (err)
return err;
err = atl2_init_hw(&adapter->hw);
if (err) {
err = -EIO;
goto err_init_hw;
}
/* hardware has been reset, we need to reload some things */
atl2_set_multi(netdev);
init_ring_ptrs(adapter);
atl2_restore_vlan(adapter);
if (atl2_configure(adapter)) {
err = -EIO;
goto err_config;
}
err = atl2_request_irq(adapter);
if (err)
goto err_req_irq;
clear_bit(__ATL2_DOWN, &adapter->flags);
mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 4*HZ));
val = ATL2_READ_REG(&adapter->hw, REG_MASTER_CTRL);
ATL2_WRITE_REG(&adapter->hw, REG_MASTER_CTRL,
val | MASTER_CTRL_MANUAL_INT);
atl2_irq_enable(adapter);
return 0;
err_init_hw:
err_req_irq:
err_config:
atl2_free_ring_resources(adapter);
atl2_reset_hw(&adapter->hw);
return err;
}
static void atl2_down(struct atl2_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
/* signal that we're down so the interrupt handler does not
* reschedule our watchdog timer */
set_bit(__ATL2_DOWN, &adapter->flags);
netif_tx_disable(netdev);
/* reset MAC to disable all RX/TX */
atl2_reset_hw(&adapter->hw);
msleep(1);
atl2_irq_disable(adapter);
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_config_timer);
clear_bit(0, &adapter->cfg_phy);
netif_carrier_off(netdev);
adapter->link_speed = SPEED_0;
adapter->link_duplex = -1;
}
static void atl2_free_irq(struct atl2_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
free_irq(adapter->pdev->irq, netdev);
#ifdef CONFIG_PCI_MSI
if (adapter->have_msi)
pci_disable_msi(adapter->pdev);
#endif
}
/**
* atl2_close - Disables a network interface
* @netdev: network interface device structure
*
* Returns 0, this is not allowed to fail
*
* The close entry point is called when an interface is de-activated
* by the OS. The hardware is still under the drivers control, but
* needs to be disabled. A global MAC reset is issued to stop the
* hardware, and all transmit and receive resources are freed.
*/
static int atl2_close(struct net_device *netdev)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
WARN_ON(test_bit(__ATL2_RESETTING, &adapter->flags));
atl2_down(adapter);
atl2_free_irq(adapter);
atl2_free_ring_resources(adapter);
return 0;
}
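/*
 * Ring accounting for the two TX rings below: one unit is always left
 * unused so that equal producer/consumer indices can only mean "empty";
 * the free count is therefore (consumer - producer - 1) modulo the ring
 * size, written out as the two branches in each helper.
 */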
static inline int TxsFreeUnit(struct atl2_adapter *adapter)
{
u32 txs_write_ptr = (u32) atomic_read(&adapter->txs_write_ptr);
return (adapter->txs_next_clear >= txs_write_ptr) ?
(int) (adapter->txs_ring_size - adapter->txs_next_clear +
txs_write_ptr - 1) :
(int) (txs_write_ptr - adapter->txs_next_clear - 1);
}
static inline int TxdFreeBytes(struct atl2_adapter *adapter)
{
u32 txd_read_ptr = (u32)atomic_read(&adapter->txd_read_ptr);
return (adapter->txd_write_ptr >= txd_read_ptr) ?
(int) (adapter->txd_ring_size - adapter->txd_write_ptr +
txd_read_ptr - 1) :
(int) (txd_read_ptr - adapter->txd_write_ptr - 1);
}
static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
struct net_device *netdev)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
struct tx_pkt_header *txph;
u32 offset, copy_len;
int txs_unused;
int txbuf_unused;
if (test_bit(__ATL2_DOWN, &adapter->flags)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
if (unlikely(skb->len <= 0)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
txs_unused = TxsFreeUnit(adapter);
txbuf_unused = TxdFreeBytes(adapter);
if (skb->len + sizeof(struct tx_pkt_header) + 4 > txbuf_unused ||
txs_unused < 1) {
/* not enough resources */
netif_stop_queue(netdev);
return NETDEV_TX_BUSY;
}
offset = adapter->txd_write_ptr;
txph = (struct tx_pkt_header *) (((u8 *)adapter->txd_ring) + offset);
*(u32 *)txph = 0;
txph->pkt_size = skb->len;
offset += 4;
if (offset >= adapter->txd_ring_size)
offset -= adapter->txd_ring_size;
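	/* Copy the frame into the TX ring, splitting the copy in two when the
	 * packet would run past the end of the ring and wrap around. */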
copy_len = adapter->txd_ring_size - offset;
if (copy_len >= skb->len) {
memcpy(((u8 *)adapter->txd_ring) + offset, skb->data, skb->len);
offset += ((u32)(skb->len + 3) & ~3);
} else {
memcpy(((u8 *)adapter->txd_ring)+offset, skb->data, copy_len);
memcpy((u8 *)adapter->txd_ring, skb->data+copy_len,
skb->len-copy_len);
offset = ((u32)(skb->len-copy_len + 3) & ~3);
}
#ifdef NETIF_F_HW_VLAN_CTAG_TX
if (skb_vlan_tag_present(skb)) {
u16 vlan_tag = skb_vlan_tag_get(skb);
vlan_tag = (vlan_tag << 4) |
(vlan_tag >> 13) |
((vlan_tag >> 9) & 0x8);
txph->ins_vlan = 1;
txph->vlan = vlan_tag;
}
#endif
if (offset >= adapter->txd_ring_size)
offset -= adapter->txd_ring_size;
adapter->txd_write_ptr = offset;
/* clear txs before send */
adapter->txs_ring[adapter->txs_next_clear].update = 0;
if (++adapter->txs_next_clear == adapter->txs_ring_size)
adapter->txs_next_clear = 0;
ATL2_WRITE_REGW(&adapter->hw, REG_MB_TXD_WR_IDX,
(adapter->txd_write_ptr >> 2));
mmiowb();
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
/**
* atl2_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
*
* Returns 0 on success, negative on failure
*/
static int atl2_change_mtu(struct net_device *netdev, int new_mtu)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
struct atl2_hw *hw = &adapter->hw;
if ((new_mtu < 40) || (new_mtu > (ETH_DATA_LEN + VLAN_SIZE)))
return -EINVAL;
/* set MTU */
if (hw->max_frame_size != new_mtu) {
netdev->mtu = new_mtu;
ATL2_WRITE_REG(hw, REG_MTU, new_mtu + ENET_HEADER_SIZE +
VLAN_SIZE + ETHERNET_FCS_SIZE);
}
return 0;
}
/**
* atl2_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
* @p: pointer to an address structure
*
* Returns 0 on success, negative on failure
*/
static int atl2_set_mac(struct net_device *netdev, void *p)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
if (netif_running(netdev))
return -EBUSY;
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
atl2_set_mac_addr(&adapter->hw);
return 0;
}
static int atl2_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
struct mii_ioctl_data *data = if_mii(ifr);
unsigned long flags;
switch (cmd) {
case SIOCGMIIPHY:
data->phy_id = 0;
break;
case SIOCGMIIREG:
spin_lock_irqsave(&adapter->stats_lock, flags);
if (atl2_read_phy_reg(&adapter->hw,
data->reg_num & 0x1F, &data->val_out)) {
spin_unlock_irqrestore(&adapter->stats_lock, flags);
return -EIO;
}
spin_unlock_irqrestore(&adapter->stats_lock, flags);
break;
case SIOCSMIIREG:
if (data->reg_num & ~(0x1F))
return -EFAULT;
spin_lock_irqsave(&adapter->stats_lock, flags);
if (atl2_write_phy_reg(&adapter->hw, data->reg_num,
data->val_in)) {
spin_unlock_irqrestore(&adapter->stats_lock, flags);
return -EIO;
}
spin_unlock_irqrestore(&adapter->stats_lock, flags);
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
return atl2_mii_ioctl(netdev, ifr, cmd);
#ifdef ETHTOOL_OPS_COMPAT
case SIOCETHTOOL:
return ethtool_ioctl(ifr);
#endif
default:
return -EOPNOTSUPP;
}
}
/**
* atl2_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
static void atl2_tx_timeout(struct net_device *netdev)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
/* Do the reset outside of interrupt context */
schedule_work(&adapter->reset_task);
}
/**
* atl2_watchdog - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
static void atl2_watchdog(unsigned long data)
{
struct atl2_adapter *adapter = (struct atl2_adapter *) data;
if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
u32 drop_rxd, drop_rxs;
unsigned long flags;
spin_lock_irqsave(&adapter->stats_lock, flags);
drop_rxd = ATL2_READ_REG(&adapter->hw, REG_STS_RXD_OV);
drop_rxs = ATL2_READ_REG(&adapter->hw, REG_STS_RXS_OV);
spin_unlock_irqrestore(&adapter->stats_lock, flags);
adapter->netdev->stats.rx_over_errors += drop_rxd + drop_rxs;
/* Reset the timer */
mod_timer(&adapter->watchdog_timer,
round_jiffies(jiffies + 4 * HZ));
}
}
/**
* atl2_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
static void atl2_phy_config(unsigned long data)
{
struct atl2_adapter *adapter = (struct atl2_adapter *) data;
struct atl2_hw *hw = &adapter->hw;
unsigned long flags;
spin_lock_irqsave(&adapter->stats_lock, flags);
atl2_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
atl2_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN |
MII_CR_RESTART_AUTO_NEG);
spin_unlock_irqrestore(&adapter->stats_lock, flags);
clear_bit(0, &adapter->cfg_phy);
}
static int atl2_up(struct atl2_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
int err = 0;
u32 val;
/* hardware has been reset, we need to reload some things */
err = atl2_init_hw(&adapter->hw);
if (err) {
err = -EIO;
return err;
}
atl2_set_multi(netdev);
init_ring_ptrs(adapter);
atl2_restore_vlan(adapter);
if (atl2_configure(adapter)) {
err = -EIO;
goto err_up;
}
clear_bit(__ATL2_DOWN, &adapter->flags);
val = ATL2_READ_REG(&adapter->hw, REG_MASTER_CTRL);
ATL2_WRITE_REG(&adapter->hw, REG_MASTER_CTRL, val |
MASTER_CTRL_MANUAL_INT);
atl2_irq_enable(adapter);
err_up:
return err;
}
static void atl2_reinit_locked(struct atl2_adapter *adapter)
{
WARN_ON(in_interrupt());
while (test_and_set_bit(__ATL2_RESETTING, &adapter->flags))
msleep(1);
atl2_down(adapter);
atl2_up(adapter);
clear_bit(__ATL2_RESETTING, &adapter->flags);
}
static void atl2_reset_task(struct work_struct *work)
{
struct atl2_adapter *adapter;
adapter = container_of(work, struct atl2_adapter, reset_task);
atl2_reinit_locked(adapter);
}
static void atl2_setup_mac_ctrl(struct atl2_adapter *adapter)
{
u32 value;
struct atl2_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
/* Config MAC CTRL Register */
value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN | MAC_CTRL_MACLP_CLK_PHY;
/* duplex */
if (FULL_DUPLEX == adapter->link_duplex)
value |= MAC_CTRL_DUPLX;
/* flow control */
value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
/* PAD & CRC */
value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
/* preamble length */
value |= (((u32)adapter->hw.preamble_len & MAC_CTRL_PRMLEN_MASK) <<
MAC_CTRL_PRMLEN_SHIFT);
/* vlan */
__atl2_vlan_mode(netdev->features, &value);
/* filter mode */
value |= MAC_CTRL_BC_EN;
if (netdev->flags & IFF_PROMISC)
value |= MAC_CTRL_PROMIS_EN;
else if (netdev->flags & IFF_ALLMULTI)
value |= MAC_CTRL_MC_ALL_EN;
/* half retry buffer */
value |= (((u32)(adapter->hw.retry_buf &
MAC_CTRL_HALF_LEFT_BUF_MASK)) << MAC_CTRL_HALF_LEFT_BUF_SHIFT);
ATL2_WRITE_REG(hw, REG_MAC_CTRL, value);
}
static int atl2_check_link(struct atl2_adapter *adapter)
{
struct atl2_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
int ret_val;
u16 speed, duplex, phy_data;
int reconfig = 0;
/* MII_BMSR must be read twice: the link status bit is latched */
atl2_read_phy_reg(hw, MII_BMSR, &phy_data);
atl2_read_phy_reg(hw, MII_BMSR, &phy_data);
if (!(phy_data&BMSR_LSTATUS)) { /* link down */
if (netif_carrier_ok(netdev)) { /* old link state: Up */
u32 value;
/* disable rx */
value = ATL2_READ_REG(hw, REG_MAC_CTRL);
value &= ~MAC_CTRL_RX_EN;
ATL2_WRITE_REG(hw, REG_MAC_CTRL, value);
adapter->link_speed = SPEED_0;
netif_carrier_off(netdev);
netif_stop_queue(netdev);
}
return 0;
}
/* Link Up */
ret_val = atl2_get_speed_and_duplex(hw, &speed, &duplex);
if (ret_val)
return ret_val;
switch (hw->MediaType) {
case MEDIA_TYPE_100M_FULL:
if (speed != SPEED_100 || duplex != FULL_DUPLEX)
reconfig = 1;
break;
case MEDIA_TYPE_100M_HALF:
if (speed != SPEED_100 || duplex != HALF_DUPLEX)
reconfig = 1;
break;
case MEDIA_TYPE_10M_FULL:
if (speed != SPEED_10 || duplex != FULL_DUPLEX)
reconfig = 1;
break;
case MEDIA_TYPE_10M_HALF:
if (speed != SPEED_10 || duplex != HALF_DUPLEX)
reconfig = 1;
break;
}
/* link result is our setting */
if (reconfig == 0) {
if (adapter->link_speed != speed ||
adapter->link_duplex != duplex) {
adapter->link_speed = speed;
adapter->link_duplex = duplex;
atl2_setup_mac_ctrl(adapter);
printk(KERN_INFO "%s: %s NIC Link is Up<%d Mbps %s>\n",
atl2_driver_name, netdev->name,
adapter->link_speed,
adapter->link_duplex == FULL_DUPLEX ?
"Full Duplex" : "Half Duplex");
}
if (!netif_carrier_ok(netdev)) { /* Link down -> Up */
netif_carrier_on(netdev);
netif_wake_queue(netdev);
}
return 0;
}
/* change original link status */
if (netif_carrier_ok(netdev)) {
u32 value;
/* disable rx */
value = ATL2_READ_REG(hw, REG_MAC_CTRL);
value &= ~MAC_CTRL_RX_EN;
ATL2_WRITE_REG(hw, REG_MAC_CTRL, value);
adapter->link_speed = SPEED_0;
netif_carrier_off(netdev);
netif_stop_queue(netdev);
}
/* auto-negotiation result didn't match our configuration; arm a timer
 * to re-configure the PHY (re-triggering more often than every
 * 5 seconds would indicate something is wrong) */
if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
if (!test_and_set_bit(0, &adapter->cfg_phy))
mod_timer(&adapter->phy_config_timer,
round_jiffies(jiffies + 5 * HZ));
}
return 0;
}
/**
* atl2_link_chg_task - deal with link change event Out of interrupt context
*/
static void atl2_link_chg_task(struct work_struct *work)
{
struct atl2_adapter *adapter;
unsigned long flags;
adapter = container_of(work, struct atl2_adapter, link_chg_task);
spin_lock_irqsave(&adapter->stats_lock, flags);
atl2_check_link(adapter);
spin_unlock_irqrestore(&adapter->stats_lock, flags);
}
static void atl2_setup_pcicmd(struct pci_dev *pdev)
{
u16 cmd;
pci_read_config_word(pdev, PCI_COMMAND, &cmd);
if (cmd & PCI_COMMAND_INTX_DISABLE)
cmd &= ~PCI_COMMAND_INTX_DISABLE;
if (cmd & PCI_COMMAND_IO)
cmd &= ~PCI_COMMAND_IO;
if (0 == (cmd & PCI_COMMAND_MEMORY))
cmd |= PCI_COMMAND_MEMORY;
if (0 == (cmd & PCI_COMMAND_MASTER))
cmd |= PCI_COMMAND_MASTER;
pci_write_config_word(pdev, PCI_COMMAND, cmd);
/*
 * some motherboard BIOS (PXE/EFI) drivers may leave PME asserted
 * when they hand control to the OS (Windows/Linux), so clear the
 * bit before the NIC starts normal operation
 */
pci_write_config_dword(pdev, REG_PM_CTRLSTAT, 0);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void atl2_poll_controller(struct net_device *netdev)
{
disable_irq(netdev->irq);
atl2_intr(netdev->irq, netdev);
enable_irq(netdev->irq);
}
#endif
static const struct net_device_ops atl2_netdev_ops = {
.ndo_open = atl2_open,
.ndo_stop = atl2_close,
.ndo_start_xmit = atl2_xmit_frame,
.ndo_set_rx_mode = atl2_set_multi,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = atl2_set_mac,
.ndo_change_mtu = atl2_change_mtu,
.ndo_fix_features = atl2_fix_features,
.ndo_set_features = atl2_set_features,
.ndo_do_ioctl = atl2_ioctl,
.ndo_tx_timeout = atl2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = atl2_poll_controller,
#endif
};
/**
* atl2_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in atl2_pci_tbl
*
* Returns 0 on success, negative on failure
*
* atl2_probe initializes an adapter identified by a pci_dev structure.
* The OS initialization, configuring of the adapter private structure,
* and a hardware reset occur.
*/
static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct atl2_adapter *adapter;
static int cards_found;
unsigned long mmio_start;
int mmio_len;
int err;
cards_found = 0;
err = pci_enable_device(pdev);
if (err)
return err;
/*
* atl2 is a shared-high-32-bit device, so we're stuck with 32-bit DMA
* until the kernel has the proper infrastructure to support 64-bit DMA
* on these devices.
*/
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
printk(KERN_ERR "atl2: No usable DMA configuration, aborting\n");
goto err_dma;
}
/* Mark all PCI regions associated with PCI device
* pdev as being reserved by owner atl2_driver_name */
err = pci_request_regions(pdev, atl2_driver_name);
if (err)
goto err_pci_reg;
/* Enables bus-mastering on the device and calls
* pcibios_set_master to do the needed arch specific settings */
pci_set_master(pdev);
err = -ENOMEM;
netdev = alloc_etherdev(sizeof(struct atl2_adapter));
if (!netdev)
goto err_alloc_etherdev;
SET_NETDEV_DEV(netdev, &pdev->dev);
pci_set_drvdata(pdev, netdev);
adapter = netdev_priv(netdev);
adapter->netdev = netdev;
adapter->pdev = pdev;
adapter->hw.back = adapter;
mmio_start = pci_resource_start(pdev, 0x0);
mmio_len = pci_resource_len(pdev, 0x0);
adapter->hw.mem_rang = (u32)mmio_len;
adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
if (!adapter->hw.hw_addr) {
err = -EIO;
goto err_ioremap;
}
atl2_setup_pcicmd(pdev);
netdev->netdev_ops = &atl2_netdev_ops;
netdev->ethtool_ops = &atl2_ethtool_ops;
netdev->watchdog_timeo = 5 * HZ;
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
netdev->mem_start = mmio_start;
netdev->mem_end = mmio_start + mmio_len;
adapter->bd_number = cards_found;
adapter->pci_using_64 = false;
/* setup the private structure */
err = atl2_sw_init(adapter);
if (err)
goto err_sw_init;
err = -EIO;
/* The hardware cannot do scatter/gather: atl2_xmit_frame() only copies
 * the linear part of the skb, so advertising NETIF_F_SG would transmit
 * stale ring/kernel memory in place of the fragments of a nonlinear
 * frame. Keep SG off.
 */
netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
/* Init PHY as early as possible due to power saving issue */
atl2_phy_init(&adapter->hw);
/* reset the controller to
* put the device in a known good starting state */
if (atl2_reset_hw(&adapter->hw)) {
err = -EIO;
goto err_reset;
}
/* copy the MAC address out of the EEPROM */
atl2_read_mac_addr(&adapter->hw);
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->dev_addr)) {
err = -EIO;
goto err_eeprom;
}
atl2_check_options(adapter);
setup_timer(&adapter->watchdog_timer, atl2_watchdog,
(unsigned long)adapter);
setup_timer(&adapter->phy_config_timer, atl2_phy_config,
(unsigned long)adapter);
INIT_WORK(&adapter->reset_task, atl2_reset_task);
INIT_WORK(&adapter->link_chg_task, atl2_link_chg_task);
strcpy(netdev->name, "eth%d"); /* ?? */
err = register_netdev(netdev);
if (err)
goto err_register;
/* assume we have no link for now */
netif_carrier_off(netdev);
netif_stop_queue(netdev);
cards_found++;
return 0;
err_reset:
err_register:
err_sw_init:
err_eeprom:
iounmap(adapter->hw.hw_addr);
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
pci_release_regions(pdev);
err_pci_reg:
err_dma:
pci_disable_device(pdev);
return err;
}
/**
* atl2_remove - Device Removal Routine
* @pdev: PCI device information struct
*
* atl2_remove is called by the PCI subsystem to alert the driver
* that it should release a PCI device. This could be caused by a
* Hot-Plug event, or because the driver is going to be removed from
* memory.
*/
/* FIXME: write the original MAC address back in case it was changed from a
* BIOS-set value, as in atl1 -- CHS */
static void atl2_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl2_adapter *adapter = netdev_priv(netdev);
/* flush_scheduled_work() may reschedule our watchdog task, so
 * explicitly disable the watchdog tasks from being rescheduled */
set_bit(__ATL2_DOWN, &adapter->flags);
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_config_timer);
cancel_work_sync(&adapter->reset_task);
cancel_work_sync(&adapter->link_chg_task);
unregister_netdev(netdev);
atl2_force_ps(&adapter->hw);
iounmap(adapter->hw.hw_addr);
pci_release_regions(pdev);
free_netdev(netdev);
pci_disable_device(pdev);
}
static int atl2_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl2_adapter *adapter = netdev_priv(netdev);
struct atl2_hw *hw = &adapter->hw;
u16 speed, duplex;
u32 ctrl = 0;
u32 wufc = adapter->wol;
#ifdef CONFIG_PM
int retval = 0;
#endif
netif_device_detach(netdev);
if (netif_running(netdev)) {
WARN_ON(test_bit(__ATL2_RESETTING, &adapter->flags));
atl2_down(adapter);
}
#ifdef CONFIG_PM
retval = pci_save_state(pdev);
if (retval)
return retval;
#endif
atl2_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
atl2_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
if (ctrl & BMSR_LSTATUS)
wufc &= ~ATLX_WUFC_LNKC;
if (0 != (ctrl & BMSR_LSTATUS) && 0 != wufc) {
u32 ret_val;
/* get current link speed & duplex */
ret_val = atl2_get_speed_and_duplex(hw, &speed, &duplex);
if (ret_val) {
printk(KERN_DEBUG
"%s: failed to get speed/duplex while suspending\n",
atl2_driver_name);
goto wol_dis;
}
ctrl = 0;
/* turn on magic packet wol */
if (wufc & ATLX_WUFC_MAG)
ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN);
/* ignore Link Chg event when Link is up */
ATL2_WRITE_REG(hw, REG_WOL_CTRL, ctrl);
/* Config MAC CTRL Register */
ctrl = MAC_CTRL_RX_EN | MAC_CTRL_MACLP_CLK_PHY;
if (FULL_DUPLEX == adapter->link_duplex)
ctrl |= MAC_CTRL_DUPLX;
ctrl |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
ctrl |= (((u32)adapter->hw.preamble_len &
MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
ctrl |= (((u32)(adapter->hw.retry_buf &
MAC_CTRL_HALF_LEFT_BUF_MASK)) <<
MAC_CTRL_HALF_LEFT_BUF_SHIFT);
if (wufc & ATLX_WUFC_MAG) {
/* a magic packet may arrive as broadcast, multicast or unicast */
ctrl |= MAC_CTRL_BC_EN;
}
ATL2_WRITE_REG(hw, REG_MAC_CTRL, ctrl);
/* pcie patch */
ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC);
ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1);
ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK;
ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl);
pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
goto suspend_exit;
}
if (0 == (ctrl&BMSR_LSTATUS) && 0 != (wufc&ATLX_WUFC_LNKC)) {
/* link is down, so only LINK CHG WOL event enable */
ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
ATL2_WRITE_REG(hw, REG_WOL_CTRL, ctrl);
ATL2_WRITE_REG(hw, REG_MAC_CTRL, 0);
/* pcie patch */
ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC);
ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1);
ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK;
ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl);
hw->phy_configured = false; /* re-init PHY when resume */
pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
goto suspend_exit;
}
wol_dis:
/* WOL disabled */
ATL2_WRITE_REG(hw, REG_WOL_CTRL, 0);
/* pcie patch */
ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC);
ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1);
ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK;
ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl);
atl2_force_ps(hw);
hw->phy_configured = false; /* re-init PHY when resume */
pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
suspend_exit:
if (netif_running(netdev))
atl2_free_irq(adapter);
pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
#ifdef CONFIG_PM
static int atl2_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl2_adapter *adapter = netdev_priv(netdev);
int err;
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
err = pci_enable_device(pdev);
if (err) {
printk(KERN_ERR
"atl2: Cannot enable PCI device from suspend\n");
return err;
}
pci_set_master(pdev);
ATL2_READ_REG(&adapter->hw, REG_WOL_CTRL); /* clear WOL status */
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
ATL2_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
if (netif_running(netdev)) {
err = atl2_request_irq(adapter);
if (err)
return err;
}
atl2_reset_hw(&adapter->hw);
if (netif_running(netdev))
atl2_up(adapter);
netif_device_attach(netdev);
return 0;
}
#endif
static void atl2_shutdown(struct pci_dev *pdev)
{
atl2_suspend(pdev, PMSG_SUSPEND);
}
static struct pci_driver atl2_driver = {
.name = atl2_driver_name,
.id_table = atl2_pci_tbl,
.probe = atl2_probe,
.remove = atl2_remove,
/* Power Management Hooks */
.suspend = atl2_suspend,
#ifdef CONFIG_PM
.resume = atl2_resume,
#endif
.shutdown = atl2_shutdown,
};
/**
* atl2_init_module - Driver Registration Routine
*
* atl2_init_module is the first routine called when the driver is
* loaded. All it does is register with the PCI subsystem.
*/
static int __init atl2_init_module(void)
{
printk(KERN_INFO "%s - version %s\n", atl2_driver_string,
atl2_driver_version);
printk(KERN_INFO "%s\n", atl2_copyright);
return pci_register_driver(&atl2_driver);
}
module_init(atl2_init_module);
/**
* atl2_exit_module - Driver Exit Cleanup Routine
*
* atl2_exit_module is called just before the driver is removed
* from memory.
*/
static void __exit atl2_exit_module(void)
{
pci_unregister_driver(&atl2_driver);
}
module_exit(atl2_exit_module);
static void atl2_read_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value)
{
struct atl2_adapter *adapter = hw->back;
pci_read_config_word(adapter->pdev, reg, value);
}
static void atl2_write_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value)
{
struct atl2_adapter *adapter = hw->back;
pci_write_config_word(adapter->pdev, reg, *value);
}
static int atl2_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
struct atl2_hw *hw = &adapter->hw;
ecmd->supported = (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_Autoneg |
SUPPORTED_TP);
ecmd->advertising = ADVERTISED_TP;
ecmd->advertising |= ADVERTISED_Autoneg;
ecmd->advertising |= hw->autoneg_advertised;
ecmd->port = PORT_TP;
ecmd->phy_address = 0;
ecmd->transceiver = XCVR_INTERNAL;
if (adapter->link_speed != SPEED_0) {
ethtool_cmd_speed_set(ecmd, adapter->link_speed);
if (adapter->link_duplex == FULL_DUPLEX)
ecmd->duplex = DUPLEX_FULL;
else
ecmd->duplex = DUPLEX_HALF;
} else {
ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
ecmd->duplex = DUPLEX_UNKNOWN;
}
ecmd->autoneg = AUTONEG_ENABLE;
return 0;
}
static int atl2_set_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
struct atl2_hw *hw = &adapter->hw;
while (test_and_set_bit(__ATL2_RESETTING, &adapter->flags))
msleep(1);
if (ecmd->autoneg == AUTONEG_ENABLE) {
#define MY_ADV_MASK (ADVERTISE_10_HALF | \
ADVERTISE_10_FULL | \
ADVERTISE_100_HALF| \
ADVERTISE_100_FULL)
if ((ecmd->advertising & MY_ADV_MASK) == MY_ADV_MASK) {
hw->MediaType = MEDIA_TYPE_AUTO_SENSOR;
hw->autoneg_advertised = MY_ADV_MASK;
} else if ((ecmd->advertising & MY_ADV_MASK) ==
ADVERTISE_100_FULL) {
hw->MediaType = MEDIA_TYPE_100M_FULL;
hw->autoneg_advertised = ADVERTISE_100_FULL;
} else if ((ecmd->advertising & MY_ADV_MASK) ==
ADVERTISE_100_HALF) {
hw->MediaType = MEDIA_TYPE_100M_HALF;
hw->autoneg_advertised = ADVERTISE_100_HALF;
} else if ((ecmd->advertising & MY_ADV_MASK) ==
ADVERTISE_10_FULL) {
hw->MediaType = MEDIA_TYPE_10M_FULL;
hw->autoneg_advertised = ADVERTISE_10_FULL;
} else if ((ecmd->advertising & MY_ADV_MASK) ==
ADVERTISE_10_HALF) {
hw->MediaType = MEDIA_TYPE_10M_HALF;
hw->autoneg_advertised = ADVERTISE_10_HALF;
} else {
clear_bit(__ATL2_RESETTING, &adapter->flags);
return -EINVAL;
}
ecmd->advertising = hw->autoneg_advertised |
ADVERTISED_TP | ADVERTISED_Autoneg;
} else {
clear_bit(__ATL2_RESETTING, &adapter->flags);
return -EINVAL;
}
/* reset the link */
if (netif_running(adapter->netdev)) {
atl2_down(adapter);
atl2_up(adapter);
} else
atl2_reset_hw(&adapter->hw);
clear_bit(__ATL2_RESETTING, &adapter->flags);
return 0;
}
static u32 atl2_get_msglevel(struct net_device *netdev)
{
return 0;
}
/*
* It's sane for this to be empty, but we might want to take advantage of this.
*/
static void atl2_set_msglevel(struct net_device *netdev, u32 data)
{
}
static int atl2_get_regs_len(struct net_device *netdev)
{
#define ATL2_REGS_LEN 42
return sizeof(u32) * ATL2_REGS_LEN;
}
static void atl2_get_regs(struct net_device *netdev,
struct ethtool_regs *regs, void *p)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
struct atl2_hw *hw = &adapter->hw;
u32 *regs_buff = p;
u16 phy_data;
memset(p, 0, sizeof(u32) * ATL2_REGS_LEN);
regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
regs_buff[0] = ATL2_READ_REG(hw, REG_VPD_CAP);
regs_buff[1] = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL);
regs_buff[2] = ATL2_READ_REG(hw, REG_SPI_FLASH_CONFIG);
regs_buff[3] = ATL2_READ_REG(hw, REG_TWSI_CTRL);
regs_buff[4] = ATL2_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL);
regs_buff[5] = ATL2_READ_REG(hw, REG_MASTER_CTRL);
regs_buff[6] = ATL2_READ_REG(hw, REG_MANUAL_TIMER_INIT);
regs_buff[7] = ATL2_READ_REG(hw, REG_IRQ_MODU_TIMER_INIT);
regs_buff[8] = ATL2_READ_REG(hw, REG_PHY_ENABLE);
regs_buff[9] = ATL2_READ_REG(hw, REG_CMBDISDMA_TIMER);
regs_buff[10] = ATL2_READ_REG(hw, REG_IDLE_STATUS);
regs_buff[11] = ATL2_READ_REG(hw, REG_MDIO_CTRL);
regs_buff[12] = ATL2_READ_REG(hw, REG_SERDES_LOCK);
regs_buff[13] = ATL2_READ_REG(hw, REG_MAC_CTRL);
regs_buff[14] = ATL2_READ_REG(hw, REG_MAC_IPG_IFG);
regs_buff[15] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR);
regs_buff[16] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR+4);
regs_buff[17] = ATL2_READ_REG(hw, REG_RX_HASH_TABLE);
regs_buff[18] = ATL2_READ_REG(hw, REG_RX_HASH_TABLE+4);
regs_buff[19] = ATL2_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL);
regs_buff[20] = ATL2_READ_REG(hw, REG_MTU);
regs_buff[21] = ATL2_READ_REG(hw, REG_WOL_CTRL);
regs_buff[22] = ATL2_READ_REG(hw, REG_SRAM_TXRAM_END);
regs_buff[23] = ATL2_READ_REG(hw, REG_DESC_BASE_ADDR_HI);
regs_buff[24] = ATL2_READ_REG(hw, REG_TXD_BASE_ADDR_LO);
regs_buff[25] = ATL2_READ_REG(hw, REG_TXD_MEM_SIZE);
regs_buff[26] = ATL2_READ_REG(hw, REG_TXS_BASE_ADDR_LO);
regs_buff[27] = ATL2_READ_REG(hw, REG_TXS_MEM_SIZE);
regs_buff[28] = ATL2_READ_REG(hw, REG_RXD_BASE_ADDR_LO);
regs_buff[29] = ATL2_READ_REG(hw, REG_RXD_BUF_NUM);
regs_buff[30] = ATL2_READ_REG(hw, REG_DMAR);
regs_buff[31] = ATL2_READ_REG(hw, REG_TX_CUT_THRESH);
regs_buff[32] = ATL2_READ_REG(hw, REG_DMAW);
regs_buff[33] = ATL2_READ_REG(hw, REG_PAUSE_ON_TH);
regs_buff[34] = ATL2_READ_REG(hw, REG_PAUSE_OFF_TH);
regs_buff[35] = ATL2_READ_REG(hw, REG_MB_TXD_WR_IDX);
regs_buff[36] = ATL2_READ_REG(hw, REG_MB_RXD_RD_IDX);
/* regs_buff[37] is left at the zero set by the memset above */
regs_buff[38] = ATL2_READ_REG(hw, REG_ISR);
regs_buff[39] = ATL2_READ_REG(hw, REG_IMR);
atl2_read_phy_reg(hw, MII_BMCR, &phy_data);
regs_buff[40] = (u32)phy_data;
atl2_read_phy_reg(hw, MII_BMSR, &phy_data);
regs_buff[41] = (u32)phy_data;
}
static int atl2_get_eeprom_len(struct net_device *netdev)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
if (!atl2_check_eeprom_exist(&adapter->hw))
return 512;
else
return 0;
}
static int atl2_get_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, u8 *bytes)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
struct atl2_hw *hw = &adapter->hw;
u32 *eeprom_buff;
int first_dword, last_dword;
int ret_val = 0;
int i;
if (eeprom->len == 0)
return -EINVAL;
if (atl2_check_eeprom_exist(hw))
return -EINVAL;
eeprom->magic = hw->vendor_id | (hw->device_id << 16);
first_dword = eeprom->offset >> 2;
last_dword = (eeprom->offset + eeprom->len - 1) >> 2;
eeprom_buff = kmalloc(sizeof(u32) * (last_dword - first_dword + 1),
GFP_KERNEL);
if (!eeprom_buff)
return -ENOMEM;
/* last_dword is inclusive; reading it as well avoids copying
 * uninitialized heap memory to user space below */
for (i = first_dword; i <= last_dword; i++) {
if (!atl2_read_eeprom(hw, i*4, &(eeprom_buff[i-first_dword]))) {
ret_val = -EIO;
goto free;
}
}
memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3),
eeprom->len);
free:
kfree(eeprom_buff);
return ret_val;
}
static int atl2_set_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, u8 *bytes)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
struct atl2_hw *hw = &adapter->hw;
u32 *eeprom_buff;
u32 *ptr;
int max_len, first_dword, last_dword, ret_val = 0;
int i;
if (eeprom->len == 0)
return -EOPNOTSUPP;
if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
return -EFAULT;
max_len = 512;
first_dword = eeprom->offset >> 2;
last_dword = (eeprom->offset + eeprom->len - 1) >> 2;
eeprom_buff = kmalloc(max_len, GFP_KERNEL);
if (!eeprom_buff)
return -ENOMEM;
ptr = eeprom_buff;
if (eeprom->offset & 3) {
/* need read/modify/write of first changed EEPROM word */
/* only the second byte of the word is being modified */
if (!atl2_read_eeprom(hw, first_dword*4, &(eeprom_buff[0]))) {
ret_val = -EIO;
goto out;
}
ptr++;
}
if (((eeprom->offset + eeprom->len) & 3)) {
/*
* need read/modify/write of last changed EEPROM word
* only the first byte of the word is being modified
*/
if (!atl2_read_eeprom(hw, last_dword * 4,
&(eeprom_buff[last_dword - first_dword]))) {
ret_val = -EIO;
goto out;
}
}
/* Device's eeprom is always little-endian, word addressable */
memcpy(ptr, bytes, eeprom->len);
for (i = 0; i < last_dword - first_dword + 1; i++) {
if (!atl2_write_eeprom(hw, ((first_dword+i)*4), eeprom_buff[i])) {
ret_val = -EIO;
goto out;
}
}
out:
kfree(eeprom_buff);
return ret_val;
}
static void atl2_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
strlcpy(drvinfo->driver, atl2_driver_name, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, atl2_driver_version,
sizeof(drvinfo->version));
strlcpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
static void atl2_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
wol->supported = WAKE_MAGIC;
wol->wolopts = 0;
if (adapter->wol & ATLX_WUFC_EX)
wol->wolopts |= WAKE_UCAST;
if (adapter->wol & ATLX_WUFC_MC)
wol->wolopts |= WAKE_MCAST;
if (adapter->wol & ATLX_WUFC_BC)
wol->wolopts |= WAKE_BCAST;
if (adapter->wol & ATLX_WUFC_MAG)
wol->wolopts |= WAKE_MAGIC;
if (adapter->wol & ATLX_WUFC_LNKC)
wol->wolopts |= WAKE_PHY;
}
static int atl2_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
return -EOPNOTSUPP;
if (wol->wolopts & (WAKE_UCAST | WAKE_BCAST | WAKE_MCAST))
return -EOPNOTSUPP;
/* these settings will always override what we currently have */
adapter->wol = 0;
if (wol->wolopts & WAKE_MAGIC)
adapter->wol |= ATLX_WUFC_MAG;
if (wol->wolopts & WAKE_PHY)
adapter->wol |= ATLX_WUFC_LNKC;
return 0;
}
static int atl2_nway_reset(struct net_device *netdev)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
if (netif_running(netdev))
atl2_reinit_locked(adapter);
return 0;
}
static const struct ethtool_ops atl2_ethtool_ops = {
.get_settings = atl2_get_settings,
.set_settings = atl2_set_settings,
.get_drvinfo = atl2_get_drvinfo,
.get_regs_len = atl2_get_regs_len,
.get_regs = atl2_get_regs,
.get_wol = atl2_get_wol,
.set_wol = atl2_set_wol,
.get_msglevel = atl2_get_msglevel,
.set_msglevel = atl2_set_msglevel,
.nway_reset = atl2_nway_reset,
.get_link = ethtool_op_get_link,
.get_eeprom_len = atl2_get_eeprom_len,
.get_eeprom = atl2_get_eeprom,
.set_eeprom = atl2_set_eeprom,
};
#define LBYTESWAP(a) ((((a) & 0x00ff00ff) << 8) | \
(((a) & 0xff00ff00) >> 8))
#define LONGSWAP(a) ((LBYTESWAP(a) << 16) | (LBYTESWAP(a) >> 16))
#define SHORTSWAP(a) (((a) << 8) | ((a) >> 8))
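/*
 * Worked example (illustration only): the EEPROM stores the station
 * address as little-endian dwords, so the byte order must be reversed
 * before the bytes can be used as a MAC address. For a = 0x6AF600DC:
 * LBYTESWAP(a) = 0xF66ADC00 (swap bytes within each 16-bit half)
 * LONGSWAP(a)  = 0xDC00F66A (then swap the two halves)
 * i.e. LONGSWAP fully reverses the four bytes, and SHORTSWAP reverses
 * the two bytes of a 16-bit value.
 */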
/*
* Reset the transmit and receive units; mask and clear all interrupts.
*
* hw - Struct containing variables accessed by shared code
* Returns 0 on success, or the non-zero idle status on error.
*/
static s32 atl2_reset_hw(struct atl2_hw *hw)
{
u32 icr;
u16 pci_cfg_cmd_word;
int i;
/* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */
atl2_read_pci_cfg(hw, PCI_REG_COMMAND, &pci_cfg_cmd_word);
if ((pci_cfg_cmd_word &
(CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER)) !=
(CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER)) {
pci_cfg_cmd_word |=
(CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER);
atl2_write_pci_cfg(hw, PCI_REG_COMMAND, &pci_cfg_cmd_word);
}
/* Clear Interrupt mask to stop board from generating
* interrupts & Clear any pending interrupt events
*/
/* FIXME */
/* ATL2_WRITE_REG(hw, REG_IMR, 0); */
/* ATL2_WRITE_REG(hw, REG_ISR, 0xffffffff); */
/* Issue Soft Reset to the MAC. This will reset the chip's
 * transmit, receive and DMA units. It will not affect the
 * current PCI configuration. The global reset bit is self-
 * clearing, and should clear within a microsecond.
 */
ATL2_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_SOFT_RST);
wmb();
msleep(1); /* delay about 1ms */
/* Wait at least 10ms for All module to be Idle */
for (i = 0; i < 10; i++) {
icr = ATL2_READ_REG(hw, REG_IDLE_STATUS);
if (!icr)
break;
msleep(1); /* delay 1 ms */
cpu_relax();
}
if (icr)
return icr;
return 0;
}
#define CUSTOM_SPI_CS_SETUP 2
#define CUSTOM_SPI_CLK_HI 2
#define CUSTOM_SPI_CLK_LO 2
#define CUSTOM_SPI_CS_HOLD 2
#define CUSTOM_SPI_CS_HI 3
static struct atl2_spi_flash_dev flash_table[] =
{
/* MFR WRSR READ PROGRAM WREN WRDI RDSR RDID SECTOR_ERASE CHIP_ERASE */
{"Atmel", 0x0, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62 },
{"SST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0x90, 0x20, 0x60 },
{"ST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0xAB, 0xD8, 0xC7 },
};
static bool atl2_spi_read(struct atl2_hw *hw, u32 addr, u32 *buf)
{
int i;
u32 value;
ATL2_WRITE_REG(hw, REG_SPI_DATA, 0);
ATL2_WRITE_REG(hw, REG_SPI_ADDR, addr);
value = SPI_FLASH_CTRL_WAIT_READY |
(CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) <<
SPI_FLASH_CTRL_CS_SETUP_SHIFT |
(CUSTOM_SPI_CLK_HI & SPI_FLASH_CTRL_CLK_HI_MASK) <<
SPI_FLASH_CTRL_CLK_HI_SHIFT |
(CUSTOM_SPI_CLK_LO & SPI_FLASH_CTRL_CLK_LO_MASK) <<
SPI_FLASH_CTRL_CLK_LO_SHIFT |
(CUSTOM_SPI_CS_HOLD & SPI_FLASH_CTRL_CS_HOLD_MASK) <<
SPI_FLASH_CTRL_CS_HOLD_SHIFT |
(CUSTOM_SPI_CS_HI & SPI_FLASH_CTRL_CS_HI_MASK) <<
SPI_FLASH_CTRL_CS_HI_SHIFT |
(0x1 & SPI_FLASH_CTRL_INS_MASK) << SPI_FLASH_CTRL_INS_SHIFT;
ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value);
value |= SPI_FLASH_CTRL_START;
ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value);
for (i = 0; i < 10; i++) {
msleep(1);
value = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL);
if (!(value & SPI_FLASH_CTRL_START))
break;
}
if (value & SPI_FLASH_CTRL_START)
return false;
*buf = ATL2_READ_REG(hw, REG_SPI_DATA);
return true;
}
/*
* get_permanent_address
* Returns 0 if a valid MAC address was obtained.
*/
static int get_permanent_address(struct atl2_hw *hw)
{
u32 Addr[2];
u32 i, Control;
u16 Register;
u8 EthAddr[ETH_ALEN];
bool KeyValid;
if (is_valid_ether_addr(hw->perm_mac_addr))
return 0;
Addr[0] = 0;
Addr[1] = 0;
if (!atl2_check_eeprom_exist(hw)) { /* eeprom exists */
Register = 0;
KeyValid = false;
/* Read out all EEPROM content */
i = 0;
while (1) {
if (atl2_read_eeprom(hw, i + 0x100, &Control)) {
if (KeyValid) {
if (Register == REG_MAC_STA_ADDR)
Addr[0] = Control;
else if (Register ==
(REG_MAC_STA_ADDR + 4))
Addr[1] = Control;
KeyValid = false;
} else if ((Control & 0xff) == 0x5A) {
KeyValid = true;
Register = (u16) (Control >> 16);
} else {
/* assume end of data on an invalid keyword */
break;
}
} else {
break; /* read error */
}
i += 4;
}
*(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]);
*(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *) &Addr[1]);
if (is_valid_ether_addr(EthAddr)) {
memcpy(hw->perm_mac_addr, EthAddr, ETH_ALEN);
return 0;
}
return 1;
}
/* see if SPI flash exists? */
Addr[0] = 0;
Addr[1] = 0;
Register = 0;
KeyValid = false;
i = 0;
while (1) {
if (atl2_spi_read(hw, i + 0x1f000, &Control)) {
if (KeyValid) {
if (Register == REG_MAC_STA_ADDR)
Addr[0] = Control;
else if (Register == (REG_MAC_STA_ADDR + 4))
Addr[1] = Control;
KeyValid = false;
} else if ((Control & 0xff) == 0x5A) {
KeyValid = true;
Register = (u16) (Control >> 16);
} else {
break; /* data end */
}
} else {
break; /* read error */
}
i += 4;
}
*(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]);
*(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *)&Addr[1]);
if (is_valid_ether_addr(EthAddr)) {
memcpy(hw->perm_mac_addr, EthAddr, ETH_ALEN);
return 0;
}
/* maybe MAC-address is from BIOS */
Addr[0] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR);
Addr[1] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR + 4);
*(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]);
*(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *) &Addr[1]);
if (is_valid_ether_addr(EthAddr)) {
memcpy(hw->perm_mac_addr, EthAddr, ETH_ALEN);
return 0;
}
return 1;
}
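/*
 * Illustration (not driver code) of the key/value records parsed by
 * get_permanent_address() above. Both the EEPROM (from offset 0x100)
 * and the SPI flash (from offset 0x1f000) hold a sequence of 32-bit
 * little-endian words:
 * key word:   low byte == 0x5A marks a valid key; the high 16 bits
 *             name a register (e.g. REG_MAC_STA_ADDR)
 * value word: the next dword, recorded for that register
 * The walk stops at the first word whose low byte is not 0x5A. Once a
 * REG_MAC_STA_ADDR / REG_MAC_STA_ADDR+4 pair has been collected, the
 * bytes are swapped into hw->perm_mac_addr.
 */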
/*
* Reads the adapter's MAC address from the EEPROM
*
* hw - Struct containing variables accessed by shared code
*/
static s32 atl2_read_mac_addr(struct atl2_hw *hw)
{
if (get_permanent_address(hw)) {
/* for test */
/* FIXME: shouldn't we use eth_random_addr() here? */
hw->perm_mac_addr[0] = 0x00;
hw->perm_mac_addr[1] = 0x13;
hw->perm_mac_addr[2] = 0x74;
hw->perm_mac_addr[3] = 0x00;
hw->perm_mac_addr[4] = 0x5c;
hw->perm_mac_addr[5] = 0x38;
}
memcpy(hw->mac_addr, hw->perm_mac_addr, ETH_ALEN);
return 0;
}
/*
* Hashes an address to determine its location in the multicast table
*
* hw - Struct containing variables accessed by shared code
* mc_addr - the multicast address to hash
*
* atl2_hash_mc_addr
* purpose: compute the hash value for a multicast address
* Hash computation:
* 1. compute the 32-bit CRC of the multicast address
* 2. bit-reverse the CRC (MSB becomes LSB)
*/
static u32 atl2_hash_mc_addr(struct atl2_hw *hw, u8 *mc_addr)
{
u32 crc32, value;
int i;
value = 0;
crc32 = ether_crc_le(6, mc_addr);
for (i = 0; i < 32; i++)
value |= (((crc32 >> i) & 1) << (31 - i));
return value;
}
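/*
 * Worked example (illustration only): the loop above bit-reverses the
 * CRC so that its most significant bits land in the low-order bits
 * consumed by atl2_hash_set(). If ether_crc_le() returned 0x00000001
 * (only bit 0 set), the reversed value is 0x80000000 (only bit 31
 * set); a CRC of 0xC0000000 becomes 0x00000003.
 */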
/*
* Sets the bit in the multicast table corresponding to the hash value.
*
* hw - Struct containing variables accessed by shared code
* hash_value - Multicast address hash value
*/
static void atl2_hash_set(struct atl2_hw *hw, u32 hash_value)
{
u32 hash_bit, hash_reg;
u32 mta;
/* The hash table is a register array of 2 32-bit registers,
 * treated as a 64-bit bit array; we want to set bit
 * BitArray[hash_value]. The top bit (31) of the hash value
 * selects the register and the next five bits (30:26) select
 * the bit within that register. Read the register, OR in the
 * new bit, then write the value back.
 */
hash_reg = (hash_value >> 31) & 0x1;
hash_bit = (hash_value >> 26) & 0x1F;
mta = ATL2_READ_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg);
mta |= (1 << hash_bit);
ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg, mta);
}
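/*
 * Worked example (illustration only): for hash_value = 0x84000000,
 * bit 31 is set so hash_reg = 1, and bits 30:26 are 00001 so
 * hash_bit = 1; the call sets bit 1 of the second hash-table register.
 * Only the top six bits of the hash value are consumed.
 */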
/*
* atl2_init_pcie - init PCIE module
*/
static void atl2_init_pcie(struct atl2_hw *hw)
{
u32 value;
value = LTSSM_TEST_MODE_DEF;
ATL2_WRITE_REG(hw, REG_LTSSM_TEST_MODE, value);
value = PCIE_DLL_TX_CTRL1_DEF;
ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, value);
}
static void atl2_init_flash_opcode(struct atl2_hw *hw)
{
if (hw->flash_vendor >= ARRAY_SIZE(flash_table))
hw->flash_vendor = 0; /* ATMEL */
/* Init OP table */
ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_PROGRAM,
flash_table[hw->flash_vendor].cmdPROGRAM);
ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_SC_ERASE,
flash_table[hw->flash_vendor].cmdSECTOR_ERASE);
ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_CHIP_ERASE,
flash_table[hw->flash_vendor].cmdCHIP_ERASE);
ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_RDID,
flash_table[hw->flash_vendor].cmdRDID);
ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_WREN,
flash_table[hw->flash_vendor].cmdWREN);
ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_RDSR,
flash_table[hw->flash_vendor].cmdRDSR);
ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_WRSR,
flash_table[hw->flash_vendor].cmdWRSR);
ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_READ,
flash_table[hw->flash_vendor].cmdREAD);
}
/********************************************************************
* Performs basic configuration of the adapter.
*
* hw - Struct containing variables accessed by shared code
* Assumes that the controller has previously been reset and is in a
* post-reset uninitialized state. Initializes the multicast table
* and calls routines to set up the link.
* Leaves the transmit and receive units disabled and uninitialized.
********************************************************************/
static s32 atl2_init_hw(struct atl2_hw *hw)
{
u32 ret_val = 0;
atl2_init_pcie(hw);
/* Zero out the Multicast HASH table */
/* clear the old settings from the multicast hash table */
ATL2_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
atl2_init_flash_opcode(hw);
ret_val = atl2_phy_init(hw);
return ret_val;
}
/*
* Detects the current speed and duplex settings of the hardware.
*
* hw - Struct containing variables accessed by shared code
* speed - Speed of the connection
* duplex - Duplex setting of the connection
*/
static s32 atl2_get_speed_and_duplex(struct atl2_hw *hw, u16 *speed,
u16 *duplex)
{
s32 ret_val;
u16 phy_data;
/* Read PHY Specific Status Register (17) */
ret_val = atl2_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data);
if (ret_val)
return ret_val;
if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED))
return ATLX_ERR_PHY_RES;
switch (phy_data & MII_ATLX_PSSR_SPEED) {
case MII_ATLX_PSSR_100MBS:
*speed = SPEED_100;
break;
case MII_ATLX_PSSR_10MBS:
*speed = SPEED_10;
break;
default:
return ATLX_ERR_PHY_SPEED;
}
if (phy_data & MII_ATLX_PSSR_DPLX)
*duplex = FULL_DUPLEX;
else
*duplex = HALF_DUPLEX;
return 0;
}
/*
* Reads the value from a PHY register
* hw - Struct containing variables accessed by shared code
* reg_addr - address of the PHY register to read
*/
static s32 atl2_read_phy_reg(struct atl2_hw *hw, u16 reg_addr, u16 *phy_data)
{
u32 val;
int i;
val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
MDIO_START |
MDIO_SUP_PREAMBLE |
MDIO_RW |
MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
ATL2_WRITE_REG(hw, REG_MDIO_CTRL, val);
wmb();
for (i = 0; i < MDIO_WAIT_TIMES; i++) {
udelay(2);
val = ATL2_READ_REG(hw, REG_MDIO_CTRL);
if (!(val & (MDIO_START | MDIO_BUSY)))
break;
wmb();
}
if (!(val & (MDIO_START | MDIO_BUSY))) {
*phy_data = (u16)val;
return 0;
}
return ATLX_ERR_PHY;
}
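/*
 * Note (illustration only) on the MDIO read sequence above: the
 * register address, the read flag and the clock divider are packed
 * into a single REG_MDIO_CTRL write, MDIO_START kicks off the
 * transaction, and the loop polls until the controller clears both
 * MDIO_START and MDIO_BUSY, at which point the low 16 bits of the
 * register hold the PHY data. With MDIO_WAIT_TIMES polls of 2 us
 * each, the timeout is on the order of tens of microseconds.
 */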
/*
* Writes a value to a PHY register
* hw - Struct containing variables accessed by shared code
* reg_addr - address of the PHY register to write
* data - data to write to the PHY
*/
static s32 atl2_write_phy_reg(struct atl2_hw *hw, u32 reg_addr, u16 phy_data)
{
int i;
u32 val;
val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
(reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
MDIO_SUP_PREAMBLE |
MDIO_START |
MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
ATL2_WRITE_REG(hw, REG_MDIO_CTRL, val);
wmb();
for (i = 0; i < MDIO_WAIT_TIMES; i++) {
udelay(2);
val = ATL2_READ_REG(hw, REG_MDIO_CTRL);
if (!(val & (MDIO_START | MDIO_BUSY)))
break;
wmb();
}
if (!(val & (MDIO_START | MDIO_BUSY)))
return 0;
return ATLX_ERR_PHY;
}
/*
* Configures PHY autoneg and flow control advertisement settings
*
* hw - Struct containing variables accessed by shared code
*/
static s32 atl2_phy_setup_autoneg_adv(struct atl2_hw *hw)
{
s32 ret_val;
s16 mii_autoneg_adv_reg;
/* Read the MII Auto-Neg Advertisement Register (Address 4). */
mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
/* Need to parse autoneg_advertised and set up
* the appropriate PHY registers. First we will parse for
* autoneg_advertised software override. Since we can advertise
* a plethora of combinations, we need to check each bit
* individually.
*/
/* First we clear all the 10/100 mb speed bits in the Auto-Neg
* Advertisement Register (Address 4) and the 1000 mb speed bits in
* the 1000Base-T Control Register (Address 9). */
mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
/* Need to parse MediaType and setup the
* appropriate PHY registers. */
switch (hw->MediaType) {
case MEDIA_TYPE_AUTO_SENSOR:
mii_autoneg_adv_reg |=
(MII_AR_10T_HD_CAPS |
MII_AR_10T_FD_CAPS |
MII_AR_100TX_HD_CAPS|
MII_AR_100TX_FD_CAPS);
hw->autoneg_advertised =
ADVERTISE_10_HALF |
ADVERTISE_10_FULL |
ADVERTISE_100_HALF|
ADVERTISE_100_FULL;
break;
case MEDIA_TYPE_100M_FULL:
mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
hw->autoneg_advertised = ADVERTISE_100_FULL;
break;
case MEDIA_TYPE_100M_HALF:
mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
hw->autoneg_advertised = ADVERTISE_100_HALF;
break;
case MEDIA_TYPE_10M_FULL:
mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
hw->autoneg_advertised = ADVERTISE_10_FULL;
break;
default:
mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
hw->autoneg_advertised = ADVERTISE_10_HALF;
break;
}
/* flow control fixed to enable all */
mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
ret_val = atl2_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
if (ret_val)
return ret_val;
return 0;
}
/*
* Resets the PHY and makes all configuration take effect
*
* hw - Struct containing variables accessed by shared code
*
* Sets bits 15 and 12 of the MII control register (for the F001 bug)
*/
static s32 atl2_phy_commit(struct atl2_hw *hw)
{
s32 ret_val;
u16 phy_data;
phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
ret_val = atl2_write_phy_reg(hw, MII_BMCR, phy_data);
if (ret_val) {
u32 val;
int i;
/* pcie serdes link may be down ! */
for (i = 0; i < 25; i++) {
msleep(1);
val = ATL2_READ_REG(hw, REG_MDIO_CTRL);
if (!(val & (MDIO_START | MDIO_BUSY)))
break;
}
if (0 != (val & (MDIO_START | MDIO_BUSY))) {
printk(KERN_ERR "atl2: PCIe link down for at least 25ms !\n");
return ret_val;
}
}
return 0;
}
static s32 atl2_phy_init(struct atl2_hw *hw)
{
s32 ret_val;
u16 phy_val;
if (hw->phy_configured)
return 0;
/* Enable PHY */
ATL2_WRITE_REGW(hw, REG_PHY_ENABLE, 1);
ATL2_WRITE_FLUSH(hw);
msleep(1);
/* check if the PHY is in powersaving mode */
atl2_write_phy_reg(hw, MII_DBG_ADDR, 0);
atl2_read_phy_reg(hw, MII_DBG_DATA, &phy_val);
/* 024E / 124E or 0274 / 1274 ? */
if (phy_val & 0x1000) {
phy_val &= ~0x1000;
atl2_write_phy_reg(hw, MII_DBG_DATA, phy_val);
}
msleep(1);
/*Enable PHY LinkChange Interrupt */
ret_val = atl2_write_phy_reg(hw, 18, 0xC00);
if (ret_val)
return ret_val;
/* setup AutoNeg parameters */
ret_val = atl2_phy_setup_autoneg_adv(hw);
if (ret_val)
return ret_val;
/* SW.Reset & En-Auto-Neg to restart Auto-Neg */
ret_val = atl2_phy_commit(hw);
if (ret_val)
return ret_val;
hw->phy_configured = true;
return ret_val;
}
static void atl2_set_mac_addr(struct atl2_hw *hw)
{
u32 value;
/* 00-0B-6A-F6-00-DC
* 0: 6AF600DC 1: 000B
* low dword */
value = (((u32)hw->mac_addr[2]) << 24) |
(((u32)hw->mac_addr[3]) << 16) |
(((u32)hw->mac_addr[4]) << 8) |
(((u32)hw->mac_addr[5]));
ATL2_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value);
/* high dword */
value = (((u32)hw->mac_addr[0]) << 8) |
(((u32)hw->mac_addr[1]));
ATL2_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value);
}
/*
* check_eeprom_exist
* Returns 0 if an EEPROM is present.
*/
static int atl2_check_eeprom_exist(struct atl2_hw *hw)
{
u32 value;
value = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL);
if (value & SPI_FLASH_CTRL_EN_VPD) {
value &= ~SPI_FLASH_CTRL_EN_VPD;
ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value);
}
value = ATL2_READ_REGW(hw, REG_PCIE_CAP_LIST);
return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
}
/* FIXME: This doesn't look right. -- CHS */
static bool atl2_write_eeprom(struct atl2_hw *hw, u32 offset, u32 value)
{
return true;
}
static bool atl2_read_eeprom(struct atl2_hw *hw, u32 Offset, u32 *pValue)
{
int i;
u32 Control;
if (Offset & 0x3)
return false; /* address not dword-aligned */
ATL2_WRITE_REG(hw, REG_VPD_DATA, 0);
Control = (Offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
ATL2_WRITE_REG(hw, REG_VPD_CAP, Control);
for (i = 0; i < 10; i++) {
msleep(2);
Control = ATL2_READ_REG(hw, REG_VPD_CAP);
if (Control & VPD_CAP_VPD_FLAG)
break;
}
if (Control & VPD_CAP_VPD_FLAG) {
*pValue = ATL2_READ_REG(hw, REG_VPD_DATA);
return true;
}
return false; /* timeout */
}
static void atl2_force_ps(struct atl2_hw *hw)
{
u16 phy_val;
atl2_write_phy_reg(hw, MII_DBG_ADDR, 0);
atl2_read_phy_reg(hw, MII_DBG_DATA, &phy_val);
atl2_write_phy_reg(hw, MII_DBG_DATA, phy_val | 0x1000);
atl2_write_phy_reg(hw, MII_DBG_ADDR, 2);
atl2_write_phy_reg(hw, MII_DBG_DATA, 0x3000);
atl2_write_phy_reg(hw, MII_DBG_ADDR, 3);
atl2_write_phy_reg(hw, MII_DBG_DATA, 0);
}
/* This is the only thing that needs to be changed to adjust the
* maximum number of ports that the driver can manage.
*/
#define ATL2_MAX_NIC 4
#define OPTION_UNSET -1
#define OPTION_DISABLED 0
#define OPTION_ENABLED 1
/* All parameters are treated the same, as an integer array of values.
* This macro just reduces the need to repeat the same declaration code
* over and over (plus this helps to avoid typo bugs).
*/
#define ATL2_PARAM_INIT {[0 ... ATL2_MAX_NIC] = OPTION_UNSET}
#ifndef module_param_array
/* Module Parameters are always initialized to -1, so that the driver
* can tell the difference between no user specified value or the
* user asking for the default value.
* The true default values are loaded in when atl2_check_options is called.
*
* This is a GCC extension to ANSI C.
* See the item "Labeled Elements in Initializers" in the section
* "Extensions to the C Language Family" of the GCC documentation.
*/
#define ATL2_PARAM(X, desc) \
static const int X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
MODULE_PARM_DESC(X, desc);
#else
#define ATL2_PARAM(X, desc) \
static int X[ATL2_MAX_NIC+1] = ATL2_PARAM_INIT; \
static unsigned int num_##X; \
module_param_array_named(X, X, int, &num_##X, 0); \
MODULE_PARM_DESC(X, desc);
#endif
/*
* Transmit Memory Size (in KB)
* Valid Range: 4-64
* Default Value: 8
*/
#define ATL2_MIN_TX_MEMSIZE 4 /* 4KB */
#define ATL2_MAX_TX_MEMSIZE 64 /* 64KB */
#define ATL2_DEFAULT_TX_MEMSIZE 8 /* 8KB */
ATL2_PARAM(TxMemSize, "Bytes of Transmit Memory");
/*
* Receive Memory Block Count
* Valid Range: 16-512
* Default Value: 64
*/
#define ATL2_MIN_RXD_COUNT 16
#define ATL2_MAX_RXD_COUNT 512
#define ATL2_DEFAULT_RXD_COUNT 64
ATL2_PARAM(RxMemBlock, "Number of receive memory block");
/*
* User Specified MediaType Override
*
* Valid Range: 0-5
* - 0 - auto-negotiate at all supported speeds
* - 1 - only link at 1000Mbps Full Duplex
* - 2 - only link at 100Mbps Full Duplex
* - 3 - only link at 100Mbps Half Duplex
* - 4 - only link at 10Mbps Full Duplex
* - 5 - only link at 10Mbps Half Duplex
* Default Value: 0
*/
ATL2_PARAM(MediaType, "MediaType Select");
/*
* Interrupt Moderate Timer in units of 2048 ns (~2 us)
* Valid Range: 50-65000
* Default Value: 100 (~200 us)
*/
#define INT_MOD_DEFAULT_CNT 100 /* 200us */
#define INT_MOD_MAX_CNT 65000
#define INT_MOD_MIN_CNT 50
ATL2_PARAM(IntModTimer, "Interrupt Moderator Timer");
/*
* FlashVendor
* Valid Range: 0-2
* 0 - Atmel
* 1 - SST
* 2 - ST
*/
ATL2_PARAM(FlashVendor, "SPI Flash Vendor");
#define AUTONEG_ADV_DEFAULT 0x2F
#define AUTONEG_ADV_MASK 0x2F
#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
#define FLASH_VENDOR_DEFAULT 0
#define FLASH_VENDOR_MIN 0
#define FLASH_VENDOR_MAX 2
struct atl2_option {
enum { enable_option, range_option, list_option } type;
char *name;
char *err;
int def;
union {
struct { /* range_option info */
int min;
int max;
} r;
struct { /* list_option info */
int nr;
struct atl2_opt_list { int i; char *str; } *p;
} l;
} arg;
};
static int atl2_validate_option(int *value, struct atl2_option *opt)
{
int i;
struct atl2_opt_list *ent;
if (*value == OPTION_UNSET) {
*value = opt->def;
return 0;
}
switch (opt->type) {
case enable_option:
switch (*value) {
case OPTION_ENABLED:
printk(KERN_INFO "%s Enabled\n", opt->name);
return 0;
case OPTION_DISABLED:
printk(KERN_INFO "%s Disabled\n", opt->name);
return 0;
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
printk(KERN_INFO "%s set to %i\n", opt->name, *value);
return 0;
}
break;
case list_option:
for (i = 0; i < opt->arg.l.nr; i++) {
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
printk(KERN_INFO "%s\n", ent->str);
return 0;
}
}
break;
default:
BUG();
}
printk(KERN_INFO "Invalid %s specified (%i) %s\n",
opt->name, *value, opt->err);
*value = opt->def;
return -1;
}
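/*
 * Usage sketch (illustration only, mirroring atl2_check_options()
 * below): a caller describes the legal range and default in an
 * atl2_option and lets atl2_validate_option() fix up the value in
 * place:
 *
 * struct atl2_option opt = {
 *         .type      = range_option,
 *         .name      = "Interrupt Moderate Timer",
 *         .err       = "using default",
 *         .def       = INT_MOD_DEFAULT_CNT,
 *         .arg.r.min = INT_MOD_MIN_CNT,
 *         .arg.r.max = INT_MOD_MAX_CNT,
 * };
 * int val = IntModTimer[bd];
 * atl2_validate_option(&val, &opt); // val is now in range or default
 */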
/**
* atl2_check_options - Range Checking for Command Line Parameters
* @adapter: board private structure
*
* This routine checks all command line parameters for valid user
* input. If an invalid value is given, or if no user specified
* value exists, a default value is used. The final value is stored
* in a variable in the adapter structure.
*/
static void atl2_check_options(struct atl2_adapter *adapter)
{
int val;
struct atl2_option opt;
int bd = adapter->bd_number;
if (bd >= ATL2_MAX_NIC) {
printk(KERN_NOTICE "Warning: no configuration for board #%i\n",
bd);
printk(KERN_NOTICE "Using defaults for all values\n");
#ifndef module_param_array
bd = ATL2_MAX_NIC;
#endif
}
/* Bytes of Transmit Memory */
opt.type = range_option;
opt.name = "Bytes of Transmit Memory";
opt.err = "using default of " __MODULE_STRING(ATL2_DEFAULT_TX_MEMSIZE);
opt.def = ATL2_DEFAULT_TX_MEMSIZE;
opt.arg.r.min = ATL2_MIN_TX_MEMSIZE;
opt.arg.r.max = ATL2_MAX_TX_MEMSIZE;
#ifdef module_param_array
if (num_TxMemSize > bd) {
#endif
val = TxMemSize[bd];
atl2_validate_option(&val, &opt);
adapter->txd_ring_size = ((u32) val) * 1024;
#ifdef module_param_array
} else
adapter->txd_ring_size = ((u32)opt.def) * 1024;
#endif
/* txs ring size: */
adapter->txs_ring_size = adapter->txd_ring_size / 128;
if (adapter->txs_ring_size > 160)
adapter->txs_ring_size = 160;
/* Receive Memory Block Count */
opt.type = range_option;
opt.name = "Number of receive memory block";
opt.err = "using default of " __MODULE_STRING(ATL2_DEFAULT_RXD_COUNT);
opt.def = ATL2_DEFAULT_RXD_COUNT;
opt.arg.r.min = ATL2_MIN_RXD_COUNT;
opt.arg.r.max = ATL2_MAX_RXD_COUNT;
#ifdef module_param_array
if (num_RxMemBlock > bd) {
#endif
val = RxMemBlock[bd];
atl2_validate_option(&val, &opt);
adapter->rxd_ring_size = (u32)val;
/* FIXME */
/* ((u16)val)&~1; */ /* even number */
#ifdef module_param_array
} else
adapter->rxd_ring_size = (u32)opt.def;
#endif
/* init RXD Flow control value */
adapter->hw.fc_rxd_hi = (adapter->rxd_ring_size / 8) * 7;
adapter->hw.fc_rxd_lo = (ATL2_MIN_RXD_COUNT / 8) >
(adapter->rxd_ring_size / 12) ? (ATL2_MIN_RXD_COUNT / 8) :
(adapter->rxd_ring_size / 12);
/* Interrupt Moderate Timer */
opt.type = range_option;
opt.name = "Interrupt Moderate Timer";
opt.err = "using default of " __MODULE_STRING(INT_MOD_DEFAULT_CNT);
opt.def = INT_MOD_DEFAULT_CNT;
opt.arg.r.min = INT_MOD_MIN_CNT;
opt.arg.r.max = INT_MOD_MAX_CNT;
#ifdef module_param_array
if (num_IntModTimer > bd) {
#endif
val = IntModTimer[bd];
atl2_validate_option(&val, &opt);
adapter->imt = (u16) val;
#ifdef module_param_array
} else
adapter->imt = (u16)(opt.def);
#endif
/* Flash Vendor */
opt.type = range_option;
opt.name = "SPI Flash Vendor";
opt.err = "using default of " __MODULE_STRING(FLASH_VENDOR_DEFAULT);
opt.def = FLASH_VENDOR_DEFAULT;
opt.arg.r.min = FLASH_VENDOR_MIN;
opt.arg.r.max = FLASH_VENDOR_MAX;
#ifdef module_param_array
if (num_FlashVendor > bd) {
#endif
val = FlashVendor[bd];
atl2_validate_option(&val, &opt);
adapter->hw.flash_vendor = (u8) val;
#ifdef module_param_array
} else
adapter->hw.flash_vendor = (u8)(opt.def);
#endif
/* MediaType */
opt.type = range_option;
opt.name = "Speed/Duplex Selection";
opt.err = "using default of " __MODULE_STRING(MEDIA_TYPE_AUTO_SENSOR);
opt.def = MEDIA_TYPE_AUTO_SENSOR;
opt.arg.r.min = MEDIA_TYPE_AUTO_SENSOR;
opt.arg.r.max = MEDIA_TYPE_10M_HALF;
#ifdef module_param_array
if (num_MediaType > bd) {
#endif
val = MediaType[bd];
atl2_validate_option(&val, &opt);
adapter->hw.MediaType = (u16) val;
#ifdef module_param_array
} else
adapter->hw.MediaType = (u16)(opt.def);
#endif
}
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_4936_0 |
crossvul-cpp_data_good_5691_0 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
* Copyright Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
* Copyright Darryl Miles G7LED (dlm@g7led.demon.co.uk)
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/stat.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h> /* For TIOCINQ/OUTQ */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/netrom.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/arp.h>
#include <linux/init.h>
static int nr_ndevs = 4;
int sysctl_netrom_default_path_quality = NR_DEFAULT_QUAL;
int sysctl_netrom_obsolescence_count_initialiser = NR_DEFAULT_OBS;
int sysctl_netrom_network_ttl_initialiser = NR_DEFAULT_TTL;
int sysctl_netrom_transport_timeout = NR_DEFAULT_T1;
int sysctl_netrom_transport_maximum_tries = NR_DEFAULT_N2;
int sysctl_netrom_transport_acknowledge_delay = NR_DEFAULT_T2;
int sysctl_netrom_transport_busy_delay = NR_DEFAULT_T4;
int sysctl_netrom_transport_requested_window_size = NR_DEFAULT_WINDOW;
int sysctl_netrom_transport_no_activity_timeout = NR_DEFAULT_IDLE;
int sysctl_netrom_routing_control = NR_DEFAULT_ROUTING;
int sysctl_netrom_link_fails_count = NR_DEFAULT_FAILS;
int sysctl_netrom_reset_circuit = NR_DEFAULT_RESET;
static unsigned short circuit = 0x101;
static HLIST_HEAD(nr_list);
static DEFINE_SPINLOCK(nr_list_lock);
static const struct proto_ops nr_proto_ops;
/*
* NETROM network devices are virtual network devices encapsulating NETROM
* frames into AX.25 which will be sent through an AX.25 device, so form a
* special "super class" of normal net devices; split their locks off into a
* separate class since they always nest.
*/
static struct lock_class_key nr_netdev_xmit_lock_key;
static struct lock_class_key nr_netdev_addr_lock_key;
static void nr_set_lockdep_one(struct net_device *dev,
struct netdev_queue *txq,
void *_unused)
{
lockdep_set_class(&txq->_xmit_lock, &nr_netdev_xmit_lock_key);
}
static void nr_set_lockdep_key(struct net_device *dev)
{
lockdep_set_class(&dev->addr_list_lock, &nr_netdev_addr_lock_key);
netdev_for_each_tx_queue(dev, nr_set_lockdep_one, NULL);
}
/*
* Socket removal during an interrupt is now safe.
*/
static void nr_remove_socket(struct sock *sk)
{
spin_lock_bh(&nr_list_lock);
sk_del_node_init(sk);
spin_unlock_bh(&nr_list_lock);
}
/*
* Kill all bound sockets on a dropped device.
*/
static void nr_kill_by_device(struct net_device *dev)
{
struct sock *s;
spin_lock_bh(&nr_list_lock);
sk_for_each(s, &nr_list)
if (nr_sk(s)->device == dev)
nr_disconnect(s, ENETUNREACH);
spin_unlock_bh(&nr_list_lock);
}
/*
* Handle device status changes.
*/
static int nr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct net_device *dev = (struct net_device *)ptr;
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
if (event != NETDEV_DOWN)
return NOTIFY_DONE;
nr_kill_by_device(dev);
nr_rt_device_down(dev);
return NOTIFY_DONE;
}
/*
* Add a socket to the bound sockets list.
*/
static void nr_insert_socket(struct sock *sk)
{
spin_lock_bh(&nr_list_lock);
sk_add_node(sk, &nr_list);
spin_unlock_bh(&nr_list_lock);
}
/*
* Find a socket that wants to accept the Connect Request we just
* received.
*/
static struct sock *nr_find_listener(ax25_address *addr)
{
struct sock *s;
spin_lock_bh(&nr_list_lock);
sk_for_each(s, &nr_list)
if (!ax25cmp(&nr_sk(s)->source_addr, addr) &&
s->sk_state == TCP_LISTEN) {
bh_lock_sock(s);
goto found;
}
s = NULL;
found:
spin_unlock_bh(&nr_list_lock);
return s;
}
/*
* Find a connected NET/ROM socket given my circuit IDs.
*/
static struct sock *nr_find_socket(unsigned char index, unsigned char id)
{
struct sock *s;
spin_lock_bh(&nr_list_lock);
sk_for_each(s, &nr_list) {
struct nr_sock *nr = nr_sk(s);
if (nr->my_index == index && nr->my_id == id) {
bh_lock_sock(s);
goto found;
}
}
s = NULL;
found:
spin_unlock_bh(&nr_list_lock);
return s;
}
/*
* Find a connected NET/ROM socket given their circuit IDs.
*/
static struct sock *nr_find_peer(unsigned char index, unsigned char id,
ax25_address *dest)
{
struct sock *s;
spin_lock_bh(&nr_list_lock);
sk_for_each(s, &nr_list) {
struct nr_sock *nr = nr_sk(s);
if (nr->your_index == index && nr->your_id == id &&
!ax25cmp(&nr->dest_addr, dest)) {
bh_lock_sock(s);
goto found;
}
}
s = NULL;
found:
spin_unlock_bh(&nr_list_lock);
return s;
}
/*
* Find next free circuit ID.
*/
static unsigned short nr_find_next_circuit(void)
{
unsigned short id = circuit;
unsigned char i, j;
struct sock *sk;
for (;;) {
i = id / 256;
j = id % 256;
if (i != 0 && j != 0) {
if ((sk=nr_find_socket(i, j)) == NULL)
break;
bh_unlock_sock(sk);
}
id++;
}
return id;
}
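/*
 * Annotation only, not part of the driver: the 16-bit circuit number is
 * carried as two one-byte identifiers, so e.g. the initial value 0x101
 * above splits into index = 0x101 / 256 = 1 and id = 0x101 % 256 = 1.
 * Values with a zero byte are skipped here because circuit 0/0 is
 * reserved (see the comment in nr_rx_frame() below).
 */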
/*
* Deferred destroy.
*/
void nr_destroy_socket(struct sock *);
/*
* Handler for deferred kills.
*/
static void nr_destroy_timer(unsigned long data)
{
struct sock *sk=(struct sock *)data;
bh_lock_sock(sk);
sock_hold(sk);
nr_destroy_socket(sk);
bh_unlock_sock(sk);
sock_put(sk);
}
/*
* This is called from user mode and the timers. Thus it protects itself
* against interrupt users but doesn't worry about being called during
* work. Once it is removed from the queue no interrupt or bottom half
* will touch it and we are (fairly 8-) ) safe.
*/
void nr_destroy_socket(struct sock *sk)
{
struct sk_buff *skb;
nr_remove_socket(sk);
nr_stop_heartbeat(sk);
nr_stop_t1timer(sk);
nr_stop_t2timer(sk);
nr_stop_t4timer(sk);
nr_stop_idletimer(sk);
nr_clear_queues(sk); /* Flush the queues */
while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if (skb->sk != sk) { /* A pending connection */
/* Queue the unaccepted socket for death */
sock_set_flag(skb->sk, SOCK_DEAD);
nr_start_heartbeat(skb->sk);
nr_sk(skb->sk)->state = NR_STATE_0;
}
kfree_skb(skb);
}
if (sk_has_allocations(sk)) {
/* Defer: outstanding buffers */
sk->sk_timer.function = nr_destroy_timer;
sk->sk_timer.expires = jiffies + 2 * HZ;
add_timer(&sk->sk_timer);
} else
sock_put(sk);
}
/*
* Handling for system calls applied via the various interfaces to a
* NET/ROM socket object.
*/
static int nr_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct nr_sock *nr = nr_sk(sk);
unsigned long opt;
if (level != SOL_NETROM)
return -ENOPROTOOPT;
if (optlen < sizeof(unsigned int))
return -EINVAL;
if (get_user(opt, (unsigned int __user *)optval))
return -EFAULT;
switch (optname) {
case NETROM_T1:
if (opt < 1 || opt > ULONG_MAX / HZ)
return -EINVAL;
nr->t1 = opt * HZ;
return 0;
case NETROM_T2:
if (opt < 1 || opt > ULONG_MAX / HZ)
return -EINVAL;
nr->t2 = opt * HZ;
return 0;
case NETROM_N2:
if (opt < 1 || opt > 31)
return -EINVAL;
nr->n2 = opt;
return 0;
case NETROM_T4:
if (opt < 1 || opt > ULONG_MAX / HZ)
return -EINVAL;
nr->t4 = opt * HZ;
return 0;
case NETROM_IDLE:
if (opt > ULONG_MAX / (60 * HZ))
return -EINVAL;
nr->idle = opt * 60 * HZ;
return 0;
default:
return -ENOPROTOOPT;
}
}
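/*
 * Illustration only, not part of the driver: a minimal user-space sketch
 * of driving the setsockopt() handler above. SOL_NETROM and NETROM_T1 are
 * assumed to come from the libax25/kernel UAPI headers; the helper name is
 * hypothetical, and the block is guarded so it never builds in-kernel.
 */
#ifdef NR_SETSOCKOPT_EXAMPLE
#include <sys/socket.h>
#include <netrom/netrom.h>
static int nr_set_t1_example(int fd, unsigned int seconds)
{
/* The handler above multiplies the value by HZ to get jiffies. */
return setsockopt(fd, SOL_NETROM, NETROM_T1, &seconds, sizeof(seconds));
}
#endif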
static int nr_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct nr_sock *nr = nr_sk(sk);
int val = 0;
int len;
if (level != SOL_NETROM)
return -ENOPROTOOPT;
if (get_user(len, optlen))
return -EFAULT;
if (len < 0)
return -EINVAL;
switch (optname) {
case NETROM_T1:
val = nr->t1 / HZ;
break;
case NETROM_T2:
val = nr->t2 / HZ;
break;
case NETROM_N2:
val = nr->n2;
break;
case NETROM_T4:
val = nr->t4 / HZ;
break;
case NETROM_IDLE:
val = nr->idle / (60 * HZ);
break;
default:
return -ENOPROTOOPT;
}
len = min_t(unsigned int, len, sizeof(int));
if (put_user(len, optlen))
return -EFAULT;
return copy_to_user(optval, &val, len) ? -EFAULT : 0;
}
static int nr_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
lock_sock(sk);
if (sk->sk_state != TCP_LISTEN) {
memset(&nr_sk(sk)->user_addr, 0, AX25_ADDR_LEN);
sk->sk_max_ack_backlog = backlog;
sk->sk_state = TCP_LISTEN;
release_sock(sk);
return 0;
}
release_sock(sk);
return -EOPNOTSUPP;
}
static struct proto nr_proto = {
.name = "NETROM",
.owner = THIS_MODULE,
.obj_size = sizeof(struct nr_sock),
};
static int nr_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
struct nr_sock *nr;
if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
if (sock->type != SOCK_SEQPACKET || protocol != 0)
return -ESOCKTNOSUPPORT;
sk = sk_alloc(net, PF_NETROM, GFP_ATOMIC, &nr_proto);
if (sk == NULL)
return -ENOMEM;
nr = nr_sk(sk);
sock_init_data(sock, sk);
sock->ops = &nr_proto_ops;
sk->sk_protocol = protocol;
skb_queue_head_init(&nr->ack_queue);
skb_queue_head_init(&nr->reseq_queue);
skb_queue_head_init(&nr->frag_queue);
nr_init_timers(sk);
nr->t1 =
msecs_to_jiffies(sysctl_netrom_transport_timeout);
nr->t2 =
msecs_to_jiffies(sysctl_netrom_transport_acknowledge_delay);
nr->n2 =
msecs_to_jiffies(sysctl_netrom_transport_maximum_tries);
nr->t4 =
msecs_to_jiffies(sysctl_netrom_transport_busy_delay);
nr->idle =
msecs_to_jiffies(sysctl_netrom_transport_no_activity_timeout);
nr->window = sysctl_netrom_transport_requested_window_size;
nr->bpqext = 1;
nr->state = NR_STATE_0;
return 0;
}
static struct sock *nr_make_new(struct sock *osk)
{
struct sock *sk;
struct nr_sock *nr, *onr;
if (osk->sk_type != SOCK_SEQPACKET)
return NULL;
sk = sk_alloc(sock_net(osk), PF_NETROM, GFP_ATOMIC, osk->sk_prot);
if (sk == NULL)
return NULL;
nr = nr_sk(sk);
sock_init_data(NULL, sk);
sk->sk_type = osk->sk_type;
sk->sk_priority = osk->sk_priority;
sk->sk_protocol = osk->sk_protocol;
sk->sk_rcvbuf = osk->sk_rcvbuf;
sk->sk_sndbuf = osk->sk_sndbuf;
sk->sk_state = TCP_ESTABLISHED;
sock_copy_flags(sk, osk);
skb_queue_head_init(&nr->ack_queue);
skb_queue_head_init(&nr->reseq_queue);
skb_queue_head_init(&nr->frag_queue);
nr_init_timers(sk);
onr = nr_sk(osk);
nr->t1 = onr->t1;
nr->t2 = onr->t2;
nr->n2 = onr->n2;
nr->t4 = onr->t4;
nr->idle = onr->idle;
nr->window = onr->window;
nr->device = onr->device;
nr->bpqext = onr->bpqext;
return sk;
}
static int nr_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct nr_sock *nr;
if (sk == NULL)
return 0;
sock_hold(sk);
sock_orphan(sk);
lock_sock(sk);
nr = nr_sk(sk);
switch (nr->state) {
case NR_STATE_0:
case NR_STATE_1:
case NR_STATE_2:
nr_disconnect(sk, 0);
nr_destroy_socket(sk);
break;
case NR_STATE_3:
nr_clear_queues(sk);
nr->n2count = 0;
nr_write_internal(sk, NR_DISCREQ);
nr_start_t1timer(sk);
nr_stop_t2timer(sk);
nr_stop_t4timer(sk);
nr_stop_idletimer(sk);
nr->state = NR_STATE_2;
sk->sk_state = TCP_CLOSE;
sk->sk_shutdown |= SEND_SHUTDOWN;
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DESTROY);
break;
default:
break;
}
sock->sk = NULL;
release_sock(sk);
sock_put(sk);
return 0;
}
static int nr_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sock *sk = sock->sk;
struct nr_sock *nr = nr_sk(sk);
struct full_sockaddr_ax25 *addr = (struct full_sockaddr_ax25 *)uaddr;
struct net_device *dev;
ax25_uid_assoc *user;
ax25_address *source;
lock_sock(sk);
if (!sock_flag(sk, SOCK_ZAPPED)) {
release_sock(sk);
return -EINVAL;
}
if (addr_len < sizeof(struct sockaddr_ax25) || addr_len > sizeof(struct full_sockaddr_ax25)) {
release_sock(sk);
return -EINVAL;
}
if (addr_len < (addr->fsa_ax25.sax25_ndigis * sizeof(ax25_address) + sizeof(struct sockaddr_ax25))) {
release_sock(sk);
return -EINVAL;
}
if (addr->fsa_ax25.sax25_family != AF_NETROM) {
release_sock(sk);
return -EINVAL;
}
if ((dev = nr_dev_get(&addr->fsa_ax25.sax25_call)) == NULL) {
release_sock(sk);
return -EADDRNOTAVAIL;
}
/*
* Only the super user can set an arbitrary user callsign.
*/
if (addr->fsa_ax25.sax25_ndigis == 1) {
if (!capable(CAP_NET_BIND_SERVICE)) {
dev_put(dev);
release_sock(sk);
return -EPERM;
}
nr->user_addr = addr->fsa_digipeater[0];
nr->source_addr = addr->fsa_ax25.sax25_call;
} else {
source = &addr->fsa_ax25.sax25_call;
user = ax25_findbyuid(current_euid());
if (user) {
nr->user_addr = user->call;
ax25_uid_put(user);
} else {
if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) {
release_sock(sk);
dev_put(dev);
return -EPERM;
}
nr->user_addr = *source;
}
nr->source_addr = *source;
}
nr->device = dev;
nr_insert_socket(sk);
sock_reset_flag(sk, SOCK_ZAPPED);
dev_put(dev);
release_sock(sk);
return 0;
}
static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags)
{
struct sock *sk = sock->sk;
struct nr_sock *nr = nr_sk(sk);
struct sockaddr_ax25 *addr = (struct sockaddr_ax25 *)uaddr;
ax25_address *source = NULL;
ax25_uid_assoc *user;
struct net_device *dev;
int err = 0;
lock_sock(sk);
if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
sock->state = SS_CONNECTED;
goto out_release; /* Connect completed during a ERESTARTSYS event */
}
if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
sock->state = SS_UNCONNECTED;
err = -ECONNREFUSED;
goto out_release;
}
if (sk->sk_state == TCP_ESTABLISHED) {
err = -EISCONN; /* No reconnect on a seqpacket socket */
goto out_release;
}
sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
if (addr_len != sizeof(struct sockaddr_ax25) && addr_len != sizeof(struct full_sockaddr_ax25)) {
err = -EINVAL;
goto out_release;
}
if (addr->sax25_family != AF_NETROM) {
err = -EINVAL;
goto out_release;
}
if (sock_flag(sk, SOCK_ZAPPED)) { /* Must bind first - autobinding in this may or may not work */
sock_reset_flag(sk, SOCK_ZAPPED);
if ((dev = nr_dev_first()) == NULL) {
err = -ENETUNREACH;
goto out_release;
}
source = (ax25_address *)dev->dev_addr;
user = ax25_findbyuid(current_euid());
if (user) {
nr->user_addr = user->call;
ax25_uid_put(user);
} else {
if (ax25_uid_policy && !capable(CAP_NET_ADMIN)) {
dev_put(dev);
err = -EPERM;
goto out_release;
}
nr->user_addr = *source;
}
nr->source_addr = *source;
nr->device = dev;
dev_put(dev);
nr_insert_socket(sk); /* Finish the bind */
}
nr->dest_addr = addr->sax25_call;
release_sock(sk);
circuit = nr_find_next_circuit();
lock_sock(sk);
nr->my_index = circuit / 256;
nr->my_id = circuit % 256;
circuit++;
/* Move to connecting socket, start sending Connect Requests */
sock->state = SS_CONNECTING;
sk->sk_state = TCP_SYN_SENT;
nr_establish_data_link(sk);
nr->state = NR_STATE_1;
nr_start_heartbeat(sk);
/* Now the loop */
if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
err = -EINPROGRESS;
goto out_release;
}
/*
* A Connect Ack with Choke or timeout or failed routing will go to
* closed.
*/
if (sk->sk_state == TCP_SYN_SENT) {
DEFINE_WAIT(wait);
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
if (sk->sk_state != TCP_SYN_SENT)
break;
if (!signal_pending(current)) {
release_sock(sk);
schedule();
lock_sock(sk);
continue;
}
err = -ERESTARTSYS;
break;
}
finish_wait(sk_sleep(sk), &wait);
if (err)
goto out_release;
}
if (sk->sk_state != TCP_ESTABLISHED) {
sock->state = SS_UNCONNECTED;
err = sock_error(sk); /* Always set at this point */
goto out_release;
}
sock->state = SS_CONNECTED;
out_release:
release_sock(sk);
return err;
}
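/*
 * Illustration only, not part of the driver: a user-space sketch of the
 * connect path above. The ax25_aton_entry() helper, its header, and the
 * callsign value are assumptions (libax25); the block is guarded so it
 * never builds in-kernel.
 */
#ifdef NR_CONNECT_EXAMPLE
#include <string.h>
#include <sys/socket.h>
#include <netax25/ax25.h>
#include <netax25/axlib.h>
static int nr_connect_example(void)
{
struct sockaddr_ax25 peer;
int fd = socket(AF_NETROM, SOCK_SEQPACKET, 0);
if (fd < 0)
return -1;
memset(&peer, 0, sizeof(peer));
peer.sax25_family = AF_NETROM;
ax25_aton_entry("NB7J-4", peer.sax25_call.ax25_call);
/* An unbound socket is auto-bound to the first NET/ROM device above. */
return connect(fd, (struct sockaddr *)&peer, sizeof(peer));
}
#endif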
static int nr_accept(struct socket *sock, struct socket *newsock, int flags)
{
struct sk_buff *skb;
struct sock *newsk;
DEFINE_WAIT(wait);
struct sock *sk;
int err = 0;
if ((sk = sock->sk) == NULL)
return -EINVAL;
lock_sock(sk);
if (sk->sk_type != SOCK_SEQPACKET) {
err = -EOPNOTSUPP;
goto out_release;
}
if (sk->sk_state != TCP_LISTEN) {
err = -EINVAL;
goto out_release;
}
/*
* The write queue this time is holding sockets ready to use
* hooked into the SABM we saved
*/
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
skb = skb_dequeue(&sk->sk_receive_queue);
if (skb)
break;
if (flags & O_NONBLOCK) {
err = -EWOULDBLOCK;
break;
}
if (!signal_pending(current)) {
release_sock(sk);
schedule();
lock_sock(sk);
continue;
}
err = -ERESTARTSYS;
break;
}
finish_wait(sk_sleep(sk), &wait);
if (err)
goto out_release;
newsk = skb->sk;
sock_graft(newsk, newsock);
/* Now attach up the new socket */
kfree_skb(skb);
sk_acceptq_removed(sk);
out_release:
release_sock(sk);
return err;
}
static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
struct full_sockaddr_ax25 *sax = (struct full_sockaddr_ax25 *)uaddr;
struct sock *sk = sock->sk;
struct nr_sock *nr = nr_sk(sk);
lock_sock(sk);
if (peer != 0) {
if (sk->sk_state != TCP_ESTABLISHED) {
release_sock(sk);
return -ENOTCONN;
}
sax->fsa_ax25.sax25_family = AF_NETROM;
sax->fsa_ax25.sax25_ndigis = 1;
sax->fsa_ax25.sax25_call = nr->user_addr;
memset(sax->fsa_digipeater, 0, sizeof(sax->fsa_digipeater));
sax->fsa_digipeater[0] = nr->dest_addr;
*uaddr_len = sizeof(struct full_sockaddr_ax25);
} else {
sax->fsa_ax25.sax25_family = AF_NETROM;
sax->fsa_ax25.sax25_ndigis = 0;
sax->fsa_ax25.sax25_call = nr->source_addr;
*uaddr_len = sizeof(struct sockaddr_ax25);
}
release_sock(sk);
return 0;
}
int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
{
struct sock *sk;
struct sock *make;
struct nr_sock *nr_make;
ax25_address *src, *dest, *user;
unsigned short circuit_index, circuit_id;
unsigned short peer_circuit_index, peer_circuit_id;
unsigned short frametype, flags, window, timeout;
int ret;
skb->sk = NULL; /* Initially we don't know who it's for */
/*
* skb->data points to the netrom frame start
*/
src = (ax25_address *)(skb->data + 0);
dest = (ax25_address *)(skb->data + 7);
circuit_index = skb->data[15];
circuit_id = skb->data[16];
peer_circuit_index = skb->data[17];
peer_circuit_id = skb->data[18];
frametype = skb->data[19] & 0x0F;
flags = skb->data[19] & 0xF0;
/*
* Check for an incoming IP over NET/ROM frame.
*/
if (frametype == NR_PROTOEXT &&
circuit_index == NR_PROTO_IP && circuit_id == NR_PROTO_IP) {
skb_pull(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN);
skb_reset_transport_header(skb);
return nr_rx_ip(skb, dev);
}
/*
* Find an existing socket connection based on circuit ID; if it's
* a Connect Request, base it on their circuit ID.
*
* Circuit ID 0/0 is not valid but it could still be a "reset" for a
* circuit that no longer exists at the other end ...
*/
sk = NULL;
if (circuit_index == 0 && circuit_id == 0) {
if (frametype == NR_CONNACK && flags == NR_CHOKE_FLAG)
sk = nr_find_peer(peer_circuit_index, peer_circuit_id, src);
} else {
if (frametype == NR_CONNREQ)
sk = nr_find_peer(circuit_index, circuit_id, src);
else
sk = nr_find_socket(circuit_index, circuit_id);
}
if (sk != NULL) {
skb_reset_transport_header(skb);
if (frametype == NR_CONNACK && skb->len == 22)
nr_sk(sk)->bpqext = 1;
else
nr_sk(sk)->bpqext = 0;
ret = nr_process_rx_frame(sk, skb);
bh_unlock_sock(sk);
return ret;
}
/*
* Now it should be a CONNREQ.
*/
if (frametype != NR_CONNREQ) {
/*
* Here it would be nice to be able to send a reset but
* NET/ROM doesn't have one. We've tried to extend the protocol
* by sending NR_CONNACK | NR_CHOKE_FLAGS replies but that
* apparently kills BPQ boxes... :-(
* So now we try to follow the established behaviour of
* G8PZT's Xrouter which is sending packets with command type 7
* as an extension of the protocol.
*/
if (sysctl_netrom_reset_circuit &&
(frametype != NR_RESET || flags != 0))
nr_transmit_reset(skb, 1);
return 0;
}
sk = nr_find_listener(dest);
user = (ax25_address *)(skb->data + 21);
if (sk == NULL || sk_acceptq_is_full(sk) ||
(make = nr_make_new(sk)) == NULL) {
nr_transmit_refusal(skb, 0);
if (sk)
bh_unlock_sock(sk);
return 0;
}
window = skb->data[20];
skb->sk = make;
make->sk_state = TCP_ESTABLISHED;
/* Fill in his circuit details */
nr_make = nr_sk(make);
nr_make->source_addr = *dest;
nr_make->dest_addr = *src;
nr_make->user_addr = *user;
nr_make->your_index = circuit_index;
nr_make->your_id = circuit_id;
bh_unlock_sock(sk);
circuit = nr_find_next_circuit();
bh_lock_sock(sk);
nr_make->my_index = circuit / 256;
nr_make->my_id = circuit % 256;
circuit++;
/* Window negotiation */
if (window < nr_make->window)
nr_make->window = window;
/* L4 timeout negotiation */
if (skb->len == 37) {
timeout = skb->data[36] * 256 + skb->data[35];
if (timeout * HZ < nr_make->t1)
nr_make->t1 = timeout * HZ;
nr_make->bpqext = 1;
} else {
nr_make->bpqext = 0;
}
nr_write_internal(make, NR_CONNACK);
nr_make->condition = 0x00;
nr_make->vs = 0;
nr_make->va = 0;
nr_make->vr = 0;
nr_make->vl = 0;
nr_make->state = NR_STATE_3;
sk_acceptq_added(sk);
skb_queue_head(&sk->sk_receive_queue, skb);
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk, skb->len);
bh_unlock_sock(sk);
nr_insert_socket(make);
nr_start_heartbeat(make);
nr_start_idletimer(make);
return 1;
}
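/*
 * Annotation only, not part of the driver: the transport header offsets
 * decoded by nr_rx_frame() above, collected in one place.
 *
 * skb->data[0..6]   source callsign (ax25_address)
 * skb->data[7..13]  destination callsign (ax25_address)
 * skb->data[15]     circuit index
 * skb->data[16]     circuit id
 * skb->data[17]     peer circuit index
 * skb->data[18]     peer circuit id
 * skb->data[19]     frame type (low nibble) and flags (high nibble)
 * skb->data[20]     proposed window (Connect Request)
 * skb->data[21..27] originating user callsign (Connect Request)
 * skb->data[35..36] little-endian T1 in seconds (BPQ extension, present
 *                   when the Connect Request is 37 bytes long)
 */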
static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct nr_sock *nr = nr_sk(sk);
struct sockaddr_ax25 *usax = (struct sockaddr_ax25 *)msg->msg_name;
int err;
struct sockaddr_ax25 sax;
struct sk_buff *skb;
unsigned char *asmptr;
int size;
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
return -EINVAL;
lock_sock(sk);
if (sock_flag(sk, SOCK_ZAPPED)) {
err = -EADDRNOTAVAIL;
goto out;
}
if (sk->sk_shutdown & SEND_SHUTDOWN) {
send_sig(SIGPIPE, current, 0);
err = -EPIPE;
goto out;
}
if (nr->device == NULL) {
err = -ENETUNREACH;
goto out;
}
if (usax) {
if (msg->msg_namelen < sizeof(sax)) {
err = -EINVAL;
goto out;
}
sax = *usax;
if (ax25cmp(&nr->dest_addr, &sax.sax25_call) != 0) {
err = -EISCONN;
goto out;
}
if (sax.sax25_family != AF_NETROM) {
err = -EINVAL;
goto out;
}
} else {
if (sk->sk_state != TCP_ESTABLISHED) {
err = -ENOTCONN;
goto out;
}
sax.sax25_family = AF_NETROM;
sax.sax25_call = nr->dest_addr;
}
/* Build a packet - the conventional user limit is 236 bytes. We can
do ludicrously large NetROM frames but must not overflow */
if (len > 65536) {
err = -EMSGSIZE;
goto out;
}
size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN;
if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
goto out;
skb_reserve(skb, size - len);
skb_reset_transport_header(skb);
/*
* Push down the NET/ROM header
*/
asmptr = skb_push(skb, NR_TRANSPORT_LEN);
/* Build a NET/ROM Transport header */
*asmptr++ = nr->your_index;
*asmptr++ = nr->your_id;
*asmptr++ = 0; /* To be filled in later */
*asmptr++ = 0; /* Ditto */
*asmptr++ = NR_INFO;
/*
* Put the data on the end
*/
skb_put(skb, len);
/* User data follows immediately after the NET/ROM transport header */
if (memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len)) {
kfree_skb(skb);
err = -EFAULT;
goto out;
}
if (sk->sk_state != TCP_ESTABLISHED) {
kfree_skb(skb);
err = -ENOTCONN;
goto out;
}
nr_output(sk, skb); /* Shove it onto the queue */
err = len;
out:
release_sock(sk);
return err;
}
static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name;
size_t copied;
struct sk_buff *skb;
int er;
/*
* This works for seqpacket too. The receiver has ordered the queue for
* us! We do one quick check first though
*/
lock_sock(sk);
if (sk->sk_state != TCP_ESTABLISHED) {
release_sock(sk);
return -ENOTCONN;
}
/* Now we can treat all alike */
if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) {
release_sock(sk);
return er;
}
skb_reset_transport_header(skb);
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
er = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (er < 0) {
skb_free_datagram(sk, skb);
release_sock(sk);
return er;
}
if (sax != NULL) {
memset(sax, 0, sizeof(*sax));
sax->sax25_family = AF_NETROM;
skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
AX25_ADDR_LEN);
msg->msg_namelen = sizeof(*sax);
}
skb_free_datagram(sk, skb);
release_sock(sk);
return copied;
}
static int nr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
void __user *argp = (void __user *)arg;
int ret;
switch (cmd) {
case TIOCOUTQ: {
long amount;
lock_sock(sk);
amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
if (amount < 0)
amount = 0;
release_sock(sk);
return put_user(amount, (int __user *)argp);
}
case TIOCINQ: {
struct sk_buff *skb;
long amount = 0L;
lock_sock(sk);
/* These two are safe on a single CPU system as only user tasks fiddle here */
if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
amount = skb->len;
release_sock(sk);
return put_user(amount, (int __user *)argp);
}
case SIOCGSTAMP:
lock_sock(sk);
ret = sock_get_timestamp(sk, argp);
release_sock(sk);
return ret;
case SIOCGSTAMPNS:
lock_sock(sk);
ret = sock_get_timestampns(sk, argp);
release_sock(sk);
return ret;
case SIOCGIFADDR:
case SIOCSIFADDR:
case SIOCGIFDSTADDR:
case SIOCSIFDSTADDR:
case SIOCGIFBRDADDR:
case SIOCSIFBRDADDR:
case SIOCGIFNETMASK:
case SIOCSIFNETMASK:
case SIOCGIFMETRIC:
case SIOCSIFMETRIC:
return -EINVAL;
case SIOCADDRT:
case SIOCDELRT:
case SIOCNRDECOBS:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
return nr_rt_ioctl(cmd, argp);
default:
return -ENOIOCTLCMD;
}
return 0;
}
#ifdef CONFIG_PROC_FS
static void *nr_info_start(struct seq_file *seq, loff_t *pos)
{
spin_lock_bh(&nr_list_lock);
return seq_hlist_start_head(&nr_list, *pos);
}
static void *nr_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
return seq_hlist_next(v, &nr_list, pos);
}
static void nr_info_stop(struct seq_file *seq, void *v)
{
spin_unlock_bh(&nr_list_lock);
}
static int nr_info_show(struct seq_file *seq, void *v)
{
struct sock *s = sk_entry(v);
struct net_device *dev;
struct nr_sock *nr;
const char *devname;
char buf[11];
if (v == SEQ_START_TOKEN)
seq_puts(seq,
"user_addr dest_node src_node dev my your st vs vr va t1 t2 t4 idle n2 wnd Snd-Q Rcv-Q inode\n");
else {
bh_lock_sock(s);
nr = nr_sk(s);
if ((dev = nr->device) == NULL)
devname = "???";
else
devname = dev->name;
seq_printf(seq, "%-9s ", ax2asc(buf, &nr->user_addr));
seq_printf(seq, "%-9s ", ax2asc(buf, &nr->dest_addr));
seq_printf(seq,
"%-9s %-3s %02X/%02X %02X/%02X %2d %3d %3d %3d %3lu/%03lu %2lu/%02lu %3lu/%03lu %3lu/%03lu %2d/%02d %3d %5d %5d %ld\n",
ax2asc(buf, &nr->source_addr),
devname,
nr->my_index,
nr->my_id,
nr->your_index,
nr->your_id,
nr->state,
nr->vs,
nr->vr,
nr->va,
ax25_display_timer(&nr->t1timer) / HZ,
nr->t1 / HZ,
ax25_display_timer(&nr->t2timer) / HZ,
nr->t2 / HZ,
ax25_display_timer(&nr->t4timer) / HZ,
nr->t4 / HZ,
ax25_display_timer(&nr->idletimer) / (60 * HZ),
nr->idle / (60 * HZ),
nr->n2count,
nr->n2,
nr->window,
sk_wmem_alloc_get(s),
sk_rmem_alloc_get(s),
s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
bh_unlock_sock(s);
}
return 0;
}
static const struct seq_operations nr_info_seqops = {
.start = nr_info_start,
.next = nr_info_next,
.stop = nr_info_stop,
.show = nr_info_show,
};
static int nr_info_open(struct inode *inode, struct file *file)
{
return seq_open(file, &nr_info_seqops);
}
static const struct file_operations nr_info_fops = {
.owner = THIS_MODULE,
.open = nr_info_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#endif /* CONFIG_PROC_FS */
static const struct net_proto_family nr_family_ops = {
.family = PF_NETROM,
.create = nr_create,
.owner = THIS_MODULE,
};
static const struct proto_ops nr_proto_ops = {
.family = PF_NETROM,
.owner = THIS_MODULE,
.release = nr_release,
.bind = nr_bind,
.connect = nr_connect,
.socketpair = sock_no_socketpair,
.accept = nr_accept,
.getname = nr_getname,
.poll = datagram_poll,
.ioctl = nr_ioctl,
.listen = nr_listen,
.shutdown = sock_no_shutdown,
.setsockopt = nr_setsockopt,
.getsockopt = nr_getsockopt,
.sendmsg = nr_sendmsg,
.recvmsg = nr_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
};
static struct notifier_block nr_dev_notifier = {
.notifier_call = nr_device_event,
};
static struct net_device **dev_nr;
static struct ax25_protocol nr_pid = {
.pid = AX25_P_NETROM,
.func = nr_route_frame
};
static struct ax25_linkfail nr_linkfail_notifier = {
.func = nr_link_failed,
};
static int __init nr_proto_init(void)
{
int i;
int rc = proto_register(&nr_proto, 0);
if (rc != 0)
goto out;
if (nr_ndevs > 0x7fffffff/sizeof(struct net_device *)) {
printk(KERN_ERR "NET/ROM: nr_proto_init - nr_ndevs parameter too large\n");
proto_unregister(&nr_proto);
return -EINVAL;
}
dev_nr = kzalloc(nr_ndevs * sizeof(struct net_device *), GFP_KERNEL);
if (dev_nr == NULL) {
printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n");
proto_unregister(&nr_proto);
return -ENOMEM;
}
for (i = 0; i < nr_ndevs; i++) {
char name[IFNAMSIZ];
struct net_device *dev;
sprintf(name, "nr%d", i);
dev = alloc_netdev(0, name, nr_setup);
if (!dev) {
printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device structure\n");
goto fail;
}
dev->base_addr = i;
if (register_netdev(dev)) {
printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register network device\n");
free_netdev(dev);
goto fail;
}
nr_set_lockdep_key(dev);
dev_nr[i] = dev;
}
if (sock_register(&nr_family_ops)) {
printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register socket family\n");
goto fail;
}
register_netdevice_notifier(&nr_dev_notifier);
ax25_register_pid(&nr_pid);
ax25_linkfail_register(&nr_linkfail_notifier);
#ifdef CONFIG_SYSCTL
nr_register_sysctl();
#endif
nr_loopback_init();
proc_create("nr", S_IRUGO, init_net.proc_net, &nr_info_fops);
proc_create("nr_neigh", S_IRUGO, init_net.proc_net, &nr_neigh_fops);
proc_create("nr_nodes", S_IRUGO, init_net.proc_net, &nr_nodes_fops);
out:
return rc;
fail:
while (--i >= 0) {
unregister_netdev(dev_nr[i]);
free_netdev(dev_nr[i]);
}
kfree(dev_nr);
proto_unregister(&nr_proto);
rc = -1;
goto out;
}
module_init(nr_proto_init);
module_param(nr_ndevs, int, 0);
MODULE_PARM_DESC(nr_ndevs, "number of NET/ROM devices");
MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
MODULE_DESCRIPTION("The amateur radio NET/ROM network and transport layer protocol");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_NETROM);
static void __exit nr_exit(void)
{
int i;
remove_proc_entry("nr", init_net.proc_net);
remove_proc_entry("nr_neigh", init_net.proc_net);
remove_proc_entry("nr_nodes", init_net.proc_net);
nr_loopback_clear();
nr_rt_free();
#ifdef CONFIG_SYSCTL
nr_unregister_sysctl();
#endif
ax25_linkfail_release(&nr_linkfail_notifier);
ax25_protocol_release(AX25_P_NETROM);
unregister_netdevice_notifier(&nr_dev_notifier);
sock_unregister(PF_NETROM);
for (i = 0; i < nr_ndevs; i++) {
struct net_device *dev = dev_nr[i];
if (dev) {
unregister_netdev(dev);
free_netdev(dev);
}
}
kfree(dev_nr);
proto_unregister(&nr_proto);
}
module_exit(nr_exit);
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_5691_0 |
crossvul-cpp_data_good_860_0 | /* $Id: upnpevents.c,v 1.39 2018/03/12 22:41:54 nanard Exp $ */
/* vim: tabstop=4 shiftwidth=4 noexpandtab
* MiniUPnP project
* http://miniupnp.free.fr/ or http://miniupnp.tuxfamily.org/
* (c) 2008-2018 Thomas Bernard
* This software is subject to the conditions detailed
* in the LICENCE file provided within the distribution */
#include <stdio.h>
#include <string.h>
#include <syslog.h>
#include <sys/queue.h>
#include <stdlib.h>
#include <unistd.h>
#include <time.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <errno.h>
#include "config.h"
#if defined(LIB_UUID)
/* as found on linux */
#include <uuid/uuid.h>
#elif defined(BSD_UUID)
#include <uuid.h>
#endif /* LIB_UUID / BSD_UUID */
#include "upnpevents.h"
#include "miniupnpdpath.h"
#include "upnpglobalvars.h"
#include "upnpdescgen.h"
#include "upnputils.h"
#ifdef ENABLE_EVENTS
/*enum subscriber_service_enum {
EWanCFG = 1,
EWanIPC,
EL3F
};*/
/* structure definitions */
struct subscriber {
LIST_ENTRY(subscriber) entries;
struct upnp_event_notify * notify;
time_t timeout;
uint32_t seq;
enum subscriber_service_enum service;
char uuid[42];
char callback[];
};
struct upnp_event_notify {
LIST_ENTRY(upnp_event_notify) entries;
int s; /* socket */
enum { ECreated=1,
EConnecting,
ESending,
EWaitingForResponse,
EFinished,
EError } state;
struct subscriber * sub;
char * buffer;
int buffersize;
int tosend;
int sent;
const char * path;
#ifdef ENABLE_IPV6
int ipv6;
char addrstr[48];
#else
char addrstr[16];
#endif
char portstr[8];
};
/* prototypes */
static void
upnp_event_create_notify(struct subscriber * sub);
/* Subscriber list */
LIST_HEAD(listhead, subscriber) subscriberlist = { NULL };
/* notify list */
LIST_HEAD(listheadnotif, upnp_event_notify) notifylist = { NULL };
/* create a new subscriber */
static struct subscriber *
newSubscriber(const char * eventurl, const char * callback, int callbacklen)
{
struct subscriber * tmp;
if(!eventurl || !callback || !callbacklen)
return NULL;
tmp = calloc(1, sizeof(struct subscriber)+callbacklen+1);
if(!tmp)
return NULL;
if(strcmp(eventurl, WANCFG_EVENTURL)==0)
tmp->service = EWanCFG;
else if(strcmp(eventurl, WANIPC_EVENTURL)==0)
tmp->service = EWanIPC;
#ifdef ENABLE_L3F_SERVICE
else if(strcmp(eventurl, L3F_EVENTURL)==0)
tmp->service = EL3F;
#endif
#ifdef ENABLE_6FC_SERVICE
else if(strcmp(eventurl, WANIP6FC_EVENTURL)==0)
tmp->service = E6FC;
#endif
#ifdef ENABLE_DP_SERVICE
else if(strcmp(eventurl, DP_EVENTURL)==0)
tmp->service = EDP;
#endif
else {
free(tmp);
return NULL;
}
memcpy(tmp->callback, callback, callbacklen);
tmp->callback[callbacklen] = '\0';
#if defined(LIB_UUID)
{
uuid_t uuid;
uuid_generate(uuid);
memcpy(tmp->uuid, "uuid:", 5);
uuid_unparse(uuid, tmp->uuid + 5);
}
#elif defined(BSD_UUID)
{
uuid_t uuid;
uint32_t status;
uuid_create(&uuid, &status);
if(status != uuid_s_ok) {
syslog(LOG_ERR, "uuid_create() failed (%u)", status);
} else {
char * uuid_str;
uuid_to_string(&uuid, &uuid_str, &status);
if(status != uuid_s_ok) {
syslog(LOG_ERR, "uuid_to_string() failed (%u)", status);
} else {
if(strlen(uuid_str) != 36) {
syslog(LOG_ERR, "uuid_to_string() returned %s", uuid_str);
status = (uint32_t)-1;
} else {
memcpy(tmp->uuid, "uuid:", 5);
memcpy(tmp->uuid + 5, uuid_str, 36);
tmp->uuid[sizeof(tmp->uuid)-1] = '\0';
}
free(uuid_str);
}
}
if(status != uuid_s_ok) {
/* make a dummy uuid */
strncpy(tmp->uuid, uuidvalue_igd, sizeof(tmp->uuid));
tmp->uuid[sizeof(tmp->uuid)-1] = '\0';
snprintf(tmp->uuid+sizeof(tmp->uuid)-5, 5, "%04lx", random() & 0xffff);
}
}
#else
/* make a dummy uuid */
strncpy(tmp->uuid, uuidvalue_igd, sizeof(tmp->uuid));
tmp->uuid[sizeof(tmp->uuid)-1] = '\0';
snprintf(tmp->uuid+sizeof(tmp->uuid)-5, 5, "%04lx", random() & 0xffff);
#endif
return tmp;
}
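/*
 * Note (annotation, not original code): the SID generated above is always
 * the fixed-width string "uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" -
 * 5 prefix bytes + 36 UUID characters + the terminating NUL, hence the
 * 42-byte uuid field and the length-41 memcmp() checks further down.
 */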
/* creates a new subscriber and adds it to the subscriber list
* also initiates the first notify
* TODO : add a check on the number of subscribers in order to
* prevent memory exhaustion... */
const char *
upnpevents_addSubscriber(const char * eventurl,
const char * callback, int callbacklen,
int timeout)
{
struct subscriber * tmp;
/*static char uuid[42];*/
/* "uuid:00000000-0000-0000-0000-000000000000"; 5+36+1=42bytes */
syslog(LOG_DEBUG, "addSubscriber(%s, %.*s, %d)",
eventurl, callbacklen, callback, timeout);
/*strncpy(uuid, uuidvalue, sizeof(uuid));
uuid[sizeof(uuid)-1] = '\0';*/
tmp = newSubscriber(eventurl, callback, callbacklen);
if(!tmp)
return NULL;
if(timeout)
tmp->timeout = upnp_time() + timeout;
LIST_INSERT_HEAD(&subscriberlist, tmp, entries);
upnp_event_create_notify(tmp);
return tmp->uuid;
}
/* renew a subscription (update the timeout) */
const char *
upnpevents_renewSubscription(const char * sid, int sidlen, int timeout)
{
struct subscriber * sub;
for(sub = subscriberlist.lh_first; sub != NULL; sub = sub->entries.le_next) {
if((sidlen == 41) && (memcmp(sid, sub->uuid, 41) == 0)) {
#ifdef UPNP_STRICT
/* check if the subscription already timeouted */
if(sub->timeout && upnp_time() > sub->timeout)
continue;
#endif
sub->timeout = (timeout ? upnp_time() + timeout : 0);
return sub->uuid;
}
}
return NULL;
}
int
upnpevents_removeSubscriber(const char * sid, int sidlen)
{
struct subscriber * sub;
if(!sid)
return -1;
for(sub = subscriberlist.lh_first; sub != NULL; sub = sub->entries.le_next) {
if((sidlen == 41) && (memcmp(sid, sub->uuid, 41) == 0)) {
if(sub->notify) {
sub->notify->sub = NULL;
}
LIST_REMOVE(sub, entries);
free(sub);
return 0;
}
}
return -1;
}
/* notifies all subscribers of a port mapping change
* or an external IP address change */
void
upnp_event_var_change_notify(enum subscriber_service_enum service)
{
struct subscriber * sub;
for(sub = subscriberlist.lh_first; sub != NULL; sub = sub->entries.le_next) {
if(sub->service == service && sub->notify == NULL)
upnp_event_create_notify(sub);
}
}
/* create and add the notify object to the list */
static void
upnp_event_create_notify(struct subscriber * sub)
{
struct upnp_event_notify * obj;
/*struct timeval sock_timeout;*/
obj = calloc(1, sizeof(struct upnp_event_notify));
if(!obj) {
syslog(LOG_ERR, "%s: calloc(): %m", "upnp_event_create_notify");
return;
}
obj->sub = sub;
obj->state = ECreated;
#ifdef ENABLE_IPV6
obj->s = socket((obj->sub->callback[7] == '[') ? PF_INET6 : PF_INET,
SOCK_STREAM, 0);
#else
obj->s = socket(PF_INET, SOCK_STREAM, 0);
#endif
if(obj->s<0) {
syslog(LOG_ERR, "%s: socket(): %m", "upnp_event_create_notify");
goto error;
}
#if 0 /* does not work for non blocking connect() */
/* set timeout to 3 seconds */
sock_timeout.tv_sec = 3;
sock_timeout.tv_usec = 0;
if(setsockopt(obj->s, SOL_SOCKET, SO_RCVTIMEO, &sock_timeout, sizeof(struct timeval)) < 0) {
syslog(LOG_WARNING, "%s: setsockopt(SO_RCVTIMEO): %m",
"upnp_event_create_notify");
}
sock_timeout.tv_sec = 3;
sock_timeout.tv_usec = 0;
if(setsockopt(obj->s, SOL_SOCKET, SO_SNDTIMEO, &sock_timeout, sizeof(struct timeval)) < 0) {
syslog(LOG_WARNING, "%s: setsockopt(SO_SNDTIMEO): %m",
"upnp_event_create_notify");
}
#endif
/* set socket non blocking */
if(!set_non_blocking(obj->s)) {
syslog(LOG_ERR, "%s: set_non_blocking(): %m",
"upnp_event_create_notify");
goto error;
}
if(sub)
sub->notify = obj;
LIST_INSERT_HEAD(¬ifylist, obj, entries);
return;
error:
if(obj->s >= 0)
close(obj->s);
free(obj);
}
static void
upnp_event_notify_connect(struct upnp_event_notify * obj)
{
unsigned int i;
const char * p;
unsigned short port;
#ifdef ENABLE_IPV6
struct sockaddr_storage addr;
socklen_t addrlen;
#else
struct sockaddr_in addr;
socklen_t addrlen;
#endif
if(!obj)
return;
memset(&addr, 0, sizeof(addr));
i = 0;
if(obj->sub == NULL) {
obj->state = EError;
return;
}
p = obj->sub->callback;
p += 7; /* http:// */
#ifdef ENABLE_IPV6
if(*p == '[') { /* ip v6 */
obj->addrstr[i++] = '[';
p++;
obj->ipv6 = 1;
while(*p != ']' && i < (sizeof(obj->addrstr)-1))
obj->addrstr[i++] = *(p++);
if(*p == ']')
p++;
if(i < (sizeof(obj->addrstr)-1))
obj->addrstr[i++] = ']';
} else {
#endif
while(*p != '/' && *p != ':' && i < (sizeof(obj->addrstr)-1))
obj->addrstr[i++] = *(p++);
#ifdef ENABLE_IPV6
}
#endif
obj->addrstr[i] = '\0';
if(*p == ':') {
obj->portstr[0] = *p;
i = 1;
p++;
port = (unsigned short)atoi(p);
while(*p != '/' && *p != '\0') {
if(i<7) obj->portstr[i++] = *p;
p++;
}
obj->portstr[i] = 0;
} else {
port = 80;
obj->portstr[0] = '\0';
}
obj->path = p;
#ifdef ENABLE_IPV6
if(obj->ipv6) {
char addrstr_tmp[48];
struct sockaddr_in6 * sa = (struct sockaddr_in6 *)&addr;
sa->sin6_family = AF_INET6;
i = (int)strlen(obj->addrstr);
if(i > 2) {
i -= 2;
memcpy(addrstr_tmp, obj->addrstr + 1, i);
addrstr_tmp[i] = '\0';
inet_pton(AF_INET6, addrstr_tmp, &(sa->sin6_addr));
}
sa->sin6_port = htons(port);
addrlen = sizeof(struct sockaddr_in6);
} else {
struct sockaddr_in * sa = (struct sockaddr_in *)&addr;
sa->sin_family = AF_INET;
inet_pton(AF_INET, obj->addrstr, &(sa->sin_addr));
sa->sin_port = htons(port);
addrlen = sizeof(struct sockaddr_in);
}
#else
addr.sin_family = AF_INET;
inet_aton(obj->addrstr, &addr.sin_addr);
addr.sin_port = htons(port);
addrlen = sizeof(struct sockaddr_in);
#endif
syslog(LOG_DEBUG, "%s: '%s' %hu '%s'", "upnp_event_notify_connect",
obj->addrstr, port, obj->path);
obj->state = EConnecting;
if(connect(obj->s, (struct sockaddr *)&addr, addrlen) < 0) {
if(errno != EINPROGRESS && errno != EWOULDBLOCK) {
syslog(LOG_ERR, "%s: connect(%d, %s, %u): %m",
"upnp_event_notify_connect", obj->s,
obj->addrstr, addrlen);
obj->state = EError;
}
}
}
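/*
 * Illustration only: the split performed above on a subscriber callback
 * URL. For "http://192.0.2.10:5000/evt" the fields end up as
 * addrstr = "192.0.2.10", portstr = ":5000", path = "/evt", port = 5000;
 * without an explicit port, portstr is "" and port defaults to 80. With
 * ENABLE_IPV6, "http://[2001:db8::1]:5000/evt" keeps the brackets in
 * addrstr ("[2001:db8::1]") and strips them again before inet_pton().
 */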
static void upnp_event_prepare(struct upnp_event_notify * obj)
{
static const char notifymsg[] =
"NOTIFY %s HTTP/1.1\r\n"
"Host: %s%s\r\n"
#if (UPNP_VERSION_MAJOR == 1) && (UPNP_VERSION_MINOR == 0)
"Content-Type: text/xml\r\n" /* UDA v1.0 */
#else
"Content-Type: text/xml; charset=\"utf-8\"\r\n" /* UDA v1.1 or later */
#endif
"Content-Length: %d\r\n"
"NT: upnp:event\r\n"
"NTS: upnp:propchange\r\n"
"SID: %s\r\n"
"SEQ: %u\r\n"
"Connection: close\r\n"
"Cache-Control: no-cache\r\n"
"\r\n"
"%.*s\r\n";
char * xml;
int l;
if(obj->sub == NULL) {
obj->state = EError;
return;
}
switch(obj->sub->service) {
case EWanCFG:
xml = getVarsWANCfg(&l);
break;
case EWanIPC:
xml = getVarsWANIPCn(&l);
break;
#ifdef ENABLE_L3F_SERVICE
case EL3F:
xml = getVarsL3F(&l);
break;
#endif
#ifdef ENABLE_6FC_SERVICE
case E6FC:
xml = getVars6FC(&l);
break;
#endif
#ifdef ENABLE_DP_SERVICE
case EDP:
xml = getVarsDP(&l);
break;
#endif
default:
xml = NULL;
l = 0;
}
obj->buffersize = 1024;
for (;;) {
obj->buffer = malloc(obj->buffersize);
if(!obj->buffer) {
syslog(LOG_ERR, "%s: malloc returned NULL", "upnp_event_prepare");
if(xml) {
free(xml);
}
obj->state = EError;
return;
}
obj->tosend = snprintf(obj->buffer, obj->buffersize, notifymsg,
obj->path, obj->addrstr, obj->portstr, l+2,
obj->sub->uuid, obj->sub->seq,
l, xml);
if (obj->tosend < 0) {
syslog(LOG_ERR, "%s: snprintf() failed", "upnp_event_prepare");
if(xml) {
free(xml);
}
obj->state = EError;
return;
} else if (obj->tosend < obj->buffersize) {
break; /* the buffer was large enough */
}
/* Try again with a buffer big enough */
free(obj->buffer);
obj->buffersize = obj->tosend + 1; /* reserve space for the final 0 */
}
if(xml) {
free(xml);
xml = NULL;
}
obj->state = ESending;
}
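/*
 * Illustration only: with the fields parsed earlier, the notifymsg
 * template expands to a request shaped like this (UDA 1.1 build; the body
 * is whatever the getVars*() call produced, shortened here):
 *
 * NOTIFY /evt HTTP/1.1
 * Host: 192.0.2.10:5000
 * Content-Type: text/xml; charset="utf-8"
 * Content-Length: <l+2>
 * NT: upnp:event
 * NTS: upnp:propchange
 * SID: uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
 * SEQ: 0
 * Connection: close
 * Cache-Control: no-cache
 *
 * <propertyset XML>
 *
 * Content-Length is l+2 because the template appends "\r\n" after the body.
 */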
static void upnp_event_send(struct upnp_event_notify * obj)
{
int i;
syslog(LOG_DEBUG, "%s: sending event notify message to %s%s",
"upnp_event_send", obj->addrstr, obj->portstr);
syslog(LOG_DEBUG, "%s: msg: %s",
"upnp_event_send", obj->buffer + obj->sent);
i = send(obj->s, obj->buffer + obj->sent, obj->tosend - obj->sent, 0);
if(i<0) {
if(errno != EAGAIN && errno != EWOULDBLOCK && errno != EINTR) {
syslog(LOG_NOTICE, "%s: send(%s%s): %m", "upnp_event_send",
obj->addrstr, obj->portstr);
obj->state = EError;
return;
} else {
/* EAGAIN or EWOULDBLOCK or EINTR : no data sent */
i = 0;
}
}
if(i != (obj->tosend - obj->sent))
syslog(LOG_NOTICE, "%s: %d bytes send out of %d",
"upnp_event_send", i, obj->tosend - obj->sent);
obj->sent += i;
if(obj->sent == obj->tosend)
obj->state = EWaitingForResponse;
}
static void upnp_event_recv(struct upnp_event_notify * obj)
{
int n;
n = recv(obj->s, obj->buffer, obj->buffersize, 0);
if(n<0) {
if(errno != EAGAIN &&
errno != EWOULDBLOCK &&
errno != EINTR) {
syslog(LOG_ERR, "%s: recv(): %m", "upnp_event_recv");
obj->state = EError;
}
return;
}
syslog(LOG_DEBUG, "%s: (%dbytes) %.*s", "upnp_event_recv",
n, n, obj->buffer);
/* TODO : do something with the data received ?
* right now, n (number of bytes received) is ignored
* We may need to recv() more bytes. */
obj->state = EFinished;
if(obj->sub)
obj->sub->seq++;
}
static void
upnp_event_process_notify(struct upnp_event_notify * obj)
{
int err;
socklen_t len;
switch(obj->state) {
case EConnecting:
/* now connected or failed to connect */
len = sizeof(err);
if(getsockopt(obj->s, SOL_SOCKET, SO_ERROR, &err, &len) < 0) {
syslog(LOG_ERR, "%s: getsockopt: %m", "upnp_event_process_notify");
obj->state = EError;
break;
}
if(err != 0) {
errno = err;
syslog(LOG_WARNING, "%s: connect(%s%s): %m",
"upnp_event_process_notify",
obj->addrstr, obj->portstr);
obj->state = EError;
break;
}
upnp_event_prepare(obj);
if(obj->state == ESending)
upnp_event_send(obj);
break;
case ESending:
upnp_event_send(obj);
break;
case EWaitingForResponse:
upnp_event_recv(obj);
break;
case EFinished:
close(obj->s);
obj->s = -1;
break;
default:
syslog(LOG_ERR, "%s: unknown state", "upnp_event_process_notify");
}
}
void upnpevents_selectfds(fd_set *readset, fd_set *writeset, int * max_fd)
{
struct upnp_event_notify * obj;
for(obj = notifylist.lh_first; obj != NULL; obj = obj->entries.le_next) {
syslog(LOG_DEBUG, "upnpevents_selectfds: %p %d %d",
obj, obj->state, obj->s);
if(obj->s >= 0) {
switch(obj->state) {
case ECreated:
upnp_event_notify_connect(obj);
if(obj->state != EConnecting)
break;
case EConnecting:
case ESending:
FD_SET(obj->s, writeset);
if(obj->s > *max_fd)
*max_fd = obj->s;
break;
case EWaitingForResponse:
FD_SET(obj->s, readset);
if(obj->s > *max_fd)
*max_fd = obj->s;
break;
default:
;
}
}
}
}
void upnpevents_processfds(fd_set *readset, fd_set *writeset)
{
struct upnp_event_notify * obj;
struct upnp_event_notify * next;
struct subscriber * sub;
struct subscriber * subnext;
time_t curtime;
for(obj = notifylist.lh_first; obj != NULL; obj = obj->entries.le_next) {
syslog(LOG_DEBUG, "%s: %p %d %d %d %d",
"upnpevents_processfds", obj, obj->state, obj->s,
FD_ISSET(obj->s, readset), FD_ISSET(obj->s, writeset));
if(obj->s >= 0) {
if(FD_ISSET(obj->s, readset) || FD_ISSET(obj->s, writeset))
upnp_event_process_notify(obj);
}
}
obj = notifylist.lh_first;
while(obj != NULL) {
next = obj->entries.le_next;
if(obj->state == EError || obj->state == EFinished) {
if(obj->s >= 0) {
close(obj->s);
}
if(obj->sub)
obj->sub->notify = NULL;
/* remove also the subscriber from the list if there was an error */
if(obj->state == EError && obj->sub) {
syslog(LOG_ERR, "%s: %p, remove subscriber %s after an ERROR cb: %s",
"upnpevents_processfds", obj, obj->sub->uuid, obj->sub->callback);
LIST_REMOVE(obj->sub, entries);
free(obj->sub);
}
if(obj->buffer) {
free(obj->buffer);
}
LIST_REMOVE(obj, entries);
free(obj);
}
obj = next;
}
/* remove timeouted subscribers */
curtime = upnp_time();
for(sub = subscriberlist.lh_first; sub != NULL; ) {
subnext = sub->entries.le_next;
if(sub->timeout && curtime > sub->timeout && sub->notify == NULL) {
syslog(LOG_INFO, "subscriber timeouted : %u > %u SID=%s",
(unsigned)curtime, (unsigned)sub->timeout, sub->uuid);
LIST_REMOVE(sub, entries);
free(sub);
}
sub = subnext;
}
}
#ifdef USE_MINIUPNPDCTL
void write_events_details(int s) {
int n;
char buff[80];
struct upnp_event_notify * obj;
struct subscriber * sub;
write(s, "Events details :\n", 17);
for(obj = notifylist.lh_first; obj != NULL; obj = obj->entries.le_next) {
n = snprintf(buff, sizeof(buff), " %p sub=%p state=%d s=%d\n",
obj, obj->sub, obj->state, obj->s);
write(s, buff, n);
}
write(s, "Subscribers :\n", 14);
for(sub = subscriberlist.lh_first; sub != NULL; sub = sub->entries.le_next) {
n = snprintf(buff, sizeof(buff), " %p timeout=%d seq=%u service=%d\n",
sub, (int)sub->timeout, sub->seq, sub->service);
write(s, buff, n);
n = snprintf(buff, sizeof(buff), " notify=%p %s\n",
sub->notify, sub->uuid);
write(s, buff, n);
n = snprintf(buff, sizeof(buff), " %s\n",
sub->callback);
write(s, buff, n);
}
}
#endif
#endif
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_860_0 |
crossvul-cpp_data_bad_725_1 | /*
* Univention Directory Notifier
*
* Copyright 2004-2019 Univention GmbH
*
* http://www.univention.de/
*
* All rights reserved.
*
* The source code of this program is made available
* under the terms of the GNU Affero General Public License version 3
* (GNU AGPL V3) as published by the Free Software Foundation.
*
* Binary versions of this program provided by Univention to you as
* well as other copyrighted, protected or trademarked materials like
* Logos, graphics, fonts, specific documentations and configurations,
* cryptographic keys etc. are subject to a license agreement between
* you and Univention and not subject to the GNU AGPL V3.
*
* In the case you use this program under the terms of the GNU AGPL V3,
* the program is provided in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public
* License with the Debian GNU/Linux or Univention distribution in file
* /usr/share/common-licenses/AGPL-3; if not, see
* <http://www.gnu.org/licenses/>.
*/
#define __USE_GNU
#include <sys/types.h>
#include <sys/socket.h>
#include <stdio.h>
#include <netinet/in.h>
#include <sys/time.h>
#include <sys/ioctl.h>
#include <sys/un.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdint.h>
#include <univention/debug.h>
#include "notify.h"
#include "network.h"
#include "cache.h"
#include "sem.h"
extern int sem_id;
extern fd_set readfds;
extern NotifyId_t notify_last_id;
extern unsigned long SCHEMA_ID;
/* read one line from a network packet */
int get_network_line(char *packet, char *network_line)
{
int i=0;
memset(network_line, 0, 8192);
while ( packet[i] != '\0' && packet[i] != '\n' ) {
network_line[i]=packet[i];
i+=1;
}
if ( packet[i] == '\0' ) {
return 0;
}
network_line[i] = '\0';
return 1;
}
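/*
 * Illustration only (hypothetical helper, not part of the notifier): how a
 * caller is expected to walk a packet with get_network_line(). It mirrors
 * the pointer arithmetic used by data_on_connection() below, where lines
 * are consumed with p += strlen(line), plus one extra byte once the
 * trailing '\n' of a fully handled command is skipped.
 */
#ifdef NOTIFIER_LINE_EXAMPLE
static void walk_lines_example(char *packet)
{
char line[8192];
char *p = packet;
while (get_network_line(p, line)) {
p += strlen(line);
if (strlen(line) == 0)
p += 1; /* empty line: step over the bare '\n' */
}
}
#endif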
int data_on_connection(int fd, callback_remove_handler remove)
{
int nread;
char *network_packet;
char network_line[8192];
char *p;
unsigned long id;
char string[1024];
unsigned long msg_id = UINT32_MAX;
enum network_protocol version = network_client_get_version(fd);
ioctl(fd, FIONREAD, &nread);
univention_debug(UV_DEBUG_TRANSFILE, UV_DEBUG_ALL, "new connection data = %d\n",nread);
if(nread == 0)
{
univention_debug(UV_DEBUG_TRANSFILE, UV_DEBUG_PROCESS, "%d failed, got 0 close connection to listener ", fd);
close(fd);
FD_CLR(fd, &readfds);
remove(fd);
network_client_dump ();
return 0;
}
if ( nread >= 8192 ) {
univention_debug(UV_DEBUG_TRANSFILE, UV_DEBUG_ERROR, "%d failed, more than 8192 close connection to listener ", fd);
close(fd);
FD_CLR(fd, &readfds);
remove(fd);
return 0;
}
/* read the whole packet */
network_packet=malloc((nread+1) * sizeof(char));
nread = read(fd, network_packet, nread);
if ( nread < 0 ) {
univention_debug(UV_DEBUG_TRANSFILE, UV_DEBUG_ERROR, "%d read failed, close connection to listener ", fd);
free(network_packet);
close(fd);
FD_CLR(fd, &readfds);
remove(fd);
return 0;
}
network_packet[nread]='\0';
memset(network_line, 0, 8192);
p=network_packet;
p_sem(sem_id);
while ( get_network_line(p, network_line) ) {
if ( strlen(network_line) > 0 ) {
univention_debug(UV_DEBUG_TRANSFILE, UV_DEBUG_ALL, "line = [%s]",network_line);
}
if ( !strncmp(network_line, "MSGID: ", strlen("MSGID: ")) ) {
/* read message id */
msg_id=strtoul(&(network_line[strlen("MSGID: ")]), NULL, 10);
p+=strlen(network_line);
} else if ( !strncmp(network_line, "Version: ", strlen("Version: ")) ) {
char *head = network_line, *end;
univention_debug(UV_DEBUG_TRANSFILE, UV_DEBUG_ALL, "RECV: VERSION");
version = strtoul(head + 9, &end, 10);
if (!head[9] || *end)
goto failed;
univention_debug(UV_DEBUG_TRANSFILE, UV_DEBUG_ALL, "VERSION=%d", version);
if (version < network_procotol_version) {
univention_debug(UV_DEBUG_TRANSFILE, UV_DEBUG_PROCESS, "Forbidden VERSION=%d < %d, close connection to listener", version, network_procotol_version);
goto close;
} else if (version >= PROTOCOL_LAST) {
univention_debug(UV_DEBUG_TRANSFILE, UV_DEBUG_PROCESS, "Future VERSION=%d", version);
version = PROTOCOL_LAST - 1;
}
network_client_set_version(fd, version);
/* reset message id */
msg_id = UINT32_MAX;
p+=strlen(network_line);
} else if ( !strncmp(network_line, "Capabilities: ", strlen("Capabilities: ")) ) {
univention_debug(UV_DEBUG_TRANSFILE, UV_DEBUG_ALL, "RECV: Capabilities");
if ( version > PROTOCOL_UNKNOWN ) {
memset(string, 0, sizeof(string));
snprintf(string, sizeof(string), "Version: %d\nCapabilities: \n\n", version);
univention_debug(UV_DEBUG_TRANSFILE, UV_DEBUG_ALL, "SEND: %s", string);
write(fd, string, strlen(string));
} else {
univention_debug(UV_DEBUG_TRANSFILE, UV_DEBUG_ALL, "Capabilities recv, but no version line");
}
p+=strlen(network_line);
} else if ( !strncmp(network_line, "GET_DN ", strlen("GET_DN ")) && msg_id != UINT32_MAX && network_client_get_version(fd) > 0) {
univention_debug(UV_DEBUG_TRANSFILE, UV_DEBUG_ALL, "RECV: GET_DN");
id=strtoul(&(network_line[strlen("GET_DN ")]), NULL, 10);
univention_debug(UV_DEBUG_TRANSFILE, UV_DEBUG_ALL, "id: %ld",id);
if ( id <= notify_last_id.id) {
char *dn_string = NULL;
univention_debug(UV_DEBUG_TRANSFILE, UV_DEBUG_ALL, "try to read %ld from cache", id);
/* try to read from cache */
if ( (dn_string = notifier_cache_get(id)) == NULL ) {
univention_debug(UV_DEBUG_TRANSFILE, UV_DEBUG_ALL, "%ld not found in cache", id);
univention_debug(UV_DEBUG_TRANSFILE, UV_DEBUG_ALL, "%ld get one dn", id);
/* read from transaction file, because not in cache */
if( (dn_string=notify_transcation_get_one_dn ( id )) == NULL ) {
univention_debug(UV_DEBUG_TRANSFILE, UV_DEBUG_ALL, "%ld failed ", id);
/* TODO: maybe close connection? */
univention_debug(UV_DEBUG_TRANSFILE, UV_DEBUG_ERROR, "%d failed, close connection to listener ", fd);
goto close;
}
}
if ( dn_string != NULL ) {
snprintf(string, sizeof(string), "MSGID: %ld\n%s\n\n",msg_id,dn_string);
univention_debug(UV_DEBUG_TRANSFILE, UV_DEBUG_ALL, "--> %d: [%s]",fd, string);
write(fd, string, strlen(string));
free(dn_string);
}
} else {
/* set wanted id */
network_client_set_next_id(fd, id);
network_client_set_msg_id(fd, msg_id);
}
p+=strlen(network_line)+1;
msg_id = UINT32_MAX;
} else if (!strncmp(p, "WAIT_ID ", 8) && msg_id != UINT32_MAX && version >= PROTOCOL_3) {
char *head = network_line, *end;
univention_debug(UV_DEBUG_TRANSFILE, UV_DEBUG_ALL, "RECV: WAIT_ID");
id = strtoul(head + 8, &end, 10);
if (!head[8] || *end)
goto failed;
univention_debug(UV_DEBUG_TRANSFILE, UV_DEBUG_ALL, "id: %ld", id);
if (id <= notify_last_id.id) {
snprintf(string, sizeof(string), "MSGID: %ld\n%ld\n\n", msg_id, notify_last_id.id);
write(fd, string, strlen(string));
} else {
/* set wanted id */
network_client_set_next_id(fd, id);
network_client_set_msg_id(fd, msg_id);
}
p += strlen(network_line) + 1;
msg_id = UINT32_MAX;
} else if ( !strncmp(network_line, "GET_ID", strlen("GET_ID")) && msg_id != UINT32_MAX && network_client_get_version(fd) > 0) {
univention_debug(UV_DEBUG_TRANSFILE, UV_DEBUG_ALL, "RECV: GET_ID");
memset(string, 0, sizeof(string));
snprintf(string, sizeof(string), "MSGID: %ld\n%ld\n\n",msg_id,notify_last_id.id);
write(fd, string, strlen(string));
p+=strlen(network_line)+1;
msg_id = UINT32_MAX;
} else if ( !strncmp(network_line, "GET_SCHEMA_ID", strlen("GET_SCHEMA_ID")) && msg_id != UINT32_MAX && network_client_get_version(fd) > 0) {
univention_debug(UV_DEBUG_TRANSFILE, UV_DEBUG_ALL, "RECV: GET_SCHEMA_ID");
memset(string, 0, sizeof(string));
snprintf(string, sizeof(string), "MSGID: %ld\n%ld\n\n",msg_id,SCHEMA_ID);
univention_debug(UV_DEBUG_TRANSFILE, UV_DEBUG_ALL, "--> %d: [%s]",fd, string);
write(fd, string, strlen(string));
p+=strlen(network_line)+1;
msg_id = UINT32_MAX;
} else if ( !strncmp(network_line, "ALIVE", strlen("ALIVE")) && msg_id != UINT32_MAX && network_client_get_version(fd) > 0) {
univention_debug(UV_DEBUG_TRANSFILE, UV_DEBUG_ALL, "RECV: ALIVE");
snprintf(string, sizeof(string), "MSGID: %ld\nOKAY\n\n",msg_id);
write(fd, string, strlen(string));
p+=strlen(network_line)+1;
msg_id = UINT32_MAX;
} else {
p+=strlen(network_line);
if (strlen(network_line) == 0 ) {
p+=1;
} else {
univention_debug(UV_DEBUG_TRANSFILE, UV_DEBUG_ERROR, "Drop package [%s]", network_line);
}
}
}
v_sem(sem_id);
free(network_packet);
univention_debug(UV_DEBUG_TRANSFILE, UV_DEBUG_ALL, "END Packet");
network_client_dump ();
return 0;
failed:
univention_debug(UV_DEBUG_TRANSFILE, UV_DEBUG_PROCESS, "Failed parsing [%s]", p);
close:
v_sem(sem_id);
free(network_packet);
close(fd);
FD_CLR(fd, &readfds);
remove(fd);
return 0;
}
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_725_1 |
crossvul-cpp_data_bad_3839_0 | /* net/atm/common.c - ATM sockets (common part for PVC and SVC) */
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/net.h> /* struct socket, struct proto_ops */
#include <linux/atm.h> /* ATM stuff */
#include <linux/atmdev.h>
#include <linux/socket.h> /* SOL_SOCKET */
#include <linux/errno.h> /* error codes */
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/time.h> /* struct timeval */
#include <linux/skbuff.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/sock.h> /* struct sock */
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/atomic.h>
#include "resources.h" /* atm_find_dev */
#include "common.h" /* prototypes */
#include "protocols.h" /* atm_init_<transport> */
#include "addr.h" /* address registry */
#include "signaling.h" /* for WAITING and sigd_attach */
struct hlist_head vcc_hash[VCC_HTABLE_SIZE];
EXPORT_SYMBOL(vcc_hash);
DEFINE_RWLOCK(vcc_sklist_lock);
EXPORT_SYMBOL(vcc_sklist_lock);
static ATOMIC_NOTIFIER_HEAD(atm_dev_notify_chain);
static void __vcc_insert_socket(struct sock *sk)
{
struct atm_vcc *vcc = atm_sk(sk);
struct hlist_head *head = &vcc_hash[vcc->vci & (VCC_HTABLE_SIZE - 1)];
sk->sk_hash = vcc->vci & (VCC_HTABLE_SIZE - 1);
sk_add_node(sk, head);
}
void vcc_insert_socket(struct sock *sk)
{
write_lock_irq(&vcc_sklist_lock);
__vcc_insert_socket(sk);
write_unlock_irq(&vcc_sklist_lock);
}
EXPORT_SYMBOL(vcc_insert_socket);
static void vcc_remove_socket(struct sock *sk)
{
write_lock_irq(&vcc_sklist_lock);
sk_del_node_init(sk);
write_unlock_irq(&vcc_sklist_lock);
}
static struct sk_buff *alloc_tx(struct atm_vcc *vcc, unsigned int size)
{
struct sk_buff *skb;
struct sock *sk = sk_atm(vcc);
if (sk_wmem_alloc_get(sk) && !atm_may_send(vcc, size)) {
pr_debug("Sorry: wmem_alloc = %d, size = %d, sndbuf = %d\n",
sk_wmem_alloc_get(sk), size, sk->sk_sndbuf);
return NULL;
}
while (!(skb = alloc_skb(size, GFP_KERNEL)))
schedule();
pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
atomic_add(skb->truesize, &sk->sk_wmem_alloc);
return skb;
}
static void vcc_sock_destruct(struct sock *sk)
{
if (atomic_read(&sk->sk_rmem_alloc))
printk(KERN_DEBUG "%s: rmem leakage (%d bytes) detected.\n",
__func__, atomic_read(&sk->sk_rmem_alloc));
if (atomic_read(&sk->sk_wmem_alloc))
printk(KERN_DEBUG "%s: wmem leakage (%d bytes) detected.\n",
__func__, atomic_read(&sk->sk_wmem_alloc));
}
static void vcc_def_wakeup(struct sock *sk)
{
struct socket_wq *wq;
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
if (wq_has_sleeper(wq))
wake_up(&wq->wait);
rcu_read_unlock();
}
static inline int vcc_writable(struct sock *sk)
{
struct atm_vcc *vcc = atm_sk(sk);
return (vcc->qos.txtp.max_sdu +
atomic_read(&sk->sk_wmem_alloc)) <= sk->sk_sndbuf;
}
static void vcc_write_space(struct sock *sk)
{
struct socket_wq *wq;
rcu_read_lock();
if (vcc_writable(sk)) {
wq = rcu_dereference(sk->sk_wq);
if (wq_has_sleeper(wq))
wake_up_interruptible(&wq->wait);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
rcu_read_unlock();
}
static struct proto vcc_proto = {
.name = "VCC",
.owner = THIS_MODULE,
.obj_size = sizeof(struct atm_vcc),
};
int vcc_create(struct net *net, struct socket *sock, int protocol, int family)
{
struct sock *sk;
struct atm_vcc *vcc;
sock->sk = NULL;
if (sock->type == SOCK_STREAM)
return -EINVAL;
sk = sk_alloc(net, family, GFP_KERNEL, &vcc_proto);
if (!sk)
return -ENOMEM;
sock_init_data(sock, sk);
sk->sk_state_change = vcc_def_wakeup;
sk->sk_write_space = vcc_write_space;
vcc = atm_sk(sk);
vcc->dev = NULL;
memset(&vcc->local, 0, sizeof(struct sockaddr_atmsvc));
memset(&vcc->remote, 0, sizeof(struct sockaddr_atmsvc));
vcc->qos.txtp.max_sdu = 1 << 16; /* for meta VCs */
atomic_set(&sk->sk_wmem_alloc, 1);
atomic_set(&sk->sk_rmem_alloc, 0);
vcc->push = NULL;
vcc->pop = NULL;
vcc->push_oam = NULL;
vcc->vpi = vcc->vci = 0; /* no VCI/VPI yet */
vcc->atm_options = vcc->aal_options = 0;
sk->sk_destruct = vcc_sock_destruct;
return 0;
}
static void vcc_destroy_socket(struct sock *sk)
{
struct atm_vcc *vcc = atm_sk(sk);
struct sk_buff *skb;
set_bit(ATM_VF_CLOSE, &vcc->flags);
clear_bit(ATM_VF_READY, &vcc->flags);
if (vcc->dev) {
if (vcc->dev->ops->close)
vcc->dev->ops->close(vcc);
if (vcc->push)
vcc->push(vcc, NULL); /* atmarpd has no push */
while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
atm_return(vcc, skb->truesize);
kfree_skb(skb);
}
module_put(vcc->dev->ops->owner);
atm_dev_put(vcc->dev);
}
vcc_remove_socket(sk);
}
int vcc_release(struct socket *sock)
{
struct sock *sk = sock->sk;
if (sk) {
lock_sock(sk);
vcc_destroy_socket(sock->sk);
release_sock(sk);
sock_put(sk);
}
return 0;
}
void vcc_release_async(struct atm_vcc *vcc, int reply)
{
struct sock *sk = sk_atm(vcc);
set_bit(ATM_VF_CLOSE, &vcc->flags);
sk->sk_shutdown |= RCV_SHUTDOWN;
sk->sk_err = -reply;
clear_bit(ATM_VF_WAITING, &vcc->flags);
sk->sk_state_change(sk);
}
EXPORT_SYMBOL(vcc_release_async);
void vcc_process_recv_queue(struct atm_vcc *vcc)
{
struct sk_buff_head queue, *rq;
struct sk_buff *skb, *tmp;
unsigned long flags;
__skb_queue_head_init(&queue);
rq = &sk_atm(vcc)->sk_receive_queue;
spin_lock_irqsave(&rq->lock, flags);
skb_queue_splice_init(rq, &queue);
spin_unlock_irqrestore(&rq->lock, flags);
skb_queue_walk_safe(&queue, skb, tmp) {
__skb_unlink(skb, &queue);
vcc->push(vcc, skb);
}
}
EXPORT_SYMBOL(vcc_process_recv_queue);
void atm_dev_signal_change(struct atm_dev *dev, char signal)
{
pr_debug("%s signal=%d dev=%p number=%d dev->signal=%d\n",
__func__, signal, dev, dev->number, dev->signal);
/* atm driver sending invalid signal */
WARN_ON(signal < ATM_PHY_SIG_LOST || signal > ATM_PHY_SIG_FOUND);
if (dev->signal == signal)
return; /* no change */
dev->signal = signal;
atomic_notifier_call_chain(&atm_dev_notify_chain, signal, dev);
}
EXPORT_SYMBOL(atm_dev_signal_change);
void atm_dev_release_vccs(struct atm_dev *dev)
{
int i;
write_lock_irq(&vcc_sklist_lock);
for (i = 0; i < VCC_HTABLE_SIZE; i++) {
struct hlist_head *head = &vcc_hash[i];
struct hlist_node *node, *tmp;
struct sock *s;
struct atm_vcc *vcc;
sk_for_each_safe(s, node, tmp, head) {
vcc = atm_sk(s);
if (vcc->dev == dev) {
vcc_release_async(vcc, -EPIPE);
sk_del_node_init(s);
}
}
}
write_unlock_irq(&vcc_sklist_lock);
}
EXPORT_SYMBOL(atm_dev_release_vccs);
static int adjust_tp(struct atm_trafprm *tp, unsigned char aal)
{
int max_sdu;
if (!tp->traffic_class)
return 0;
switch (aal) {
case ATM_AAL0:
max_sdu = ATM_CELL_SIZE-1;
break;
case ATM_AAL34:
max_sdu = ATM_MAX_AAL34_PDU;
break;
default:
pr_warning("AAL problems ... (%d)\n", aal);
/* fall through */
case ATM_AAL5:
max_sdu = ATM_MAX_AAL5_PDU;
}
if (!tp->max_sdu)
tp->max_sdu = max_sdu;
else if (tp->max_sdu > max_sdu)
return -EINVAL;
if (!tp->max_cdv)
tp->max_cdv = ATM_MAX_CDV;
return 0;
}
static int check_ci(const struct atm_vcc *vcc, short vpi, int vci)
{
struct hlist_head *head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];
struct hlist_node *node;
struct sock *s;
struct atm_vcc *walk;
sk_for_each(s, node, head) {
walk = atm_sk(s);
if (walk->dev != vcc->dev)
continue;
if (test_bit(ATM_VF_ADDR, &walk->flags) && walk->vpi == vpi &&
walk->vci == vci && ((walk->qos.txtp.traffic_class !=
ATM_NONE && vcc->qos.txtp.traffic_class != ATM_NONE) ||
(walk->qos.rxtp.traffic_class != ATM_NONE &&
vcc->qos.rxtp.traffic_class != ATM_NONE)))
return -EADDRINUSE;
}
/* allow VCCs with the same VPI/VCI iff they don't collide on
TX/RX (but we may refuse such sharing for other reasons,
e.g. if the protocol requires both channels) */
return 0;
}
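/*
 * Pick a channel identifier: with both VPI and VCI fixed, just validate
 * the pair; otherwise scan the wildcarded dimension(s) round-robin from
 * the static cursors until check_ci() accepts a pair or the scan wraps,
 * in which case the channel space is exhausted.
 */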
static int find_ci(const struct atm_vcc *vcc, short *vpi, int *vci)
{
static short p; /* poor man's per-device cache */
static int c;
short old_p;
int old_c;
int err;
if (*vpi != ATM_VPI_ANY && *vci != ATM_VCI_ANY) {
err = check_ci(vcc, *vpi, *vci);
return err;
}
/* last scan may have left values out of bounds for current device */
if (*vpi != ATM_VPI_ANY)
p = *vpi;
else if (p >= 1 << vcc->dev->ci_range.vpi_bits)
p = 0;
if (*vci != ATM_VCI_ANY)
c = *vci;
else if (c < ATM_NOT_RSV_VCI || c >= 1 << vcc->dev->ci_range.vci_bits)
c = ATM_NOT_RSV_VCI;
old_p = p;
old_c = c;
do {
if (!check_ci(vcc, p, c)) {
*vpi = p;
*vci = c;
return 0;
}
if (*vci == ATM_VCI_ANY) {
c++;
if (c >= 1 << vcc->dev->ci_range.vci_bits)
c = ATM_NOT_RSV_VCI;
}
if ((c == ATM_NOT_RSV_VCI || *vci != ATM_VCI_ANY) &&
*vpi == ATM_VPI_ANY) {
p++;
if (p >= 1 << vcc->dev->ci_range.vpi_bits)
p = 0;
}
} while (old_p != p || old_c != c);
return -EADDRINUSE;
}
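/*
 * Bind a VCC to a device: validate VPI/VCI against the device's ci_range,
 * require CAP_NET_BIND_SERVICE for reserved VCIs, pin the driver module,
 * reserve the channel under vcc_sklist_lock, then run per-AAL setup and
 * traffic-parameter adjustment before handing the VCC to the driver's
 * open() callback.
 */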
static int __vcc_connect(struct atm_vcc *vcc, struct atm_dev *dev, short vpi,
int vci)
{
struct sock *sk = sk_atm(vcc);
int error;
if ((vpi != ATM_VPI_UNSPEC && vpi != ATM_VPI_ANY &&
vpi >> dev->ci_range.vpi_bits) || (vci != ATM_VCI_UNSPEC &&
vci != ATM_VCI_ANY && vci >> dev->ci_range.vci_bits))
return -EINVAL;
if (vci > 0 && vci < ATM_NOT_RSV_VCI && !capable(CAP_NET_BIND_SERVICE))
return -EPERM;
error = -ENODEV;
if (!try_module_get(dev->ops->owner))
return error;
vcc->dev = dev;
write_lock_irq(&vcc_sklist_lock);
if (test_bit(ATM_DF_REMOVED, &dev->flags) ||
(error = find_ci(vcc, &vpi, &vci))) {
write_unlock_irq(&vcc_sklist_lock);
goto fail_module_put;
}
vcc->vpi = vpi;
vcc->vci = vci;
__vcc_insert_socket(sk);
write_unlock_irq(&vcc_sklist_lock);
switch (vcc->qos.aal) {
case ATM_AAL0:
error = atm_init_aal0(vcc);
vcc->stats = &dev->stats.aal0;
break;
case ATM_AAL34:
error = atm_init_aal34(vcc);
vcc->stats = &dev->stats.aal34;
break;
case ATM_NO_AAL:
/* ATM_AAL5 is also used in the "0 for default" case */
vcc->qos.aal = ATM_AAL5;
/* fall through */
case ATM_AAL5:
error = atm_init_aal5(vcc);
vcc->stats = &dev->stats.aal5;
break;
default:
error = -EPROTOTYPE;
}
if (!error)
error = adjust_tp(&vcc->qos.txtp, vcc->qos.aal);
if (!error)
error = adjust_tp(&vcc->qos.rxtp, vcc->qos.aal);
if (error)
goto fail;
pr_debug("VCC %d.%d, AAL %d\n", vpi, vci, vcc->qos.aal);
pr_debug(" TX: %d, PCR %d..%d, SDU %d\n",
vcc->qos.txtp.traffic_class,
vcc->qos.txtp.min_pcr,
vcc->qos.txtp.max_pcr,
vcc->qos.txtp.max_sdu);
pr_debug(" RX: %d, PCR %d..%d, SDU %d\n",
vcc->qos.rxtp.traffic_class,
vcc->qos.rxtp.min_pcr,
vcc->qos.rxtp.max_pcr,
vcc->qos.rxtp.max_sdu);
if (dev->ops->open) {
error = dev->ops->open(vcc);
if (error)
goto fail;
}
return 0;
fail:
vcc_remove_socket(sk);
fail_module_put:
module_put(dev->ops->owner);
/* ensure we get dev module ref count correct */
vcc->dev = NULL;
return error;
}
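/*
 * Connect a PVC socket: ATM_ITF_ANY selects the first registered device,
 * and leaving VPI or VCI unspecified marks the binding ATM_VF_PARTIAL so
 * it can be completed by a later vcc_connect() call.
 */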
int vcc_connect(struct socket *sock, int itf, short vpi, int vci)
{
struct atm_dev *dev;
struct atm_vcc *vcc = ATM_SD(sock);
int error;
pr_debug("(vpi %d, vci %d)\n", vpi, vci);
if (sock->state == SS_CONNECTED)
return -EISCONN;
if (sock->state != SS_UNCONNECTED)
return -EINVAL;
if (!(vpi || vci))
return -EINVAL;
if (vpi != ATM_VPI_UNSPEC && vci != ATM_VCI_UNSPEC)
clear_bit(ATM_VF_PARTIAL, &vcc->flags);
else
if (test_bit(ATM_VF_PARTIAL, &vcc->flags))
return -EINVAL;
pr_debug("(TX: cl %d,bw %d-%d,sdu %d; "
"RX: cl %d,bw %d-%d,sdu %d,AAL %s%d)\n",
vcc->qos.txtp.traffic_class, vcc->qos.txtp.min_pcr,
vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_sdu,
vcc->qos.rxtp.traffic_class, vcc->qos.rxtp.min_pcr,
vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_sdu,
vcc->qos.aal == ATM_AAL5 ? "" :
vcc->qos.aal == ATM_AAL0 ? "" : " ??? code ",
vcc->qos.aal == ATM_AAL0 ? 0 : vcc->qos.aal);
if (!test_bit(ATM_VF_HASQOS, &vcc->flags))
return -EBADFD;
if (vcc->qos.txtp.traffic_class == ATM_ANYCLASS ||
vcc->qos.rxtp.traffic_class == ATM_ANYCLASS)
return -EINVAL;
if (likely(itf != ATM_ITF_ANY)) {
dev = try_then_request_module(atm_dev_lookup(itf),
"atm-device-%d", itf);
} else {
dev = NULL;
mutex_lock(&atm_dev_mutex);
if (!list_empty(&atm_devs)) {
dev = list_entry(atm_devs.next,
struct atm_dev, dev_list);
atm_dev_hold(dev);
}
mutex_unlock(&atm_dev_mutex);
}
if (!dev)
return -ENODEV;
error = __vcc_connect(vcc, dev, vpi, vci);
if (error) {
atm_dev_put(dev);
return error;
}
if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC)
set_bit(ATM_VF_PARTIAL, &vcc->flags);
if (test_bit(ATM_VF_READY, &ATM_SD(sock)->flags))
sock->state = SS_CONNECTED;
return 0;
}
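/*
 * Datagram receive: one PDU per call. Oversized PDUs are truncated and
 * flagged MSG_TRUNC, and the receive-buffer space is given back via
 * atm_return() unless the caller is only peeking.
 */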
int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
size_t size, int flags)
{
struct sock *sk = sock->sk;
struct atm_vcc *vcc;
struct sk_buff *skb;
int copied, error = -EINVAL;
if (sock->state != SS_CONNECTED)
return -ENOTCONN;
/* only handle MSG_DONTWAIT and MSG_PEEK */
if (flags & ~(MSG_DONTWAIT | MSG_PEEK))
return -EOPNOTSUPP;
vcc = ATM_SD(sock);
if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
test_bit(ATM_VF_CLOSE, &vcc->flags) ||
!test_bit(ATM_VF_READY, &vcc->flags))
return 0;
skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &error);
if (!skb)
return error;
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
error = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (error) {
skb_free_datagram(sk, skb); /* don't leak the dequeued skb on a failed copy */
return error;
}
sock_recv_ts_and_drops(msg, sk, skb);
if (!(flags & MSG_PEEK)) {
pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc),
skb->truesize);
atm_return(vcc, skb->truesize);
}
skb_free_datagram(sk, skb);
return copied;
}
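/*
 * Transmit path: exactly one iovec is supported. The payload is padded
 * to a 32-bit boundary and the caller sleeps for transmit buffer space
 * unless MSG_DONTWAIT is set; a VCC torn down while waiting raises
 * SIGPIPE, matching regular socket semantics.
 */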
int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
size_t total_len)
{
struct sock *sk = sock->sk;
DEFINE_WAIT(wait);
struct atm_vcc *vcc;
struct sk_buff *skb;
int eff, error;
const void __user *buff;
int size;
lock_sock(sk);
if (sock->state != SS_CONNECTED) {
error = -ENOTCONN;
goto out;
}
if (m->msg_name) {
error = -EISCONN;
goto out;
}
if (m->msg_iovlen != 1) {
error = -ENOSYS; /* fix this later @@@ */
goto out;
}
buff = m->msg_iov->iov_base;
size = m->msg_iov->iov_len;
vcc = ATM_SD(sock);
if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
test_bit(ATM_VF_CLOSE, &vcc->flags) ||
!test_bit(ATM_VF_READY, &vcc->flags)) {
error = -EPIPE;
send_sig(SIGPIPE, current, 0);
goto out;
}
if (!size) {
error = 0;
goto out;
}
if (size < 0 || size > vcc->qos.txtp.max_sdu) {
error = -EMSGSIZE;
goto out;
}
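/* e.g. size 5 becomes eff 8; the pad bytes are zeroed after the copy below */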
eff = (size+3) & ~3; /* align to word boundary */
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
error = 0;
while (!(skb = alloc_tx(vcc, eff))) {
if (m->msg_flags & MSG_DONTWAIT) {
error = -EAGAIN;
break;
}
schedule();
if (signal_pending(current)) {
error = -ERESTARTSYS;
break;
}
if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
test_bit(ATM_VF_CLOSE, &vcc->flags) ||
!test_bit(ATM_VF_READY, &vcc->flags)) {
error = -EPIPE;
send_sig(SIGPIPE, current, 0);
break;
}
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
}
finish_wait(sk_sleep(sk), &wait);
if (error)
goto out;
skb->dev = NULL; /* for paths shared with net_device interfaces */
ATM_SKB(skb)->atm_options = vcc->atm_options;
if (copy_from_user(skb_put(skb, size), buff, size)) {
kfree_skb(skb);
error = -EFAULT;
goto out;
}
if (eff != size)
memset(skb->data + size, 0, eff-size);
error = vcc->dev->ops->send(vcc, skb);
error = error ? error : size;
out:
release_sock(sk);
return error;
}
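/*
 * poll(): POLLERR for a pending socket error, POLLHUP once the VCC is
 * released or closing, POLLIN when the receive queue holds data, and
 * POLLOUT only when vcc_writable() says a full max_sdu would still fit.
 */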
unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
struct atm_vcc *vcc;
unsigned int mask;
sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
vcc = ATM_SD(sock);
/* exceptional events */
if (sk->sk_err)
mask = POLLERR;
if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
test_bit(ATM_VF_CLOSE, &vcc->flags))
mask |= POLLHUP;
/* readable? */
if (!skb_queue_empty(&sk->sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
/* writable? */
if (sock->state == SS_CONNECTING &&
test_bit(ATM_VF_WAITING, &vcc->flags))
return mask;
if (vcc->qos.txtp.traffic_class != ATM_NONE &&
vcc_writable(sk))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
return mask;
}
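/*
 * Renegotiate QoS on a live connection: the AAL and traffic classes are
 * immutable; PVCs go straight to the driver's change_qos op, while SVCs
 * renegotiate through signaling via svc_change_qos().
 */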
static int atm_change_qos(struct atm_vcc *vcc, struct atm_qos *qos)
{
int error;
/*
* Don't let the QoS change the already connected AAL type nor the
* traffic class.
*/
if (qos->aal != vcc->qos.aal ||
qos->rxtp.traffic_class != vcc->qos.rxtp.traffic_class ||
qos->txtp.traffic_class != vcc->qos.txtp.traffic_class)
return -EINVAL;
error = adjust_tp(&qos->txtp, qos->aal);
if (!error)
error = adjust_tp(&qos->rxtp, qos->aal);
if (error)
return error;
if (!vcc->dev->ops->change_qos)
return -EOPNOTSUPP;
if (sk_atm(vcc)->sk_family == AF_ATMPVC)
return vcc->dev->ops->change_qos(vcc, qos, ATM_MF_SET);
return svc_change_qos(vcc, qos);
}
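/*
 * Sanity-check one traffic-parameter block: any class other than UBR
 * must specify at least one cell rate, and min_pcr may not exceed
 * max_pcr.
 */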
static int check_tp(const struct atm_trafprm *tp)
{
/* @@@ Should be merged with adjust_tp */
if (!tp->traffic_class || tp->traffic_class == ATM_ANYCLASS)
return 0;
if (tp->traffic_class != ATM_UBR && !tp->min_pcr && !tp->pcr &&
!tp->max_pcr)
return -EINVAL;
if (tp->min_pcr == ATM_MAX_PCR)
return -EINVAL;
if (tp->min_pcr && tp->max_pcr && tp->max_pcr != ATM_MAX_PCR &&
tp->min_pcr > tp->max_pcr)
return -EINVAL;
/*
* We allow pcr to be outside [min_pcr,max_pcr], because later
* adjustment may still push it in the valid range.
*/
return 0;
}
static int check_qos(const struct atm_qos *qos)
{
int error;
if (!qos->txtp.traffic_class && !qos->rxtp.traffic_class)
return -EINVAL;
if (qos->txtp.traffic_class != qos->rxtp.traffic_class &&
qos->txtp.traffic_class && qos->rxtp.traffic_class &&
qos->txtp.traffic_class != ATM_ANYCLASS &&
qos->rxtp.traffic_class != ATM_ANYCLASS)
return -EINVAL;
error = check_tp(&qos->txtp);
if (error)
return error;
return check_tp(&qos->rxtp);
}
int vcc_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct atm_vcc *vcc;
unsigned long value;
int error;
if (__SO_LEVEL_MATCH(optname, level) && optlen != __SO_SIZE(optname))
return -EINVAL;
vcc = ATM_SD(sock);
switch (optname) {
case SO_ATMQOS:
{
struct atm_qos qos;
if (copy_from_user(&qos, optval, sizeof(qos)))
return -EFAULT;
error = check_qos(&qos);
if (error)
return error;
if (sock->state == SS_CONNECTED)
return atm_change_qos(vcc, &qos);
if (sock->state != SS_UNCONNECTED)
return -EBADFD;
vcc->qos = qos;
set_bit(ATM_VF_HASQOS, &vcc->flags);
return 0;
}
case SO_SETCLP:
if (get_user(value, (unsigned long __user *)optval))
return -EFAULT;
if (value)
vcc->atm_options |= ATM_ATMOPT_CLP;
else
vcc->atm_options &= ~ATM_ATMOPT_CLP;
return 0;
default:
if (level == SOL_SOCKET)
return -EINVAL;
break;
}
if (!vcc->dev || !vcc->dev->ops->setsockopt)
return -EINVAL;
return vcc->dev->ops->setsockopt(vcc, level, optname, optval, optlen);
}
int vcc_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct atm_vcc *vcc;
int len;
if (get_user(len, optlen))
return -EFAULT;
if (__SO_LEVEL_MATCH(optname, level) && len != __SO_SIZE(optname))
return -EINVAL;
vcc = ATM_SD(sock);
switch (optname) {
case SO_ATMQOS:
if (!test_bit(ATM_VF_HASQOS, &vcc->flags))
return -EINVAL;
return copy_to_user(optval, &vcc->qos, sizeof(vcc->qos))
? -EFAULT : 0;
case SO_SETCLP:
return put_user(vcc->atm_options & ATM_ATMOPT_CLP ? 1 : 0,
(unsigned long __user *)optval) ? -EFAULT : 0;
case SO_ATMPVC:
{
struct sockaddr_atmpvc pvc;
if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags))
return -ENOTCONN;
memset(&pvc, 0, sizeof(pvc)); /* zero padding bytes so kernel stack data can't leak to userspace */
pvc.sap_family = AF_ATMPVC;
pvc.sap_addr.itf = vcc->dev->number;
pvc.sap_addr.vpi = vcc->vpi;
pvc.sap_addr.vci = vcc->vci;
return copy_to_user(optval, &pvc, sizeof(pvc)) ? -EFAULT : 0;
}
default:
if (level == SOL_SOCKET)
return -EINVAL;
break;
}
if (!vcc->dev || !vcc->dev->ops->getsockopt)
return -EINVAL;
return vcc->dev->ops->getsockopt(vcc, level, optname, optval, len);
}
int register_atmdevice_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_register(&atm_dev_notify_chain, nb);
}
EXPORT_SYMBOL_GPL(register_atmdevice_notifier);
void unregister_atmdevice_notifier(struct notifier_block *nb)
{
atomic_notifier_chain_unregister(&atm_dev_notify_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_atmdevice_notifier);
static int __init atm_init(void)
{
int error;
error = proto_register(&vcc_proto, 0);
if (error < 0)
goto out;
error = atmpvc_init();
if (error < 0) {
pr_err("atmpvc_init() failed with %d\n", error);
goto out_unregister_vcc_proto;
}
error = atmsvc_init();
if (error < 0) {
pr_err("atmsvc_init() failed with %d\n", error);
goto out_atmpvc_exit;
}
error = atm_proc_init();
if (error < 0) {
pr_err("atm_proc_init() failed with %d\n", error);
goto out_atmsvc_exit;
}
error = atm_sysfs_init();
if (error < 0) {
pr_err("atm_sysfs_init() failed with %d\n", error);
goto out_atmproc_exit;
}
out:
return error;
out_atmproc_exit:
atm_proc_exit();
out_atmsvc_exit:
atmsvc_exit();
out_atmpvc_exit:
atmpvc_exit();
out_unregister_vcc_proto:
proto_unregister(&vcc_proto);
goto out;
}
static void __exit atm_exit(void)
{
atm_proc_exit();
atm_sysfs_exit();
atmsvc_exit();
atmpvc_exit();
proto_unregister(&vcc_proto);
}
subsys_initcall(atm_init);
module_exit(atm_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_ATMPVC);
MODULE_ALIAS_NETPROTO(PF_ATMSVC);
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_3839_0 |
crossvul-cpp_data_good_2575_0 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% JJJJJ PPPP EEEEE GGGG %
% J P P E G %
% J PPPP EEE G GG %
% J J P E G G %
% JJJ P EEEEE GGG %
% %
% %
% Read/Write JPEG Image Format %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% This software is based in part on the work of the Independent JPEG Group.
% See ftp://ftp.uu.net/graphics/jpeg/jpegsrc.v6b.tar.gz for copyright and
% licensing restrictions. Blob support contributed by Glenn Randers-Pehrson.
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/color.h"
#include "magick/colormap-private.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/constitute.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/option-private.h"
#include "magick/pixel-accessor.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/splay-tree.h"
#include "magick/static.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/token.h"
#include "magick/utility.h"
#include "magick/xml-tree.h"
#include <setjmp.h>
#if defined(MAGICKCORE_JPEG_DELEGATE)
#define JPEG_INTERNAL_OPTIONS
#if defined(__MINGW32__) || defined(__MINGW64__)
# define XMD_H 1 /* Avoid conflicting typedef for INT32 */
#endif
#undef HAVE_STDLIB_H
#include "jpeglib.h"
#include "jerror.h"
#endif
/*
Define declarations.
*/
#define ICC_MARKER (JPEG_APP0+2)
#define ICC_PROFILE "ICC_PROFILE"
#define IPTC_MARKER (JPEG_APP0+13)
#define XML_MARKER (JPEG_APP0+1)
#define MaxBufferExtent 16384
/*
Typedef declarations.
*/
#if defined(MAGICKCORE_JPEG_DELEGATE)
typedef struct _DestinationManager
{
struct jpeg_destination_mgr
manager;
Image
*image;
JOCTET
*buffer;
} DestinationManager;
typedef struct _ErrorManager
{
Image
*image;
MagickBooleanType
finished;
StringInfo
*profile;
jmp_buf
error_recovery;
} ErrorManager;
typedef struct _SourceManager
{
struct jpeg_source_mgr
manager;
Image
*image;
JOCTET
*buffer;
boolean
start_of_blob;
} SourceManager;
#endif
typedef struct _QuantizationTable
{
char
*slot,
*description;
size_t
width,
height;
double
divisor;
unsigned int
*levels;
} QuantizationTable;
/*
Forward declarations.
*/
#if defined(MAGICKCORE_JPEG_DELEGATE)
static MagickBooleanType
WriteJPEGImage(const ImageInfo *,Image *);
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s J P E G %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsJPEG() returns MagickTrue if the image format type, identified by the
% magick string, is JPEG.
%
% The format of the IsJPEG method is:
%
% MagickBooleanType IsJPEG(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsJPEG(const unsigned char *magick,const size_t length)
{
if (length < 3)
return(MagickFalse);
if (memcmp(magick,"\377\330\377",3) == 0)
return(MagickTrue);
return(MagickFalse);
}
#if defined(MAGICKCORE_JPEG_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d J P E G I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadJPEGImage() reads a JPEG image file and returns it. It allocates
% the memory necessary for the new Image structure and returns a pointer to
% the new image.
%
% The format of the ReadJPEGImage method is:
%
% Image *ReadJPEGImage(const ImageInfo *image_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
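/*
libjpeg source callback: refill the input buffer from the blob. On a
premature end-of-file a fake EOI marker is substituted so that truncated
images decode as far as possible.
*/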
static boolean FillInputBuffer(j_decompress_ptr cinfo)
{
SourceManager
*source;
source=(SourceManager *) cinfo->src;
source->manager.bytes_in_buffer=(size_t) ReadBlob(source->image,
MaxBufferExtent,source->buffer);
if (source->manager.bytes_in_buffer == 0)
{
if (source->start_of_blob != FALSE)
ERREXIT(cinfo,JERR_INPUT_EMPTY);
WARNMS(cinfo,JWRN_JPEG_EOF);
source->buffer[0]=(JOCTET) 0xff;
source->buffer[1]=(JOCTET) JPEG_EOI;
source->manager.bytes_in_buffer=2;
}
source->manager.next_input_byte=source->buffer;
source->start_of_blob=FALSE;
return(TRUE);
}
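/*
Pull a single byte from the libjpeg source manager, refilling the buffer
when it runs dry.
*/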
static int GetCharacter(j_decompress_ptr jpeg_info)
{
if (jpeg_info->src->bytes_in_buffer == 0)
(void) (*jpeg_info->src->fill_input_buffer)(jpeg_info);
jpeg_info->src->bytes_in_buffer--;
return((int) GETJOCTET(*jpeg_info->src->next_input_byte++));
}
static void InitializeSource(j_decompress_ptr cinfo)
{
SourceManager
*source;
source=(SourceManager *) cinfo->src;
source->start_of_blob=TRUE;
}
static MagickBooleanType IsITUFaxImage(const Image *image)
{
const StringInfo
*profile;
const unsigned char
*datum;
profile=GetImageProfile(image,"8bim");
if (profile == (const StringInfo *) NULL)
return(MagickFalse);
if (GetStringInfoLength(profile) < 5)
return(MagickFalse);
datum=GetStringInfoDatum(profile);
if ((datum[0] == 0x47) && (datum[1] == 0x33) && (datum[2] == 0x46) &&
(datum[3] == 0x41) && (datum[4] == 0x58))
return(MagickTrue);
return(MagickFalse);
}
static void JPEGErrorHandler(j_common_ptr jpeg_info)
{
char
message[JMSG_LENGTH_MAX];
ErrorManager
*error_manager;
Image
*image;
*message='\0';
error_manager=(ErrorManager *) jpeg_info->client_data;
image=error_manager->image;
(jpeg_info->err->format_message)(jpeg_info,message);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"[%s] JPEG Trace: \"%s\"",image->filename,message);
if (error_manager->finished != MagickFalse)
(void) ThrowMagickException(&image->exception,GetMagickModule(),
CorruptImageWarning,(char *) message,"`%s'",image->filename);
else
(void) ThrowMagickException(&image->exception,GetMagickModule(),
CorruptImageError,(char *) message,"`%s'",image->filename);
longjmp(error_manager->error_recovery,1);
}
static MagickBooleanType JPEGWarningHandler(j_common_ptr jpeg_info,int level)
{
#define JPEGExcessiveWarnings 1000
char
message[JMSG_LENGTH_MAX];
ErrorManager
*error_manager;
Image
*image;
*message='\0';
error_manager=(ErrorManager *) jpeg_info->client_data;
image=error_manager->image;
if (level < 0)
{
/*
Process warning message.
*/
(jpeg_info->err->format_message)(jpeg_info,message);
if (jpeg_info->err->num_warnings++ < JPEGExcessiveWarnings)
ThrowBinaryException(CorruptImageWarning,(char *) message,
image->filename);
}
else
if ((image->debug != MagickFalse) && (level >= jpeg_info->err->trace_level))
{
/*
Process trace message.
*/
(jpeg_info->err->format_message)(jpeg_info,message);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"[%s] JPEG Trace: \"%s\"",image->filename,message);
}
return(MagickTrue);
}
static boolean ReadComment(j_decompress_ptr jpeg_info)
{
ErrorManager
*error_manager;
Image
*image;
register unsigned char
*p;
register ssize_t
i;
size_t
length;
StringInfo
*comment;
/*
Determine length of comment.
*/
error_manager=(ErrorManager *) jpeg_info->client_data;
image=error_manager->image;
length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8);
length+=GetCharacter(jpeg_info);
if (length <= 2)
return(TRUE);
length-=2;
comment=BlobToStringInfo((const void *) NULL,length);
if (comment == (StringInfo *) NULL)
{
(void) ThrowMagickException(&image->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(FALSE);
}
/*
Read comment.
*/
error_manager->profile=comment;
p=GetStringInfoDatum(comment);
for (i=0; i < (ssize_t) GetStringInfoLength(comment); i++)
*p++=(unsigned char) GetCharacter(jpeg_info);
*p='\0';
error_manager->profile=NULL;
p=GetStringInfoDatum(comment);
(void) SetImageProperty(image,"comment",(const char *) p);
comment=DestroyStringInfo(comment);
return(TRUE);
}
static boolean ReadICCProfile(j_decompress_ptr jpeg_info)
{
char
magick[12];
ErrorManager
*error_manager;
Image
*image;
MagickBooleanType
status;
register ssize_t
i;
register unsigned char
*p;
size_t
length;
StringInfo
*icc_profile,
*profile;
/*
Read color profile.
*/
length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8);
length+=(size_t) GetCharacter(jpeg_info);
length-=2;
if (length <= 14)
{
while (length-- > 0)
(void) GetCharacter(jpeg_info);
return(TRUE);
}
for (i=0; i < 12; i++)
magick[i]=(char) GetCharacter(jpeg_info);
if (LocaleCompare(magick,ICC_PROFILE) != 0)
{
/*
Not an ICC profile, return.
*/
for (i=0; i < (ssize_t) (length-12); i++)
(void) GetCharacter(jpeg_info);
return(TRUE);
}
(void) GetCharacter(jpeg_info); /* id */
(void) GetCharacter(jpeg_info); /* markers */
length-=14;
error_manager=(ErrorManager *) jpeg_info->client_data;
image=error_manager->image;
profile=BlobToStringInfo((const void *) NULL,length);
if (profile == (StringInfo *) NULL)
{
(void) ThrowMagickException(&image->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(FALSE);
}
error_manager->profile=profile;
p=GetStringInfoDatum(profile);
for (i=(ssize_t) GetStringInfoLength(profile)-1; i >= 0; i--)
*p++=(unsigned char) GetCharacter(jpeg_info);
error_manager->profile=NULL;
icc_profile=(StringInfo *) GetImageProfile(image,"icc");
if (icc_profile != (StringInfo *) NULL)
{
ConcatenateStringInfo(icc_profile,profile);
profile=DestroyStringInfo(profile);
}
else
{
status=SetImageProfile(image,"icc",profile);
profile=DestroyStringInfo(profile);
if (status == MagickFalse)
{
(void) ThrowMagickException(&image->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(FALSE);
}
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Profile: ICC, %.20g bytes",(double) length);
return(TRUE);
}
static boolean ReadIPTCProfile(j_decompress_ptr jpeg_info)
{
char
magick[MaxTextExtent];
ErrorManager
*error_manager;
Image
*image;
MagickBooleanType
status;
register ssize_t
i;
register unsigned char
*p;
size_t
length;
StringInfo
*iptc_profile,
*profile;
/*
Determine length of binary data stored here.
*/
length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8);
length+=(size_t) GetCharacter(jpeg_info);
length-=2;
if (length <= 14)
{
while (length-- > 0)
(void) GetCharacter(jpeg_info);
return(TRUE);
}
/*
Validate that this was written as a Photoshop resource format slug.
*/
for (i=0; i < 10; i++)
magick[i]=(char) GetCharacter(jpeg_info);
magick[10]='\0';
length-=10;
if (length <= 10)
return(TRUE);
if (LocaleCompare(magick,"Photoshop ") != 0)
{
/*
Not an IPTC profile, return.
*/
for (i=0; i < (ssize_t) length; i++)
(void) GetCharacter(jpeg_info);
return(TRUE);
}
/*
Remove the version number.
*/
for (i=0; i < 4; i++)
(void) GetCharacter(jpeg_info);
if (length <= 11)
return(TRUE);
length-=4;
error_manager=(ErrorManager *) jpeg_info->client_data;
image=error_manager->image;
profile=BlobToStringInfo((const void *) NULL,length);
if (profile == (StringInfo *) NULL)
{
(void) ThrowMagickException(&image->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(FALSE);
}
error_manager->profile=profile;
p=GetStringInfoDatum(profile);
for (i=0; i < (ssize_t) GetStringInfoLength(profile); i++)
*p++=(unsigned char) GetCharacter(jpeg_info);
error_manager->profile=NULL;
iptc_profile=(StringInfo *) GetImageProfile(image,"8bim");
if (iptc_profile != (StringInfo *) NULL)
{
ConcatenateStringInfo(iptc_profile,profile);
profile=DestroyStringInfo(profile);
}
else
{
status=SetImageProfile(image,"8bim",profile);
profile=DestroyStringInfo(profile);
if (status == MagickFalse)
{
(void) ThrowMagickException(&image->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(FALSE);
}
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Profile: iptc, %.20g bytes",(double) length);
return(TRUE);
}
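/*
Generic APPn marker handler: store the payload as a profile named "APPn".
APP1 payloads are renamed "exif" or "xmp" by signature (for XMP the
namespace URI up to the first NUL is stripped), and data for a name seen
earlier is appended after the existing profile's contents.
*/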
static boolean ReadProfile(j_decompress_ptr jpeg_info)
{
char
name[MaxTextExtent];
const StringInfo
*previous_profile;
ErrorManager
*error_manager;
Image
*image;
int
marker;
MagickBooleanType
status;
register ssize_t
i;
register unsigned char
*p;
size_t
length;
StringInfo
*profile;
/*
Read generic profile.
*/
length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8);
length+=(size_t) GetCharacter(jpeg_info);
if (length <= 2)
return(TRUE);
length-=2;
marker=jpeg_info->unread_marker-JPEG_APP0;
(void) FormatLocaleString(name,MaxTextExtent,"APP%d",marker);
error_manager=(ErrorManager *) jpeg_info->client_data;
image=error_manager->image;
profile=BlobToStringInfo((const void *) NULL,length);
if (profile == (StringInfo *) NULL)
{
(void) ThrowMagickException(&image->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(FALSE);
}
error_manager->profile=profile;
p=GetStringInfoDatum(profile);
for (i=0; i < (ssize_t) GetStringInfoLength(profile); i++)
*p++=(unsigned char) GetCharacter(jpeg_info);
error_manager->profile=NULL;
if (marker == 1)
{
p=GetStringInfoDatum(profile);
if ((length > 4) && (LocaleNCompare((char *) p,"exif",4) == 0))
(void) CopyMagickString(name,"exif",MaxTextExtent);
if ((length > 5) && (LocaleNCompare((char *) p,"http:",5) == 0))
{
ssize_t
j;
/*
Extract namespace from XMP profile.
*/
p=GetStringInfoDatum(profile);
for (j=0; j < (ssize_t) GetStringInfoLength(profile); j++)
{
if (*p == '\0')
break;
p++;
}
if (j < (ssize_t) GetStringInfoLength(profile))
(void) DestroyStringInfo(SplitStringInfo(profile,(size_t) (j+1)));
(void) CopyMagickString(name,"xmp",MaxTextExtent);
}
}
previous_profile=GetImageProfile(image,name);
if (previous_profile != (const StringInfo *) NULL)
{
size_t
length;
length=GetStringInfoLength(profile);
SetStringInfoLength(profile,GetStringInfoLength(profile)+
GetStringInfoLength(previous_profile));
(void) memmove(GetStringInfoDatum(profile)+
GetStringInfoLength(previous_profile),GetStringInfoDatum(profile),
length);
(void) memcpy(GetStringInfoDatum(profile),
GetStringInfoDatum(previous_profile),
GetStringInfoLength(previous_profile));
}
status=SetImageProfile(image,name,profile);
profile=DestroyStringInfo(profile);
if (status == MagickFalse)
{
(void) ThrowMagickException(&image->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(FALSE);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Profile: %s, %.20g bytes",name,(double) length);
return(TRUE);
}
static void SkipInputData(j_decompress_ptr cinfo,long number_bytes)
{
SourceManager
*source;
if (number_bytes <= 0)
return;
source=(SourceManager *) cinfo->src;
while (number_bytes > (long) source->manager.bytes_in_buffer)
{
number_bytes-=(long) source->manager.bytes_in_buffer;
(void) FillInputBuffer(cinfo);
}
source->manager.next_input_byte+=number_bytes;
source->manager.bytes_in_buffer-=number_bytes;
}
static void TerminateSource(j_decompress_ptr cinfo)
{
(void) cinfo;
}
static void JPEGSourceManager(j_decompress_ptr cinfo,Image *image)
{
SourceManager
*source;
cinfo->src=(struct jpeg_source_mgr *) (*cinfo->mem->alloc_small)
((j_common_ptr) cinfo,JPOOL_IMAGE,sizeof(SourceManager));
source=(SourceManager *) cinfo->src;
source->buffer=(JOCTET *) (*cinfo->mem->alloc_small)
((j_common_ptr) cinfo,JPOOL_IMAGE,MaxBufferExtent*sizeof(JOCTET));
source=(SourceManager *) cinfo->src;
source->manager.init_source=InitializeSource;
source->manager.fill_input_buffer=FillInputBuffer;
source->manager.skip_input_data=SkipInputData;
source->manager.resync_to_restart=jpeg_resync_to_restart;
source->manager.term_source=TerminateSource;
source->manager.bytes_in_buffer=0;
source->manager.next_input_byte=NULL;
source->image=image;
}
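/*
Estimate the IJG quality setting from the quantization tables: sum all
table entries and probe a few fixed coefficients against precomputed
hash/sums tables (separate tables for the one- and two-table cases),
reporting the match as exact or approximate.
*/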
static void JPEGSetImageQuality(struct jpeg_decompress_struct *jpeg_info,
Image *image)
{
image->quality=UndefinedCompressionQuality;
#if defined(D_PROGRESSIVE_SUPPORTED)
if (image->compression == LosslessJPEGCompression)
{
image->quality=100;
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Quality: 100 (lossless)");
}
else
#endif
{
ssize_t
j,
qvalue,
sum;
register ssize_t
i;
/*
Determine the JPEG compression quality from the quantization tables.
*/
sum=0;
for (i=0; i < NUM_QUANT_TBLS; i++)
{
if (jpeg_info->quant_tbl_ptrs[i] != NULL)
for (j=0; j < DCTSIZE2; j++)
sum+=jpeg_info->quant_tbl_ptrs[i]->quantval[j];
}
if ((jpeg_info->quant_tbl_ptrs[0] != NULL) &&
(jpeg_info->quant_tbl_ptrs[1] != NULL))
{
ssize_t
hash[101] =
{
1020, 1015, 932, 848, 780, 735, 702, 679, 660, 645,
632, 623, 613, 607, 600, 594, 589, 585, 581, 571,
555, 542, 529, 514, 494, 474, 457, 439, 424, 410,
397, 386, 373, 364, 351, 341, 334, 324, 317, 309,
299, 294, 287, 279, 274, 267, 262, 257, 251, 247,
243, 237, 232, 227, 222, 217, 213, 207, 202, 198,
192, 188, 183, 177, 173, 168, 163, 157, 153, 148,
143, 139, 132, 128, 125, 119, 115, 108, 104, 99,
94, 90, 84, 79, 74, 70, 64, 59, 55, 49,
45, 40, 34, 30, 25, 20, 15, 11, 6, 4,
0
},
sums[101] =
{
32640, 32635, 32266, 31495, 30665, 29804, 29146, 28599, 28104,
27670, 27225, 26725, 26210, 25716, 25240, 24789, 24373, 23946,
23572, 22846, 21801, 20842, 19949, 19121, 18386, 17651, 16998,
16349, 15800, 15247, 14783, 14321, 13859, 13535, 13081, 12702,
12423, 12056, 11779, 11513, 11135, 10955, 10676, 10392, 10208,
9928, 9747, 9564, 9369, 9193, 9017, 8822, 8639, 8458,
8270, 8084, 7896, 7710, 7527, 7347, 7156, 6977, 6788,
6607, 6422, 6236, 6054, 5867, 5684, 5495, 5305, 5128,
4945, 4751, 4638, 4442, 4248, 4065, 3888, 3698, 3509,
3326, 3139, 2957, 2775, 2586, 2405, 2216, 2037, 1846,
1666, 1483, 1297, 1109, 927, 735, 554, 375, 201,
128, 0
};
qvalue=(ssize_t) (jpeg_info->quant_tbl_ptrs[0]->quantval[2]+
jpeg_info->quant_tbl_ptrs[0]->quantval[53]+
jpeg_info->quant_tbl_ptrs[1]->quantval[0]+
jpeg_info->quant_tbl_ptrs[1]->quantval[DCTSIZE2-1]);
for (i=0; i < 100; i++)
{
if ((qvalue < hash[i]) && (sum < sums[i]))
continue;
if (((qvalue <= hash[i]) && (sum <= sums[i])) || (i >= 50))
image->quality=(size_t) i+1;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Quality: %.20g (%s)",(double) i+1,(qvalue <= hash[i]) &&
(sum <= sums[i]) ? "exact" : "approximate");
break;
}
}
else
if (jpeg_info->quant_tbl_ptrs[0] != NULL)
{
ssize_t
hash[101] =
{
510, 505, 422, 380, 355, 338, 326, 318, 311, 305,
300, 297, 293, 291, 288, 286, 284, 283, 281, 280,
279, 278, 277, 273, 262, 251, 243, 233, 225, 218,
211, 205, 198, 193, 186, 181, 177, 172, 168, 164,
158, 156, 152, 148, 145, 142, 139, 136, 133, 131,
129, 126, 123, 120, 118, 115, 113, 110, 107, 105,
102, 100, 97, 94, 92, 89, 87, 83, 81, 79,
76, 74, 70, 68, 66, 63, 61, 57, 55, 52,
50, 48, 44, 42, 39, 37, 34, 31, 29, 26,
24, 21, 18, 16, 13, 11, 8, 6, 3, 2,
0
},
sums[101] =
{
16320, 16315, 15946, 15277, 14655, 14073, 13623, 13230, 12859,
12560, 12240, 11861, 11456, 11081, 10714, 10360, 10027, 9679,
9368, 9056, 8680, 8331, 7995, 7668, 7376, 7084, 6823,
6562, 6345, 6125, 5939, 5756, 5571, 5421, 5240, 5086,
4976, 4829, 4719, 4616, 4463, 4393, 4280, 4166, 4092,
3980, 3909, 3835, 3755, 3688, 3621, 3541, 3467, 3396,
3323, 3247, 3170, 3096, 3021, 2952, 2874, 2804, 2727,
2657, 2583, 2509, 2437, 2362, 2290, 2211, 2136, 2068,
1996, 1915, 1858, 1773, 1692, 1620, 1552, 1477, 1398,
1326, 1251, 1179, 1109, 1031, 961, 884, 814, 736,
667, 592, 518, 441, 369, 292, 221, 151, 86,
64, 0
};
qvalue=(ssize_t) (jpeg_info->quant_tbl_ptrs[0]->quantval[2]+
jpeg_info->quant_tbl_ptrs[0]->quantval[53]);
for (i=0; i < 100; i++)
{
if ((qvalue < hash[i]) && (sum < sums[i]))
continue;
if (((qvalue <= hash[i]) && (sum <= sums[i])) || (i >= 50))
image->quality=(size_t) i+1;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Quality: %.20g (%s)",(double) i+1,(qvalue <= hash[i]) &&
(sum <= sums[i]) ? "exact" : "approximate");
break;
}
}
}
}
static void JPEGSetImageSamplingFactor(struct jpeg_decompress_struct *jpeg_info, Image *image)
{
char
sampling_factor[MaxTextExtent];
switch (jpeg_info->out_color_space)
{
case JCS_CMYK:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: CMYK");
(void) FormatLocaleString(sampling_factor,MaxTextExtent,
"%dx%d,%dx%d,%dx%d,%dx%d",jpeg_info->comp_info[0].h_samp_factor,
jpeg_info->comp_info[0].v_samp_factor,
jpeg_info->comp_info[1].h_samp_factor,
jpeg_info->comp_info[1].v_samp_factor,
jpeg_info->comp_info[2].h_samp_factor,
jpeg_info->comp_info[2].v_samp_factor,
jpeg_info->comp_info[3].h_samp_factor,
jpeg_info->comp_info[3].v_samp_factor);
break;
}
case JCS_GRAYSCALE:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Colorspace: GRAYSCALE");
(void) FormatLocaleString(sampling_factor,MaxTextExtent,"%dx%d",
jpeg_info->comp_info[0].h_samp_factor,
jpeg_info->comp_info[0].v_samp_factor);
break;
}
case JCS_RGB:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: RGB");
(void) FormatLocaleString(sampling_factor,MaxTextExtent,
"%dx%d,%dx%d,%dx%d",jpeg_info->comp_info[0].h_samp_factor,
jpeg_info->comp_info[0].v_samp_factor,
jpeg_info->comp_info[1].h_samp_factor,
jpeg_info->comp_info[1].v_samp_factor,
jpeg_info->comp_info[2].h_samp_factor,
jpeg_info->comp_info[2].v_samp_factor);
break;
}
default:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: %d",
jpeg_info->out_color_space);
(void) FormatLocaleString(sampling_factor,MaxTextExtent,
"%dx%d,%dx%d,%dx%d,%dx%d",jpeg_info->comp_info[0].h_samp_factor,
jpeg_info->comp_info[0].v_samp_factor,
jpeg_info->comp_info[1].h_samp_factor,
jpeg_info->comp_info[1].v_samp_factor,
jpeg_info->comp_info[2].h_samp_factor,
jpeg_info->comp_info[2].v_samp_factor,
jpeg_info->comp_info[3].h_samp_factor,
jpeg_info->comp_info[3].v_samp_factor);
break;
}
}
(void) SetImageProperty(image,"jpeg:sampling-factor",sampling_factor);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Sampling Factors: %s",
sampling_factor);
}
static Image *ReadJPEGImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
char
value[MaxTextExtent];
const char
*option;
ErrorManager
error_manager;
Image
*image;
IndexPacket
index;
JSAMPLE
*volatile jpeg_pixels;
JSAMPROW
scanline[1];
MagickBooleanType
debug,
status;
MagickSizeType
number_pixels;
MemoryInfo
*memory_info;
register ssize_t
i;
struct jpeg_decompress_struct
jpeg_info;
struct jpeg_error_mgr
jpeg_error;
register JSAMPLE
*p;
size_t
units;
ssize_t
y;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
debug=IsEventLogging();
(void) debug;
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Verify that the file size is large enough to contain a JPEG datastream.
*/
if (GetBlobSize(image) < 107)
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
/*
Initialize JPEG parameters.
*/
(void) ResetMagickMemory(&error_manager,0,sizeof(error_manager));
(void) ResetMagickMemory(&jpeg_info,0,sizeof(jpeg_info));
(void) ResetMagickMemory(&jpeg_error,0,sizeof(jpeg_error));
jpeg_info.err=jpeg_std_error(&jpeg_error);
jpeg_info.err->emit_message=(void (*)(j_common_ptr,int)) JPEGWarningHandler;
jpeg_info.err->error_exit=(void (*)(j_common_ptr)) JPEGErrorHandler;
memory_info=(MemoryInfo *) NULL;
error_manager.image=image;
if (setjmp(error_manager.error_recovery) != 0)
{
jpeg_destroy_decompress(&jpeg_info);
if (error_manager.profile != (StringInfo *) NULL)
error_manager.profile=DestroyStringInfo(error_manager.profile);
(void) CloseBlob(image);
number_pixels=(MagickSizeType) image->columns*image->rows;
if (number_pixels != 0)
return(GetFirstImageInList(image));
InheritException(exception,&image->exception);
return(DestroyImage(image));
}
jpeg_info.client_data=(void *) &error_manager;
jpeg_create_decompress(&jpeg_info);
JPEGSourceManager(&jpeg_info,image);
jpeg_set_marker_processor(&jpeg_info,JPEG_COM,ReadComment);
option=GetImageOption(image_info,"profile:skip");
if (IsOptionMember("ICC",option) == MagickFalse)
jpeg_set_marker_processor(&jpeg_info,ICC_MARKER,ReadICCProfile);
if (IsOptionMember("IPTC",option) == MagickFalse)
jpeg_set_marker_processor(&jpeg_info,IPTC_MARKER,ReadIPTCProfile);
for (i=1; i < 16; i++)
if ((i != 2) && (i != 13) && (i != 14))
if (IsOptionMember("APP",option) == MagickFalse)
jpeg_set_marker_processor(&jpeg_info,(int) (JPEG_APP0+i),ReadProfile);
i=(ssize_t) jpeg_read_header(&jpeg_info,TRUE);
if ((image_info->colorspace == YCbCrColorspace) ||
(image_info->colorspace == Rec601YCbCrColorspace) ||
(image_info->colorspace == Rec709YCbCrColorspace))
jpeg_info.out_color_space=JCS_YCbCr;
/*
Set image resolution.
*/
units=0;
if ((jpeg_info.saw_JFIF_marker != 0) && (jpeg_info.X_density != 1) &&
(jpeg_info.Y_density != 1))
{
image->x_resolution=(double) jpeg_info.X_density;
image->y_resolution=(double) jpeg_info.Y_density;
units=(size_t) jpeg_info.density_unit;
}
if (units == 1)
image->units=PixelsPerInchResolution;
if (units == 2)
image->units=PixelsPerCentimeterResolution;
number_pixels=(MagickSizeType) image->columns*image->rows;
option=GetImageOption(image_info,"jpeg:size");
if ((option != (const char *) NULL) &&
(jpeg_info.out_color_space != JCS_YCbCr))
{
double
scale_factor;
GeometryInfo
geometry_info;
MagickStatusType
flags;
/*
Scale the image.
*/
flags=ParseGeometry(option,&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
jpeg_calc_output_dimensions(&jpeg_info);
image->magick_columns=jpeg_info.output_width;
image->magick_rows=jpeg_info.output_height;
scale_factor=1.0;
if (geometry_info.rho != 0.0)
scale_factor=jpeg_info.output_width/geometry_info.rho;
if ((geometry_info.sigma != 0.0) &&
(scale_factor > (jpeg_info.output_height/geometry_info.sigma)))
scale_factor=jpeg_info.output_height/geometry_info.sigma;
jpeg_info.scale_num=1U;
jpeg_info.scale_denom=(unsigned int) scale_factor;
jpeg_calc_output_dimensions(&jpeg_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Scale factor: %.20g",(double) scale_factor);
}
#if (JPEG_LIB_VERSION >= 61) && defined(D_PROGRESSIVE_SUPPORTED)
#if defined(D_LOSSLESS_SUPPORTED)
image->interlace=jpeg_info.process == JPROC_PROGRESSIVE ?
JPEGInterlace : NoInterlace;
image->compression=jpeg_info.process == JPROC_LOSSLESS ?
LosslessJPEGCompression : JPEGCompression;
if (jpeg_info.data_precision > 8)
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"12-bit JPEG not supported. Reducing pixel data to 8 bits","`%s'",
image->filename);
if (jpeg_info.data_precision == 16)
jpeg_info.data_precision=12;
#else
image->interlace=jpeg_info.progressive_mode != 0 ? JPEGInterlace :
NoInterlace;
image->compression=JPEGCompression;
#endif
#else
image->compression=JPEGCompression;
image->interlace=JPEGInterlace;
#endif
option=GetImageOption(image_info,"jpeg:colors");
if (option != (const char *) NULL)
{
/*
Let the JPEG library quantize for us.
*/
jpeg_info.quantize_colors=TRUE;
jpeg_info.desired_number_of_colors=(int) StringToUnsignedLong(option);
}
option=GetImageOption(image_info,"jpeg:block-smoothing");
if (option != (const char *) NULL)
jpeg_info.do_block_smoothing=IsStringTrue(option) != MagickFalse ? TRUE :
FALSE;
jpeg_info.dct_method=JDCT_FLOAT;
option=GetImageOption(image_info,"jpeg:dct-method");
if (option != (const char *) NULL)
switch (*option)
{
case 'D':
case 'd':
{
if (LocaleCompare(option,"default") == 0)
jpeg_info.dct_method=JDCT_DEFAULT;
break;
}
case 'F':
case 'f':
{
if (LocaleCompare(option,"fastest") == 0)
jpeg_info.dct_method=JDCT_FASTEST;
if (LocaleCompare(option,"float") == 0)
jpeg_info.dct_method=JDCT_FLOAT;
break;
}
case 'I':
case 'i':
{
if (LocaleCompare(option,"ifast") == 0)
jpeg_info.dct_method=JDCT_IFAST;
if (LocaleCompare(option,"islow") == 0)
jpeg_info.dct_method=JDCT_ISLOW;
break;
}
}
option=GetImageOption(image_info,"jpeg:fancy-upsampling");
if (option != (const char *) NULL)
jpeg_info.do_fancy_upsampling=IsStringTrue(option) != MagickFalse ? TRUE :
FALSE;
(void) jpeg_start_decompress(&jpeg_info);
image->columns=jpeg_info.output_width;
image->rows=jpeg_info.output_height;
image->depth=(size_t) jpeg_info.data_precision;
switch (jpeg_info.out_color_space)
{
case JCS_RGB:
default:
{
(void) SetImageColorspace(image,sRGBColorspace);
break;
}
case JCS_GRAYSCALE:
{
(void) SetImageColorspace(image,GRAYColorspace);
break;
}
case JCS_YCbCr:
{
(void) SetImageColorspace(image,YCbCrColorspace);
break;
}
case JCS_CMYK:
{
(void) SetImageColorspace(image,CMYKColorspace);
break;
}
}
if (IsITUFaxImage(image) != MagickFalse)
{
(void) SetImageColorspace(image,LabColorspace);
jpeg_info.out_color_space=JCS_YCbCr;
}
option=GetImageOption(image_info,"jpeg:colors");
if (option != (const char *) NULL)
if (AcquireImageColormap(image,StringToUnsignedLong(option)) == MagickFalse)
{
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
if ((jpeg_info.output_components == 1) && (jpeg_info.quantize_colors == 0))
{
size_t
colors;
colors=(size_t) GetQuantumRange(image->depth)+1;
if (AcquireImageColormap(image,colors) == MagickFalse)
{
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
}
if (image->debug != MagickFalse)
{
if (image->interlace != NoInterlace)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Interlace: progressive");
else
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Interlace: nonprogressive");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Data precision: %d",
(int) jpeg_info.data_precision);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Geometry: %dx%d",
(int) jpeg_info.output_width,(int) jpeg_info.output_height);
}
JPEGSetImageQuality(&jpeg_info,image);
JPEGSetImageSamplingFactor(&jpeg_info,image);
(void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
jpeg_info.out_color_space);
(void) SetImageProperty(image,"jpeg:colorspace",value);
if (image_info->ping != MagickFalse)
{
jpeg_destroy_decompress(&jpeg_info);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
status=SetImageExtent(image,image->columns,image->rows);
if (status == MagickFalse)
{
jpeg_destroy_decompress(&jpeg_info);
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
if ((jpeg_info.output_components != 1) &&
(jpeg_info.output_components != 3) && (jpeg_info.output_components != 4))
{
jpeg_destroy_decompress(&jpeg_info);
ThrowReaderException(CorruptImageError,"ImageTypeNotSupported");
}
memory_info=AcquireVirtualMemory((size_t) image->columns,
jpeg_info.output_components*sizeof(*jpeg_pixels));
if (memory_info == (MemoryInfo *) NULL)
{
jpeg_destroy_decompress(&jpeg_info);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
jpeg_pixels=(JSAMPLE *) GetVirtualMemoryBlob(memory_info);
(void) ResetMagickMemory(jpeg_pixels,0,image->columns*
jpeg_info.output_components*sizeof(*jpeg_pixels));
/*
Convert JPEG pixels to pixel packets.
*/
if (setjmp(error_manager.error_recovery) != 0)
{
if (memory_info != (MemoryInfo *) NULL)
memory_info=RelinquishVirtualMemory(memory_info);
jpeg_destroy_decompress(&jpeg_info);
(void) CloseBlob(image);
number_pixels=(MagickSizeType) image->columns*image->rows;
if (number_pixels != 0)
return(GetFirstImageInList(image));
return(DestroyImage(image));
}
if (jpeg_info.quantize_colors != 0)
{
image->colors=(size_t) jpeg_info.actual_number_of_colors;
if (jpeg_info.out_color_space == JCS_GRAYSCALE)
for (i=0; i < (ssize_t) image->colors; i++)
{
image->colormap[i].red=ScaleCharToQuantum(jpeg_info.colormap[0][i]);
image->colormap[i].green=image->colormap[i].red;
image->colormap[i].blue=image->colormap[i].red;
image->colormap[i].opacity=OpaqueOpacity;
}
else
for (i=0; i < (ssize_t) image->colors; i++)
{
image->colormap[i].red=ScaleCharToQuantum(jpeg_info.colormap[0][i]);
image->colormap[i].green=ScaleCharToQuantum(jpeg_info.colormap[1][i]);
image->colormap[i].blue=ScaleCharToQuantum(jpeg_info.colormap[2][i]);
image->colormap[i].opacity=OpaqueOpacity;
}
}
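/*
Convert the decoded scanlines: samples wider than 8 bits are rescaled
through a 16-bit intermediate, and CMYK samples (stored inverted in
Adobe-style JPEGs) are complemented on the way into the pixel cache.
*/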
scanline[0]=(JSAMPROW) jpeg_pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (jpeg_read_scanlines(&jpeg_info,scanline,1) != 1)
{
(void) ThrowMagickException(exception,GetMagickModule(),
CorruptImageWarning,"SkipToSyncByte","`%s'",image->filename);
continue;
}
p=jpeg_pixels;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetAuthenticIndexQueue(image);
if (jpeg_info.data_precision > 8)
{
unsigned short
scale;
scale=65535/(unsigned short) GetQuantumRange((size_t)
jpeg_info.data_precision);
if (jpeg_info.output_components == 1)
for (x=0; x < (ssize_t) image->columns; x++)
{
size_t
pixel;
pixel=(size_t) (scale*GETJSAMPLE(*p));
index=ConstrainColormapIndex(image,pixel);
SetPixelIndex(indexes+x,index);
SetPixelRGBO(q,image->colormap+(ssize_t) index);
p++;
q++;
}
else
if (image->colorspace != CMYKColorspace)
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,ScaleShortToQuantum((unsigned short)
(scale*GETJSAMPLE(*p++))));
SetPixelGreen(q,ScaleShortToQuantum((unsigned short)
(scale*GETJSAMPLE(*p++))));
SetPixelBlue(q,ScaleShortToQuantum((unsigned short)
(scale*GETJSAMPLE(*p++))));
SetPixelOpacity(q,OpaqueOpacity);
q++;
}
else
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelCyan(q,QuantumRange-ScaleShortToQuantum(
(unsigned short) (scale*GETJSAMPLE(*p++))));
SetPixelMagenta(q,QuantumRange-ScaleShortToQuantum(
(unsigned short) (scale*GETJSAMPLE(*p++))));
SetPixelYellow(q,QuantumRange-ScaleShortToQuantum(
(unsigned short) (scale*GETJSAMPLE(*p++))));
SetPixelBlack(indexes+x,QuantumRange-ScaleShortToQuantum(
(unsigned short) (scale*GETJSAMPLE(*p++))));
SetPixelOpacity(q,OpaqueOpacity);
q++;
}
}
else
if (jpeg_info.output_components == 1)
for (x=0; x < (ssize_t) image->columns; x++)
{
index=ConstrainColormapIndex(image,(size_t) GETJSAMPLE(*p));
SetPixelIndex(indexes+x,index);
SetPixelRGBO(q,image->colormap+(ssize_t) index);
p++;
q++;
}
else
if (image->colorspace != CMYKColorspace)
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,ScaleCharToQuantum((unsigned char)
GETJSAMPLE(*p++)));
SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
GETJSAMPLE(*p++)));
SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
GETJSAMPLE(*p++)));
SetPixelOpacity(q,OpaqueOpacity);
q++;
}
else
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelCyan(q,QuantumRange-ScaleCharToQuantum((unsigned char)
GETJSAMPLE(*p++)));
SetPixelMagenta(q,QuantumRange-ScaleCharToQuantum((unsigned char)
GETJSAMPLE(*p++)));
SetPixelYellow(q,QuantumRange-ScaleCharToQuantum((unsigned char)
GETJSAMPLE(*p++)));
SetPixelBlack(indexes+x,QuantumRange-ScaleCharToQuantum(
(unsigned char) GETJSAMPLE(*p++)));
SetPixelOpacity(q,OpaqueOpacity);
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
{
jpeg_abort_decompress(&jpeg_info);
break;
}
}
if (status != MagickFalse)
{
error_manager.finished=MagickTrue;
if (setjmp(error_manager.error_recovery) == 0)
(void) jpeg_finish_decompress(&jpeg_info);
}
/*
Free jpeg resources.
*/
jpeg_destroy_decompress(&jpeg_info);
memory_info=RelinquishVirtualMemory(memory_info);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r J P E G I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterJPEGImage() adds properties for the JPEG image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterJPEGImage method is:
%
% size_t RegisterJPEGImage(void)
%
*/
ModuleExport size_t RegisterJPEGImage(void)
{
char
version[MaxTextExtent];
MagickInfo
*entry;
static const char
description[] = "Joint Photographic Experts Group JFIF format";
*version='\0';
#if defined(JPEG_LIB_VERSION)
(void) FormatLocaleString(version,MaxTextExtent,"%d",JPEG_LIB_VERSION);
#endif
entry=SetMagickInfo("JPE");
#if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION)
entry->thread_support=NoThreadSupport;
#endif
#if defined(MAGICKCORE_JPEG_DELEGATE)
entry->decoder=(DecodeImageHandler *) ReadJPEGImage;
entry->encoder=(EncodeImageHandler *) WriteJPEGImage;
#endif
entry->magick=(IsImageFormatHandler *) IsJPEG;
entry->adjoin=MagickFalse;
entry->seekable_stream=MagickTrue;
entry->description=ConstantString(description);
if (*version != '\0')
entry->version=ConstantString(version);
entry->mime_type=ConstantString("image/jpeg");
entry->module=ConstantString("JPEG");
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("JPEG");
#if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION)
entry->thread_support=NoThreadSupport;
#endif
#if defined(MAGICKCORE_JPEG_DELEGATE)
entry->decoder=(DecodeImageHandler *) ReadJPEGImage;
entry->encoder=(EncodeImageHandler *) WriteJPEGImage;
#endif
entry->magick=(IsImageFormatHandler *) IsJPEG;
entry->adjoin=MagickFalse;
entry->seekable_stream=MagickTrue;
entry->description=ConstantString(description);
if (*version != '\0')
entry->version=ConstantString(version);
entry->mime_type=ConstantString("image/jpeg");
entry->module=ConstantString("JPEG");
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("JPG");
#if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION)
entry->thread_support=NoThreadSupport;
#endif
#if defined(MAGICKCORE_JPEG_DELEGATE)
entry->decoder=(DecodeImageHandler *) ReadJPEGImage;
entry->encoder=(EncodeImageHandler *) WriteJPEGImage;
#endif
entry->adjoin=MagickFalse;
entry->seekable_stream=MagickTrue;
entry->description=ConstantString(description);
if (*version != '\0')
entry->version=ConstantString(version);
entry->mime_type=ConstantString("image/jpeg");
entry->module=ConstantString("JPEG");
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("JPS");
#if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION)
entry->thread_support=NoThreadSupport;
#endif
#if defined(MAGICKCORE_JPEG_DELEGATE)
entry->decoder=(DecodeImageHandler *) ReadJPEGImage;
entry->encoder=(EncodeImageHandler *) WriteJPEGImage;
#endif
entry->adjoin=MagickFalse;
entry->seekable_stream=MagickTrue;
entry->description=ConstantString(description);
if (*version != '\0')
entry->version=ConstantString(version);
entry->mime_type=ConstantString("image/jpeg");
entry->module=ConstantString("JPEG");
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("PJPEG");
#if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION)
entry->thread_support=NoThreadSupport;
#endif
#if defined(MAGICKCORE_JPEG_DELEGATE)
entry->decoder=(DecodeImageHandler *) ReadJPEGImage;
entry->encoder=(EncodeImageHandler *) WriteJPEGImage;
#endif
entry->adjoin=MagickFalse;
entry->seekable_stream=MagickTrue;
entry->description=ConstantString(description);
if (*version != '\0')
entry->version=ConstantString(version);
entry->mime_type=ConstantString("image/jpeg");
entry->module=ConstantString("JPEG");
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r J P E G I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterJPEGImage() removes format registrations made by the
% JPEG module from the list of supported formats.
%
% The format of the UnregisterJPEGImage method is:
%
% UnregisterJPEGImage(void)
%
*/
ModuleExport void UnregisterJPEGImage(void)
{
(void) UnregisterMagickInfo("PJPG");
(void) UnregisterMagickInfo("JPS");
(void) UnregisterMagickInfo("JPG");
(void) UnregisterMagickInfo("JPG");
(void) UnregisterMagickInfo("JPEG");
(void) UnregisterMagickInfo("JPE");
}
#if defined(MAGICKCORE_JPEG_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e J P E G I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteJPEGImage() writes a JPEG image file and returns MagickTrue on
% success, MagickFalse otherwise.
%
% The format of the WriteJPEGImage method is:
%
% MagickBooleanType WriteJPEGImage(const ImageInfo *image_info,
% Image *image)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
*/
static QuantizationTable *DestroyQuantizationTable(QuantizationTable *table)
{
assert(table != (QuantizationTable *) NULL);
if (table->slot != (char *) NULL)
table->slot=DestroyString(table->slot);
if (table->description != (char *) NULL)
table->description=DestroyString(table->description);
if (table->levels != (unsigned int *) NULL)
table->levels=(unsigned int *) RelinquishMagickMemory(table->levels);
table=(QuantizationTable *) RelinquishMagickMemory(table);
return(table);
}
static boolean EmptyOutputBuffer(j_compress_ptr cinfo)
{
DestinationManager
*destination;
destination=(DestinationManager *) cinfo->dest;
destination->manager.free_in_buffer=(size_t) WriteBlob(destination->image,
MaxBufferExtent,destination->buffer);
if (destination->manager.free_in_buffer != MaxBufferExtent)
ERREXIT(cinfo,JERR_FILE_WRITE);
destination->manager.next_output_byte=destination->buffer;
return(TRUE);
}
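/*
Illustrative sketch (hypothetical slot name, description, and level
values, not shipped defaults) of the XML layout that
GetQuantizationTable() below parses: each <table> carries a slot or
alias attribute, a <description>, and comma-separated <levels> whose
width*height values are padded to 64 by repeating the last level:

  <quantization-tables>
    <table slot="0" alias="luma">
      <description>Example luminance table</description>
      <levels width="2" height="2" divisor="1">
        16, 11, 12, 14
      </levels>
    </table>
  </quantization-tables>
*/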
static QuantizationTable *GetQuantizationTable(const char *filename,
const char *slot,ExceptionInfo *exception)
{
char
*p,
*xml;
const char
*attribute,
*content;
double
value;
register ssize_t
i;
QuantizationTable
*table;
size_t
length;
ssize_t
j;
XMLTreeInfo
*description,
*levels,
*quantization_tables,
*table_iterator;
(void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
"Loading quantization tables \"%s\" ...",filename);
table=(QuantizationTable *) NULL;
xml=FileToString(filename,~0UL,exception);
if (xml == (char *) NULL)
return(table);
quantization_tables=NewXMLTree(xml,exception);
if (quantization_tables == (XMLTreeInfo *) NULL)
{
xml=DestroyString(xml);
return(table);
}
for (table_iterator=GetXMLTreeChild(quantization_tables,"table");
table_iterator != (XMLTreeInfo *) NULL;
table_iterator=GetNextXMLTreeTag(table_iterator))
{
attribute=GetXMLTreeAttribute(table_iterator,"slot");
if ((attribute != (char *) NULL) && (LocaleCompare(slot,attribute) == 0))
break;
attribute=GetXMLTreeAttribute(table_iterator,"alias");
if ((attribute != (char *) NULL) && (LocaleCompare(slot,attribute) == 0))
break;
}
if (table_iterator == (XMLTreeInfo *) NULL)
{
xml=DestroyString(xml);
return(table);
}
description=GetXMLTreeChild(table_iterator,"description");
if (description == (XMLTreeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingElement","<description>, slot \"%s\"",slot);
quantization_tables=DestroyXMLTree(quantization_tables);
xml=DestroyString(xml);
return(table);
}
levels=GetXMLTreeChild(table_iterator,"levels");
if (levels == (XMLTreeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingElement","<levels>, slot \"%s\"",slot);
quantization_tables=DestroyXMLTree(quantization_tables);
xml=DestroyString(xml);
return(table);
}
table=(QuantizationTable *) AcquireMagickMemory(sizeof(*table));
if (table == (QuantizationTable *) NULL)
ThrowFatalException(ResourceLimitFatalError,
"UnableToAcquireQuantizationTable");
table->slot=(char *) NULL;
table->description=(char *) NULL;
table->levels=(unsigned int *) NULL;
attribute=GetXMLTreeAttribute(table_iterator,"slot");
if (attribute != (char *) NULL)
table->slot=ConstantString(attribute);
content=GetXMLTreeContent(description);
if (content != (char *) NULL)
table->description=ConstantString(content);
attribute=GetXMLTreeAttribute(levels,"width");
if (attribute == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingAttribute","<levels width>, slot \"%s\"",slot);
quantization_tables=DestroyXMLTree(quantization_tables);
table=DestroyQuantizationTable(table);
xml=DestroyString(xml);
return(table);
}
table->width=StringToUnsignedLong(attribute);
if (table->width == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidAttribute","<levels width>, table \"%s\"",slot);
quantization_tables=DestroyXMLTree(quantization_tables);
table=DestroyQuantizationTable(table);
xml=DestroyString(xml);
return(table);
}
attribute=GetXMLTreeAttribute(levels,"height");
if (attribute == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingAttribute","<levels height>, table \"%s\"",slot);
quantization_tables=DestroyXMLTree(quantization_tables);
table=DestroyQuantizationTable(table);
xml=DestroyString(xml);
return(table);
}
table->height=StringToUnsignedLong(attribute);
if (table->height == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidAttribute","<levels height>, table \"%s\"",slot);
quantization_tables=DestroyXMLTree(quantization_tables);
table=DestroyQuantizationTable(table);
xml=DestroyString(xml);
return(table);
}
attribute=GetXMLTreeAttribute(levels,"divisor");
if (attribute == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingAttribute","<levels divisor>, table \"%s\"",slot);
quantization_tables=DestroyXMLTree(quantization_tables);
table=DestroyQuantizationTable(table);
xml=DestroyString(xml);
return(table);
}
table->divisor=InterpretLocaleValue(attribute,(char **) NULL);
if (table->divisor == 0.0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidAttribute","<levels divisor>, table \"%s\"",slot);
quantization_tables=DestroyXMLTree(quantization_tables);
table=DestroyQuantizationTable(table);
xml=DestroyString(xml);
return(table);
}
content=GetXMLTreeContent(levels);
if (content == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingContent","<levels>, table \"%s\"",slot);
quantization_tables=DestroyXMLTree(quantization_tables);
table=DestroyQuantizationTable(table);
xml=DestroyString(xml);
return(table);
}
length=(size_t) table->width*table->height;
if (length < 64)
length=64;
table->levels=(unsigned int *) AcquireQuantumMemory(length,
sizeof(*table->levels));
if (table->levels == (unsigned int *) NULL)
ThrowFatalException(ResourceLimitFatalError,
"UnableToAcquireQuantizationTable");
for (i=0; i < (ssize_t) (table->width*table->height); i++)
{
table->levels[i]=(unsigned int) (InterpretLocaleValue(content,&p)/
table->divisor+0.5);
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
content=p;
}
value=InterpretLocaleValue(content,&p);
(void) value;
if (p != content)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidContent","<level> too many values, table \"%s\"",slot);
quantization_tables=DestroyXMLTree(quantization_tables);
table=DestroyQuantizationTable(table);
xml=DestroyString(xml);
return(table);
}
for (j=i; j < 64; j++)
table->levels[j]=table->levels[j-1];
quantization_tables=DestroyXMLTree(quantization_tables);
xml=DestroyString(xml);
return(table);
}
static void InitializeDestination(j_compress_ptr cinfo)
{
DestinationManager
*destination;
destination=(DestinationManager *) cinfo->dest;
destination->buffer=(JOCTET *) (*cinfo->mem->alloc_small)
((j_common_ptr) cinfo,JPOOL_IMAGE,MaxBufferExtent*sizeof(JOCTET));
destination->manager.next_output_byte=destination->buffer;
destination->manager.free_in_buffer=MaxBufferExtent;
}
static void TerminateDestination(j_compress_ptr cinfo)
{
DestinationManager
*destination;
destination=(DestinationManager *) cinfo->dest;
if ((MaxBufferExtent-(int) destination->manager.free_in_buffer) > 0)
{
ssize_t
count;
count=WriteBlob(destination->image,MaxBufferExtent-
destination->manager.free_in_buffer,destination->buffer);
if (count != (ssize_t)
(MaxBufferExtent-destination->manager.free_in_buffer))
ERREXIT(cinfo,JERR_FILE_WRITE);
}
}
static void WriteProfile(j_compress_ptr jpeg_info,Image *image)
{
const char
*name;
const StringInfo
*profile;
MagickBooleanType
iptc;
register ssize_t
i;
size_t
length,
tag_length;
StringInfo
*custom_profile;
/*
Save image profiles as APP markers.
*/
iptc=MagickFalse;
custom_profile=AcquireStringInfo(65535L);
ResetImageProfileIterator(image);
for (name=GetNextImageProfile(image); name != (const char *) NULL; )
{
register unsigned char
*p;
profile=GetImageProfile(image,name);
p=GetStringInfoDatum(custom_profile);
if (LocaleCompare(name,"EXIF") == 0)
{
length=GetStringInfoLength(profile);
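/*
A JPEG marker payload is limited to 65533 bytes (65535 less the two
length bytes), so oversized EXIF profiles are truncated.
*/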
if (length > 65533L)
{
(void) ThrowMagickException(&image->exception,GetMagickModule(),
CoderWarning,"ExifProfileSizeExceedsLimit","`%s'",
image->filename);
length=65533L;
}
jpeg_write_marker(jpeg_info,XML_MARKER,GetStringInfoDatum(profile),
(unsigned int) length);
}
if (LocaleCompare(name,"ICC") == 0)
{
register unsigned char
*p;
tag_length=strlen(ICC_PROFILE);
p=GetStringInfoDatum(custom_profile);
(void) CopyMagickMemory(p,ICC_PROFILE,tag_length);
p[tag_length]='\0';
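/*
Each APP2 chunk is "ICC_PROFILE" plus a NUL (12 bytes), a 1-based
chunk index at p[12], the total chunk count at p[13], and up to
65519 bytes of profile data starting at p+tag_length+3.
*/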
for (i=0; i < (ssize_t) GetStringInfoLength(profile); i+=65519L)
{
length=MagickMin(GetStringInfoLength(profile)-i,65519L);
p[12]=(unsigned char) ((i/65519L)+1);
p[13]=(unsigned char) (GetStringInfoLength(profile)/65519L+1);
(void) CopyMagickMemory(p+tag_length+3,GetStringInfoDatum(profile)+i,
length);
jpeg_write_marker(jpeg_info,ICC_MARKER,GetStringInfoDatum(
custom_profile),(unsigned int) (length+tag_length+3));
}
}
if (((LocaleCompare(name,"IPTC") == 0) ||
(LocaleCompare(name,"8BIM") == 0)) && (iptc == MagickFalse))
{
size_t
roundup;
iptc=MagickTrue;
for (i=0; i < (ssize_t) GetStringInfoLength(profile); i+=65500L)
{
length=MagickMin(GetStringInfoLength(profile)-i,65500L);
roundup=(size_t) (length & 0x01);
if (LocaleNCompare((char *) GetStringInfoDatum(profile),"8BIM",4) == 0)
{
(void) memcpy(p,"Photoshop 3.0 ",14);
tag_length=14;
}
else
{
(void) CopyMagickMemory(p,"Photoshop 3.0 8BIM\04\04\0\0\0\0",24);
tag_length=26;
p[24]=(unsigned char) (length >> 8);
p[25]=(unsigned char) (length & 0xff);
}
p[13]=0x00;
(void) memcpy(p+tag_length,GetStringInfoDatum(profile)+i,length);
if (roundup != 0)
p[length+tag_length]='\0';
jpeg_write_marker(jpeg_info,IPTC_MARKER,GetStringInfoDatum(
custom_profile),(unsigned int) (length+tag_length+roundup));
}
}
if (LocaleCompare(name,"XMP") == 0)
{
StringInfo
*xmp_profile;
/*
Add namespace to XMP profile.
*/
xmp_profile=StringToStringInfo("http://ns.adobe.com/xap/1.0/ ");
if (xmp_profile != (StringInfo *) NULL)
{
if (profile != (StringInfo *) NULL)
ConcatenateStringInfo(xmp_profile,profile);
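/*
Replace the trailing space of the namespace URI with the NUL that
terminates the XMP namespace header.
*/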
GetStringInfoDatum(xmp_profile)[28]='\0';
for (i=0; i < (ssize_t) GetStringInfoLength(xmp_profile); i+=65533L)
{
length=MagickMin(GetStringInfoLength(xmp_profile)-i,65533L);
jpeg_write_marker(jpeg_info,XML_MARKER,
GetStringInfoDatum(xmp_profile)+i,(unsigned int) length);
}
xmp_profile=DestroyStringInfo(xmp_profile);
}
}
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"%s profile: %.20g bytes",name,(double) GetStringInfoLength(profile));
name=GetNextImageProfile(image);
}
custom_profile=DestroyStringInfo(custom_profile);
}
static void JPEGDestinationManager(j_compress_ptr cinfo,Image * image)
{
DestinationManager
*destination;
cinfo->dest=(struct jpeg_destination_mgr *) (*cinfo->mem->alloc_small)
((j_common_ptr) cinfo,JPOOL_IMAGE,sizeof(DestinationManager));
destination=(DestinationManager *) cinfo->dest;
destination->manager.init_destination=InitializeDestination;
destination->manager.empty_output_buffer=EmptyOutputBuffer;
destination->manager.term_destination=TerminateDestination;
destination->image=image;
}
static char **SamplingFactorToList(const char *text)
{
char
**textlist;
register char
*q;
register const char
*p;
register ssize_t
i;
if (text == (char *) NULL)
return((char **) NULL);
/*
Convert string to an ASCII list.
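For example, "2x2,1x1,1x1" yields {"2x2","1x1","1x1"}, with any
remaining components padded with "1x1".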
*/
textlist=(char **) AcquireQuantumMemory((size_t) MAX_COMPONENTS,
sizeof(*textlist));
if (textlist == (char **) NULL)
ThrowFatalException(ResourceLimitFatalError,"UnableToConvertText");
p=text;
for (i=0; i < (ssize_t) MAX_COMPONENTS; i++)
{
for (q=(char *) p; *q != '\0'; q++)
if (*q == ',')
break;
textlist[i]=(char *) AcquireQuantumMemory((size_t) (q-p)+MaxTextExtent,
sizeof(*textlist[i]));
if (textlist[i] == (char *) NULL)
ThrowFatalException(ResourceLimitFatalError,"UnableToConvertText");
(void) CopyMagickString(textlist[i],p,(size_t) (q-p+1));
if (*q == '\r')
q++;
if (*q == '\0')
break;
p=q+1;
}
for (i++; i < (ssize_t) MAX_COMPONENTS; i++)
textlist[i]=ConstantString("1x1");
return(textlist);
}
static MagickBooleanType WriteJPEGImage(const ImageInfo *image_info,
Image *image)
{
const char
*option,
*sampling_factor,
*value;
ErrorManager
error_manager;
ExceptionInfo
*exception;
Image
*volatile volatile_image;
int
colorspace,
quality;
JSAMPLE
*volatile jpeg_pixels;
JSAMPROW
scanline[1];
MagickBooleanType
status;
MemoryInfo
*memory_info;
register JSAMPLE
*q;
register ssize_t
i;
ssize_t
y;
struct jpeg_compress_struct
jpeg_info;
struct jpeg_error_mgr
jpeg_error;
unsigned short
scale;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
exception=(&image->exception);
if ((LocaleCompare(image_info->magick,"JPS") == 0) &&
(image->next != (Image *) NULL))
image=AppendImages(image,MagickFalse,exception);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
/*
Initialize JPEG parameters.
*/
(void) ResetMagickMemory(&error_manager,0,sizeof(error_manager));
(void) ResetMagickMemory(&jpeg_info,0,sizeof(jpeg_info));
(void) ResetMagickMemory(&jpeg_error,0,sizeof(jpeg_error));
volatile_image=image;
jpeg_info.client_data=(void *) volatile_image;
jpeg_info.err=jpeg_std_error(&jpeg_error);
jpeg_info.err->emit_message=(void (*)(j_common_ptr,int)) JPEGWarningHandler;
jpeg_info.err->error_exit=(void (*)(j_common_ptr)) JPEGErrorHandler;
error_manager.image=volatile_image;
memory_info=(MemoryInfo *) NULL;
if (setjmp(error_manager.error_recovery) != 0)
{
jpeg_destroy_compress(&jpeg_info);
(void) CloseBlob(volatile_image);
return(MagickFalse);
}
jpeg_info.client_data=(void *) &error_manager;
jpeg_create_compress(&jpeg_info);
JPEGDestinationManager(&jpeg_info,image);
if ((image->columns != (unsigned int) image->columns) ||
(image->rows != (unsigned int) image->rows))
ThrowWriterException(ImageError,"WidthOrHeightExceedsLimit");
jpeg_info.image_width=(unsigned int) image->columns;
jpeg_info.image_height=(unsigned int) image->rows;
jpeg_info.input_components=3;
jpeg_info.data_precision=8;
jpeg_info.in_color_space=JCS_RGB;
switch (image->colorspace)
{
case CMYKColorspace:
{
jpeg_info.input_components=4;
jpeg_info.in_color_space=JCS_CMYK;
break;
}
case YCbCrColorspace:
case Rec601YCbCrColorspace:
case Rec709YCbCrColorspace:
{
jpeg_info.in_color_space=JCS_YCbCr;
break;
}
case GRAYColorspace:
case Rec601LumaColorspace:
case Rec709LumaColorspace:
{
if (image_info->type == TrueColorType)
break;
jpeg_info.input_components=1;
jpeg_info.in_color_space=JCS_GRAYSCALE;
break;
}
default:
{
(void) TransformImageColorspace(image,sRGBColorspace);
if (image_info->type == TrueColorType)
break;
if (SetImageGray(image,&image->exception) != MagickFalse)
{
jpeg_info.input_components=1;
jpeg_info.in_color_space=JCS_GRAYSCALE;
}
break;
}
}
jpeg_set_defaults(&jpeg_info);
if (jpeg_info.in_color_space == JCS_CMYK)
jpeg_set_colorspace(&jpeg_info,JCS_YCCK);
if ((jpeg_info.data_precision != 12) && (image->depth <= 8))
jpeg_info.data_precision=8;
else
jpeg_info.data_precision=BITS_IN_JSAMPLE;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Image resolution: %.20g,%.20g",image->x_resolution,image->y_resolution);
if ((image->x_resolution != 0.0) && (image->y_resolution != 0.0))
{
/*
Set image resolution.
*/
jpeg_info.write_JFIF_header=TRUE;
jpeg_info.X_density=(UINT16) image->x_resolution;
jpeg_info.Y_density=(UINT16) image->y_resolution;
/*
Set image resolution units.
*/
if (image->units == PixelsPerInchResolution)
jpeg_info.density_unit=(UINT8) 1;
if (image->units == PixelsPerCentimeterResolution)
jpeg_info.density_unit=(UINT8) 2;
}
jpeg_info.dct_method=JDCT_FLOAT;
option=GetImageOption(image_info,"jpeg:dct-method");
if (option != (const char *) NULL)
switch (*option)
{
case 'D':
case 'd':
{
if (LocaleCompare(option,"default") == 0)
jpeg_info.dct_method=JDCT_DEFAULT;
break;
}
case 'F':
case 'f':
{
if (LocaleCompare(option,"fastest") == 0)
jpeg_info.dct_method=JDCT_FASTEST;
if (LocaleCompare(option,"float") == 0)
jpeg_info.dct_method=JDCT_FLOAT;
break;
}
case 'I':
case 'i':
{
if (LocaleCompare(option,"ifast") == 0)
jpeg_info.dct_method=JDCT_IFAST;
if (LocaleCompare(option,"islow") == 0)
jpeg_info.dct_method=JDCT_ISLOW;
break;
}
}
option=GetImageOption(image_info,"jpeg:optimize-coding");
if (option != (const char *) NULL)
jpeg_info.optimize_coding=IsStringTrue(option) != MagickFalse ? TRUE :
FALSE;
else
{
MagickSizeType
length;
length=(MagickSizeType) jpeg_info.input_components*image->columns*
image->rows*sizeof(JSAMPLE);
if (length == (MagickSizeType) ((size_t) length))
{
/*
Perform optimization only if available memory resources permit it.
*/
status=AcquireMagickResource(MemoryResource,length);
RelinquishMagickResource(MemoryResource,length);
jpeg_info.optimize_coding=status == MagickFalse ? FALSE : TRUE;
}
}
#if (JPEG_LIB_VERSION >= 61) && defined(C_PROGRESSIVE_SUPPORTED)
if ((LocaleCompare(image_info->magick,"PJPEG") == 0) ||
(image_info->interlace != NoInterlace))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Interlace: progressive");
jpeg_simple_progression(&jpeg_info);
}
else
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Interlace: non-progressive");
#else
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Interlace: nonprogressive");
#endif
quality=92;
if ((image_info->compression != LosslessJPEGCompression) &&
(image->quality <= 100))
{
if (image->quality != UndefinedCompressionQuality)
quality=(int) image->quality;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Quality: %.20g",
(double) image->quality);
}
else
{
#if !defined(C_LOSSLESS_SUPPORTED)
quality=100;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Quality: 100");
#else
if (image->quality < 100)
(void) ThrowMagickException(&image->exception,GetMagickModule(),
CoderWarning,"LosslessToLossyJPEGConversion","`%s'",image->filename);
else
{
int
point_transform,
predictor;
predictor=image->quality/100; /* range 1-7 */
point_transform=image->quality % 20; /* range 0-15 */
jpeg_simple_lossless(&jpeg_info,predictor,point_transform);
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Compression: lossless");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Predictor: %d",predictor);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Point Transform: %d",point_transform);
}
}
#endif
}
option=GetImageOption(image_info,"jpeg:extent");
if (option != (const char *) NULL)
{
Image
*jpeg_image;
ImageInfo
*jpeg_info;
jpeg_info=CloneImageInfo(image_info);
jpeg_info->blob=NULL;
jpeg_image=CloneImage(image,0,0,MagickTrue,&image->exception);
if (jpeg_image != (Image *) NULL)
{
MagickSizeType
extent;
size_t
maximum,
minimum;
/*
Search for compression quality that does not exceed image extent.
*/
jpeg_image->quality=0;
extent=(MagickSizeType) SiPrefixToDoubleInterval(option,100.0);
(void) DeleteImageOption(jpeg_info,"jpeg:extent");
(void) DeleteImageArtifact(jpeg_image,"jpeg:extent");
maximum=image_info->quality;
if (maximum < 2)
maximum=101;
for (minimum=2; minimum < maximum; )
{
(void) AcquireUniqueFilename(jpeg_image->filename);
jpeg_image->quality=minimum+(maximum-minimum+1)/2;
(void) WriteJPEGImage(jpeg_info,jpeg_image);
if (GetBlobSize(jpeg_image) <= extent)
minimum=jpeg_image->quality+1;
else
maximum=jpeg_image->quality-1;
(void) RelinquishUniqueFileResource(jpeg_image->filename);
}
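/*
The loop exits with minimum one above the largest quality whose
output fit within the extent, so step back by one.
*/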
quality=(int) minimum-1;
jpeg_image=DestroyImage(jpeg_image);
}
jpeg_info=DestroyImageInfo(jpeg_info);
}
jpeg_set_quality(&jpeg_info,quality,TRUE);
#if (JPEG_LIB_VERSION >= 70)
option=GetImageOption(image_info,"quality");
if (option != (const char *) NULL)
{
GeometryInfo
geometry_info;
int
flags;
/*
Set quality scaling for luminance and chrominance separately.
*/
flags=ParseGeometry(option,&geometry_info);
if (((flags & RhoValue) != 0) && ((flags & SigmaValue) != 0))
{
jpeg_info.q_scale_factor[0]=jpeg_quality_scaling((int)
(geometry_info.rho+0.5));
jpeg_info.q_scale_factor[1]=jpeg_quality_scaling((int)
(geometry_info.sigma+0.5));
jpeg_default_qtables(&jpeg_info,TRUE);
}
}
#endif
colorspace=jpeg_info.in_color_space;
value=GetImageOption(image_info,"jpeg:colorspace");
if (value == (char *) NULL)
value=GetImageProperty(image,"jpeg:colorspace");
if (value != (char *) NULL)
colorspace=StringToInteger(value);
sampling_factor=(const char *) NULL;
if (colorspace == jpeg_info.in_color_space)
{
value=GetImageOption(image_info,"jpeg:sampling-factor");
if (value == (char *) NULL)
value=GetImageProperty(image,"jpeg:sampling-factor");
if (value != (char *) NULL)
{
sampling_factor=value;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Input sampling-factors=%s",sampling_factor);
}
}
if (image_info->sampling_factor != (char *) NULL)
sampling_factor=image_info->sampling_factor;
if (sampling_factor == (const char *) NULL)
{
if (quality >= 90)
for (i=0; i < MAX_COMPONENTS; i++)
{
jpeg_info.comp_info[i].h_samp_factor=1;
jpeg_info.comp_info[i].v_samp_factor=1;
}
}
else
{
char
**factors;
GeometryInfo
geometry_info;
MagickStatusType
flags;
/*
Set sampling factor.
*/
i=0;
factors=SamplingFactorToList(sampling_factor);
if (factors != (char **) NULL)
{
for (i=0; i < MAX_COMPONENTS; i++)
{
if (factors[i] == (char *) NULL)
break;
flags=ParseGeometry(factors[i],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
jpeg_info.comp_info[i].h_samp_factor=(int) geometry_info.rho;
jpeg_info.comp_info[i].v_samp_factor=(int) geometry_info.sigma;
factors[i]=(char *) RelinquishMagickMemory(factors[i]);
}
factors=(char **) RelinquishMagickMemory(factors);
}
for ( ; i < MAX_COMPONENTS; i++)
{
jpeg_info.comp_info[i].h_samp_factor=1;
jpeg_info.comp_info[i].v_samp_factor=1;
}
}
option=GetImageOption(image_info,"jpeg:q-table");
if (option != (const char *) NULL)
{
QuantizationTable
*table;
/*
Custom quantization tables.
*/
table=GetQuantizationTable(option,"0",&image->exception);
if (table != (QuantizationTable *) NULL)
{
for (i=0; i < MAX_COMPONENTS; i++)
jpeg_info.comp_info[i].quant_tbl_no=0;
jpeg_add_quant_table(&jpeg_info,0,table->levels,
jpeg_quality_scaling(quality),0);
table=DestroyQuantizationTable(table);
}
table=GetQuantizationTable(option,"1",&image->exception);
if (table != (QuantizationTable *) NULL)
{
for (i=1; i < MAX_COMPONENTS; i++)
jpeg_info.comp_info[i].quant_tbl_no=1;
jpeg_add_quant_table(&jpeg_info,1,table->levels,
jpeg_quality_scaling(quality),0);
table=DestroyQuantizationTable(table);
}
table=GetQuantizationTable(option,"2",&image->exception);
if (table != (QuantizationTable *) NULL)
{
for (i=2; i < MAX_COMPONENTS; i++)
jpeg_info.comp_info[i].quant_tbl_no=2;
jpeg_add_quant_table(&jpeg_info,2,table->levels,
jpeg_quality_scaling(quality),0);
table=DestroyQuantizationTable(table);
}
table=GetQuantizationTable(option,"3",&image->exception);
if (table != (QuantizationTable *) NULL)
{
for (i=3; i < MAX_COMPONENTS; i++)
jpeg_info.comp_info[i].quant_tbl_no=3;
jpeg_add_quant_table(&jpeg_info,3,table->levels,
jpeg_quality_scaling(quality),0);
table=DestroyQuantizationTable(table);
}
}
jpeg_start_compress(&jpeg_info,TRUE);
if (image->debug != MagickFalse)
{
if (image->storage_class == PseudoClass)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Storage class: PseudoClass");
else
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Storage class: DirectClass");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Depth: %.20g",
(double) image->depth);
if (image->colors != 0)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Number of colors: %.20g",(double) image->colors);
else
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Number of colors: unspecified");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"JPEG data precision: %d",(int) jpeg_info.data_precision);
switch (image->colorspace)
{
case CMYKColorspace:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Storage class: DirectClass");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Colorspace: CMYK");
break;
}
case YCbCrColorspace:
case Rec601YCbCrColorspace:
case Rec709YCbCrColorspace:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Colorspace: YCbCr");
break;
}
default:
break;
}
switch (image->colorspace)
{
case CMYKColorspace:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Colorspace: CMYK");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Sampling factors: %dx%d,%dx%d,%dx%d,%dx%d",
jpeg_info.comp_info[0].h_samp_factor,
jpeg_info.comp_info[0].v_samp_factor,
jpeg_info.comp_info[1].h_samp_factor,
jpeg_info.comp_info[1].v_samp_factor,
jpeg_info.comp_info[2].h_samp_factor,
jpeg_info.comp_info[2].v_samp_factor,
jpeg_info.comp_info[3].h_samp_factor,
jpeg_info.comp_info[3].v_samp_factor);
break;
}
case GRAYColorspace:
case Rec601LumaColorspace:
case Rec709LumaColorspace:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Colorspace: GRAY");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Sampling factors: %dx%d",jpeg_info.comp_info[0].h_samp_factor,
jpeg_info.comp_info[0].v_samp_factor);
break;
}
case sRGBColorspace:
case RGBColorspace:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Image colorspace is RGB");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Sampling factors: %dx%d,%dx%d,%dx%d",
jpeg_info.comp_info[0].h_samp_factor,
jpeg_info.comp_info[0].v_samp_factor,
jpeg_info.comp_info[1].h_samp_factor,
jpeg_info.comp_info[1].v_samp_factor,
jpeg_info.comp_info[2].h_samp_factor,
jpeg_info.comp_info[2].v_samp_factor);
break;
}
case YCbCrColorspace:
case Rec601YCbCrColorspace:
case Rec709YCbCrColorspace:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Colorspace: YCbCr");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Sampling factors: %dx%d,%dx%d,%dx%d",
jpeg_info.comp_info[0].h_samp_factor,
jpeg_info.comp_info[0].v_samp_factor,
jpeg_info.comp_info[1].h_samp_factor,
jpeg_info.comp_info[1].v_samp_factor,
jpeg_info.comp_info[2].h_samp_factor,
jpeg_info.comp_info[2].v_samp_factor);
break;
}
default:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: %d",
image->colorspace);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Sampling factors: %dx%d,%dx%d,%dx%d,%dx%d",
jpeg_info.comp_info[0].h_samp_factor,
jpeg_info.comp_info[0].v_samp_factor,
jpeg_info.comp_info[1].h_samp_factor,
jpeg_info.comp_info[1].v_samp_factor,
jpeg_info.comp_info[2].h_samp_factor,
jpeg_info.comp_info[2].v_samp_factor,
jpeg_info.comp_info[3].h_samp_factor,
jpeg_info.comp_info[3].v_samp_factor);
break;
}
}
}
/*
Write JPEG profiles.
*/
value=GetImageProperty(image,"comment");
if (value != (char *) NULL)
for (i=0; i < (ssize_t) strlen(value); i+=65533L)
jpeg_write_marker(&jpeg_info,JPEG_COM,(unsigned char *) value+i,
(unsigned int) MagickMin((size_t) strlen(value+i),65533L));
if (image->profiles != (void *) NULL)
WriteProfile(&jpeg_info,image);
/*
Convert MIFF to JPEG raster pixels.
*/
memory_info=AcquireVirtualMemory((size_t) image->columns,
jpeg_info.input_components*sizeof(*jpeg_pixels));
if (memory_info == (MemoryInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
jpeg_pixels=(JSAMPLE *) GetVirtualMemoryBlob(memory_info);
if (setjmp(error_manager.error_recovery) != 0)
{
jpeg_destroy_compress(&jpeg_info);
if (memory_info != (MemoryInfo *) NULL)
memory_info=RelinquishVirtualMemory(memory_info);
(void) CloseBlob(image);
return(MagickFalse);
}
scanline[0]=(JSAMPROW) jpeg_pixels;
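/*
Divisor that maps 16-bit quantum samples down to the encoder's data
precision (e.g. 65535/4095 == 16 for 12-bit JSAMPLEs), guarded
against zero before it is used as a divisor below.
*/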
scale=65535/(unsigned short) GetQuantumRange((size_t)
jpeg_info.data_precision);
if (scale == 0)
scale=1;
if (jpeg_info.data_precision <= 8)
{
if ((jpeg_info.in_color_space == JCS_RGB) ||
(jpeg_info.in_color_space == JCS_YCbCr))
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
q=jpeg_pixels;
for (x=0; x < (ssize_t) image->columns; x++)
{
*q++=(JSAMPLE) ScaleQuantumToChar(GetPixelRed(p));
*q++=(JSAMPLE) ScaleQuantumToChar(GetPixelGreen(p));
*q++=(JSAMPLE) ScaleQuantumToChar(GetPixelBlue(p));
p++;
}
(void) jpeg_write_scanlines(&jpeg_info,scanline,1);
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
else
if (jpeg_info.in_color_space == JCS_GRAYSCALE)
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
q=jpeg_pixels;
for (x=0; x < (ssize_t) image->columns; x++)
{
*q++=(JSAMPLE) ScaleQuantumToChar(ClampToQuantum(
GetPixelLuma(image,p)));
p++;
}
(void) jpeg_write_scanlines(&jpeg_info,scanline,1);
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
else
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*indexes;
register const PixelPacket
*p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
q=jpeg_pixels;
indexes=GetVirtualIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
/*
Convert DirectClass packets to contiguous CMYK scanlines.
*/
*q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange-
GetPixelCyan(p))));
*q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange-
GetPixelMagenta(p))));
*q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange-
GetPixelYellow(p))));
*q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange-
GetPixelBlack(indexes+x))));
p++;
}
(void) jpeg_write_scanlines(&jpeg_info,scanline,1);
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
else
if (jpeg_info.in_color_space == JCS_GRAYSCALE)
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
q=jpeg_pixels;
for (x=0; x < (ssize_t) image->columns; x++)
{
*q++=(JSAMPLE) (ScaleQuantumToShort(ClampToQuantum(
GetPixelLuma(image,p)))/scale);
p++;
}
(void) jpeg_write_scanlines(&jpeg_info,scanline,1);
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
else
if ((jpeg_info.in_color_space == JCS_RGB) ||
(jpeg_info.in_color_space == JCS_YCbCr))
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
q=jpeg_pixels;
for (x=0; x < (ssize_t) image->columns; x++)
{
*q++=(JSAMPLE) (ScaleQuantumToShort(GetPixelRed(p))/scale);
*q++=(JSAMPLE) (ScaleQuantumToShort(GetPixelGreen(p))/scale);
*q++=(JSAMPLE) (ScaleQuantumToShort(GetPixelBlue(p))/scale);
p++;
}
(void) jpeg_write_scanlines(&jpeg_info,scanline,1);
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
else
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*indexes;
register const PixelPacket
*p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
q=jpeg_pixels;
indexes=GetVirtualIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
/*
Convert DirectClass packets to contiguous CMYK scanlines.
*/
*q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-GetPixelRed(p))/
scale);
*q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-GetPixelGreen(p))/
scale);
*q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-GetPixelBlue(p))/
scale);
*q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-
GetPixelIndex(indexes+x))/scale);
p++;
}
(void) jpeg_write_scanlines(&jpeg_info,scanline,1);
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
if (y == (ssize_t) image->rows)
jpeg_finish_compress(&jpeg_info);
/*
Relinquish resources.
*/
jpeg_destroy_compress(&jpeg_info);
memory_info=RelinquishVirtualMemory(memory_info);
(void) CloseBlob(image);
return(MagickTrue);
}
#endif
// SPDX-License-Identifier: GPL-2.0
/* Parts of this driver are based on the following:
* - Kvaser linux leaf driver (version 4.78)
* - CAN driver for esd CAN-USB/2
* - Kvaser linux usbcanII driver (version 5.3)
*
* Copyright (C) 2002-2018 KVASER AB, Sweden. All rights reserved.
* Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
* Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be>
* Copyright (C) 2015 Valeo S.A.
*/
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/usb.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/netlink.h>
#include "kvaser_usb.h"
/* Forward declaration */
static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg;
#define CAN_USB_CLOCK 8000000
#define MAX_USBCAN_NET_DEVICES 2
/* Command header size */
#define CMD_HEADER_LEN 2
/* Kvaser CAN message flags */
#define MSG_FLAG_ERROR_FRAME BIT(0)
#define MSG_FLAG_OVERRUN BIT(1)
#define MSG_FLAG_NERR BIT(2)
#define MSG_FLAG_WAKEUP BIT(3)
#define MSG_FLAG_REMOTE_FRAME BIT(4)
#define MSG_FLAG_RESERVED BIT(5)
#define MSG_FLAG_TX_ACK BIT(6)
#define MSG_FLAG_TX_REQUEST BIT(7)
/* CAN states (M16C CxSTRH register) */
#define M16C_STATE_BUS_RESET BIT(0)
#define M16C_STATE_BUS_ERROR BIT(4)
#define M16C_STATE_BUS_PASSIVE BIT(5)
#define M16C_STATE_BUS_OFF BIT(6)
/* Leaf/usbcan command ids */
#define CMD_RX_STD_MESSAGE 12
#define CMD_TX_STD_MESSAGE 13
#define CMD_RX_EXT_MESSAGE 14
#define CMD_TX_EXT_MESSAGE 15
#define CMD_SET_BUS_PARAMS 16
#define CMD_CHIP_STATE_EVENT 20
#define CMD_SET_CTRL_MODE 21
#define CMD_RESET_CHIP 24
#define CMD_START_CHIP 26
#define CMD_START_CHIP_REPLY 27
#define CMD_STOP_CHIP 28
#define CMD_STOP_CHIP_REPLY 29
#define CMD_USBCAN_CLOCK_OVERFLOW_EVENT 33
#define CMD_GET_CARD_INFO 34
#define CMD_GET_CARD_INFO_REPLY 35
#define CMD_GET_SOFTWARE_INFO 38
#define CMD_GET_SOFTWARE_INFO_REPLY 39
#define CMD_FLUSH_QUEUE 48
#define CMD_TX_ACKNOWLEDGE 50
#define CMD_CAN_ERROR_EVENT 51
#define CMD_FLUSH_QUEUE_REPLY 68
#define CMD_LEAF_LOG_MESSAGE 106
/* error factors */
#define M16C_EF_ACKE BIT(0)
#define M16C_EF_CRCE BIT(1)
#define M16C_EF_FORME BIT(2)
#define M16C_EF_STFE BIT(3)
#define M16C_EF_BITE0 BIT(4)
#define M16C_EF_BITE1 BIT(5)
#define M16C_EF_RCVE BIT(6)
#define M16C_EF_TRE BIT(7)
/* Only Leaf-based devices can report M16C error factors,
* thus define our own error status flags for USBCANII
*/
#define USBCAN_ERROR_STATE_NONE 0
#define USBCAN_ERROR_STATE_TX_ERROR BIT(0)
#define USBCAN_ERROR_STATE_RX_ERROR BIT(1)
#define USBCAN_ERROR_STATE_BUSERROR BIT(2)
/* bittiming parameters */
#define KVASER_USB_TSEG1_MIN 1
#define KVASER_USB_TSEG1_MAX 16
#define KVASER_USB_TSEG2_MIN 1
#define KVASER_USB_TSEG2_MAX 8
#define KVASER_USB_SJW_MAX 4
#define KVASER_USB_BRP_MIN 1
#define KVASER_USB_BRP_MAX 64
#define KVASER_USB_BRP_INC 1
/* ctrl modes */
#define KVASER_CTRL_MODE_NORMAL 1
#define KVASER_CTRL_MODE_SILENT 2
#define KVASER_CTRL_MODE_SELFRECEPTION 3
#define KVASER_CTRL_MODE_OFF 4
/* Extended CAN identifier flag */
#define KVASER_EXTENDED_FRAME BIT(31)
struct kvaser_cmd_simple {
u8 tid;
u8 channel;
} __packed;
struct kvaser_cmd_cardinfo {
u8 tid;
u8 nchannels;
__le32 serial_number;
__le32 padding0;
__le32 clock_resolution;
__le32 mfgdate;
u8 ean[8];
u8 hw_revision;
union {
struct {
u8 usb_hs_mode;
} __packed leaf1;
struct {
u8 padding;
} __packed usbcan1;
} __packed;
__le16 padding1;
} __packed;
struct leaf_cmd_softinfo {
u8 tid;
u8 padding0;
__le32 sw_options;
__le32 fw_version;
__le16 max_outstanding_tx;
__le16 padding1[9];
} __packed;
struct usbcan_cmd_softinfo {
u8 tid;
u8 fw_name[5];
__le16 max_outstanding_tx;
u8 padding[6];
__le32 fw_version;
__le16 checksum;
__le16 sw_options;
} __packed;
struct kvaser_cmd_busparams {
u8 tid;
u8 channel;
__le32 bitrate;
u8 tseg1;
u8 tseg2;
u8 sjw;
u8 no_samp;
} __packed;
struct kvaser_cmd_tx_can {
u8 channel;
u8 tid;
u8 data[14];
union {
struct {
u8 padding;
u8 flags;
} __packed leaf;
struct {
u8 flags;
u8 padding;
} __packed usbcan;
} __packed;
} __packed;
struct kvaser_cmd_rx_can_header {
u8 channel;
u8 flag;
} __packed;
struct leaf_cmd_rx_can {
u8 channel;
u8 flag;
__le16 time[3];
u8 data[14];
} __packed;
struct usbcan_cmd_rx_can {
u8 channel;
u8 flag;
u8 data[14];
__le16 time;
} __packed;
struct leaf_cmd_chip_state_event {
u8 tid;
u8 channel;
__le16 time[3];
u8 tx_errors_count;
u8 rx_errors_count;
u8 status;
u8 padding[3];
} __packed;
struct usbcan_cmd_chip_state_event {
u8 tid;
u8 channel;
u8 tx_errors_count;
u8 rx_errors_count;
__le16 time;
u8 status;
u8 padding[3];
} __packed;
struct kvaser_cmd_tx_acknowledge_header {
u8 channel;
u8 tid;
} __packed;
struct leaf_cmd_error_event {
u8 tid;
u8 flags;
__le16 time[3];
u8 channel;
u8 padding;
u8 tx_errors_count;
u8 rx_errors_count;
u8 status;
u8 error_factor;
} __packed;
struct usbcan_cmd_error_event {
u8 tid;
u8 padding;
u8 tx_errors_count_ch0;
u8 rx_errors_count_ch0;
u8 tx_errors_count_ch1;
u8 rx_errors_count_ch1;
u8 status_ch0;
u8 status_ch1;
__le16 time;
} __packed;
struct kvaser_cmd_ctrl_mode {
u8 tid;
u8 channel;
u8 ctrl_mode;
u8 padding[3];
} __packed;
struct kvaser_cmd_flush_queue {
u8 tid;
u8 channel;
u8 flags;
u8 padding[3];
} __packed;
struct leaf_cmd_log_message {
u8 channel;
u8 flags;
__le16 time[3];
u8 dlc;
u8 time_offset;
__le32 id;
u8 data[8];
} __packed;
struct kvaser_cmd {
u8 len;
u8 id;
union {
struct kvaser_cmd_simple simple;
struct kvaser_cmd_cardinfo cardinfo;
struct kvaser_cmd_busparams busparams;
struct kvaser_cmd_rx_can_header rx_can_header;
struct kvaser_cmd_tx_acknowledge_header tx_acknowledge_header;
union {
struct leaf_cmd_softinfo softinfo;
struct leaf_cmd_rx_can rx_can;
struct leaf_cmd_chip_state_event chip_state_event;
struct leaf_cmd_error_event error_event;
struct leaf_cmd_log_message log_message;
} __packed leaf;
union {
struct usbcan_cmd_softinfo softinfo;
struct usbcan_cmd_rx_can rx_can;
struct usbcan_cmd_chip_state_event chip_state_event;
struct usbcan_cmd_error_event error_event;
} __packed usbcan;
struct kvaser_cmd_tx_can tx_can;
struct kvaser_cmd_ctrl_mode ctrl_mode;
struct kvaser_cmd_flush_queue flush_queue;
} u;
} __packed;
/* Summary of a kvaser error event, for unified Leaf/Usbcan error
* handling. Some discrepancies between the two families exist:
*
* - USBCAN firmware does not report M16C "error factors"
* - USBCAN controllers have difficulties reporting if the raised error
* event is for ch0 or ch1. They leave such arbitration to the OS
* driver by letting it compare error counters with previous values
* and decide the error event's channel. Thus for USBCAN, the channel
* field is only advisory.
*/
struct kvaser_usb_err_summary {
u8 channel, status, txerr, rxerr;
union {
struct {
u8 error_factor;
} leaf;
struct {
u8 other_ch_status;
u8 error_state;
} usbcan;
};
};
static void *
kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
const struct sk_buff *skb, int *frame_len,
int *cmd_len, u16 transid)
{
struct kvaser_usb *dev = priv->dev;
struct kvaser_cmd *cmd;
u8 *cmd_tx_can_flags = NULL; /* GCC */
struct can_frame *cf = (struct can_frame *)skb->data;
*frame_len = cf->can_dlc;
cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
if (cmd) {
cmd->u.tx_can.tid = transid & 0xff;
cmd->len = *cmd_len = CMD_HEADER_LEN +
sizeof(struct kvaser_cmd_tx_can);
cmd->u.tx_can.channel = priv->channel;
switch (dev->card_data.leaf.family) {
case KVASER_LEAF:
cmd_tx_can_flags = &cmd->u.tx_can.leaf.flags;
break;
case KVASER_USBCAN:
cmd_tx_can_flags = &cmd->u.tx_can.usbcan.flags;
break;
}
*cmd_tx_can_flags = 0;
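/* The 29-bit extended id is split across data[0..4] as 5+6+4+8+6
 * bits; a standard 11-bit id uses only data[0..1] as 5+6 bits.
 */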
if (cf->can_id & CAN_EFF_FLAG) {
cmd->id = CMD_TX_EXT_MESSAGE;
cmd->u.tx_can.data[0] = (cf->can_id >> 24) & 0x1f;
cmd->u.tx_can.data[1] = (cf->can_id >> 18) & 0x3f;
cmd->u.tx_can.data[2] = (cf->can_id >> 14) & 0x0f;
cmd->u.tx_can.data[3] = (cf->can_id >> 6) & 0xff;
cmd->u.tx_can.data[4] = cf->can_id & 0x3f;
} else {
cmd->id = CMD_TX_STD_MESSAGE;
cmd->u.tx_can.data[0] = (cf->can_id >> 6) & 0x1f;
cmd->u.tx_can.data[1] = cf->can_id & 0x3f;
}
cmd->u.tx_can.data[5] = cf->can_dlc;
memcpy(&cmd->u.tx_can.data[6], cf->data, cf->can_dlc);
if (cf->can_id & CAN_RTR_FLAG)
*cmd_tx_can_flags |= MSG_FLAG_REMOTE_FRAME;
}
return cmd;
}
static int kvaser_usb_leaf_wait_cmd(const struct kvaser_usb *dev, u8 id,
struct kvaser_cmd *cmd)
{
struct kvaser_cmd *tmp;
void *buf;
int actual_len;
int err;
int pos;
unsigned long to = jiffies + msecs_to_jiffies(KVASER_USB_TIMEOUT);
buf = kzalloc(KVASER_USB_RX_BUFFER_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
do {
err = kvaser_usb_recv_cmd(dev, buf, KVASER_USB_RX_BUFFER_SIZE,
&actual_len);
if (err < 0)
goto end;
pos = 0;
while (pos <= actual_len - CMD_HEADER_LEN) {
tmp = buf + pos;
/* Handle commands crossing the USB endpoint max packet
* size boundary. Check kvaser_usb_read_bulk_callback()
* for further details.
*/
if (tmp->len == 0) {
pos = round_up(pos,
le16_to_cpu
(dev->bulk_in->wMaxPacketSize));
continue;
}
if (pos + tmp->len > actual_len) {
dev_err_ratelimited(&dev->intf->dev,
"Format error\n");
break;
}
if (tmp->id == id) {
memcpy(cmd, tmp, tmp->len);
goto end;
}
pos += tmp->len;
}
} while (time_before(jiffies, to));
err = -EINVAL;
end:
kfree(buf);
return err;
}
static int kvaser_usb_leaf_send_simple_cmd(const struct kvaser_usb *dev,
u8 cmd_id, int channel)
{
struct kvaser_cmd *cmd;
int rc;
cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->id = cmd_id;
cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_simple);
cmd->u.simple.channel = channel;
cmd->u.simple.tid = 0xff;
rc = kvaser_usb_send_cmd(dev, cmd, cmd->len);
kfree(cmd);
return rc;
}
static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
{
struct kvaser_cmd cmd;
int err;
err = kvaser_usb_leaf_send_simple_cmd(dev, CMD_GET_SOFTWARE_INFO, 0);
if (err)
return err;
err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_SOFTWARE_INFO_REPLY, &cmd);
if (err)
return err;
switch (dev->card_data.leaf.family) {
case KVASER_LEAF:
dev->fw_version = le32_to_cpu(cmd.u.leaf.softinfo.fw_version);
dev->max_tx_urbs =
le16_to_cpu(cmd.u.leaf.softinfo.max_outstanding_tx);
break;
case KVASER_USBCAN:
dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version);
dev->max_tx_urbs =
le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx);
break;
}
return 0;
}
static int kvaser_usb_leaf_get_software_info(struct kvaser_usb *dev)
{
int err;
int retry = 3;
/* On some x86 laptops, plugging a Kvaser device again after
* an unplug makes the firmware always ignore the very first
* command. For such a case, provide some room for retries
* instead of completely exiting the driver.
*/
do {
err = kvaser_usb_leaf_get_software_info_inner(dev);
} while (--retry && err == -ETIMEDOUT);
return err;
}
static int kvaser_usb_leaf_get_card_info(struct kvaser_usb *dev)
{
struct kvaser_cmd cmd;
int err;
err = kvaser_usb_leaf_send_simple_cmd(dev, CMD_GET_CARD_INFO, 0);
if (err)
return err;
err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_CARD_INFO_REPLY, &cmd);
if (err)
return err;
dev->nchannels = cmd.u.cardinfo.nchannels;
if (dev->nchannels > KVASER_USB_MAX_NET_DEVICES ||
(dev->card_data.leaf.family == KVASER_USBCAN &&
dev->nchannels > MAX_USBCAN_NET_DEVICES))
return -EINVAL;
return 0;
}
static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
const struct kvaser_cmd *cmd)
{
struct net_device_stats *stats;
struct kvaser_usb_tx_urb_context *context;
struct kvaser_usb_net_priv *priv;
unsigned long flags;
u8 channel, tid;
channel = cmd->u.tx_acknowledge_header.channel;
tid = cmd->u.tx_acknowledge_header.tid;
if (channel >= dev->nchannels) {
dev_err(&dev->intf->dev,
"Invalid channel number (%d)\n", channel);
return;
}
priv = dev->nets[channel];
if (!netif_device_present(priv->netdev))
return;
stats = &priv->netdev->stats;
context = &priv->tx_contexts[tid % dev->max_tx_urbs];
/* Sometimes the state change doesn't come after a bus-off event */
if (priv->can.restart_ms && priv->can.state >= CAN_STATE_BUS_OFF) {
struct sk_buff *skb;
struct can_frame *cf;
skb = alloc_can_err_skb(priv->netdev, &cf);
if (skb) {
cf->can_id |= CAN_ERR_RESTARTED;
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
netif_rx(skb);
} else {
netdev_err(priv->netdev,
"No memory left for err_skb\n");
}
priv->can.can_stats.restarts++;
netif_carrier_on(priv->netdev);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
}
stats->tx_packets++;
stats->tx_bytes += context->dlc;
spin_lock_irqsave(&priv->tx_contexts_lock, flags);
can_get_echo_skb(priv->netdev, context->echo_index);
context->echo_index = dev->max_tx_urbs;
--priv->active_tx_contexts;
netif_wake_queue(priv->netdev);
spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
}
static int kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv,
u8 cmd_id)
{
struct kvaser_cmd *cmd;
int err;
cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
if (!cmd)
return -ENOMEM;
cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_simple);
cmd->id = cmd_id;
cmd->u.simple.channel = priv->channel;
err = kvaser_usb_send_cmd_async(priv, cmd, cmd->len);
if (err)
kfree(cmd);
return err;
}
static void
kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
const struct kvaser_usb_err_summary *es,
struct can_frame *cf)
{
struct kvaser_usb *dev = priv->dev;
struct net_device_stats *stats = &priv->netdev->stats;
enum can_state cur_state, new_state, tx_state, rx_state;
netdev_dbg(priv->netdev, "Error status: 0x%02x\n", es->status);
new_state = priv->can.state;
cur_state = priv->can.state;
if (es->status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
new_state = CAN_STATE_BUS_OFF;
} else if (es->status & M16C_STATE_BUS_PASSIVE) {
new_state = CAN_STATE_ERROR_PASSIVE;
} else if (es->status & M16C_STATE_BUS_ERROR) {
/* Guard against spurious error events after a busoff */
if (cur_state < CAN_STATE_BUS_OFF) {
if (es->txerr >= 128 || es->rxerr >= 128)
new_state = CAN_STATE_ERROR_PASSIVE;
else if (es->txerr >= 96 || es->rxerr >= 96)
new_state = CAN_STATE_ERROR_WARNING;
else if (cur_state > CAN_STATE_ERROR_ACTIVE)
new_state = CAN_STATE_ERROR_ACTIVE;
}
}
if (!es->status)
new_state = CAN_STATE_ERROR_ACTIVE;
if (new_state != cur_state) {
tx_state = (es->txerr >= es->rxerr) ? new_state : 0;
rx_state = (es->txerr <= es->rxerr) ? new_state : 0;
can_change_state(priv->netdev, cf, tx_state, rx_state);
}
if (priv->can.restart_ms &&
cur_state >= CAN_STATE_BUS_OFF &&
new_state < CAN_STATE_BUS_OFF)
priv->can.can_stats.restarts++;
switch (dev->card_data.leaf.family) {
case KVASER_LEAF:
if (es->leaf.error_factor) {
priv->can.can_stats.bus_error++;
stats->rx_errors++;
}
break;
case KVASER_USBCAN:
if (es->usbcan.error_state & USBCAN_ERROR_STATE_TX_ERROR)
stats->tx_errors++;
if (es->usbcan.error_state & USBCAN_ERROR_STATE_RX_ERROR)
stats->rx_errors++;
if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR)
priv->can.can_stats.bus_error++;
break;
}
priv->bec.txerr = es->txerr;
priv->bec.rxerr = es->rxerr;
}
static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
const struct kvaser_usb_err_summary *es)
{
struct can_frame *cf;
struct can_frame tmp_cf = { .can_id = CAN_ERR_FLAG,
.can_dlc = CAN_ERR_DLC };
struct sk_buff *skb;
struct net_device_stats *stats;
struct kvaser_usb_net_priv *priv;
enum can_state old_state, new_state;
if (es->channel >= dev->nchannels) {
dev_err(&dev->intf->dev,
"Invalid channel number (%d)\n", es->channel);
return;
}
priv = dev->nets[es->channel];
stats = &priv->netdev->stats;
/* Update all of the CAN interface's state and error counters before
* trying any memory allocation that can actually fail with -ENOMEM.
*
* We send a temporary stack-allocated error CAN frame to
* can_change_state() for the very same reason.
*
* TODO: Split can_change_state() responsibility between updating the
* CAN interface's state and counters, and the setting up of CAN error
* frame ID and data to userspace. Remove stack allocation afterwards.
*/
old_state = priv->can.state;
kvaser_usb_leaf_rx_error_update_can_state(priv, es, &tmp_cf);
new_state = priv->can.state;
skb = alloc_can_err_skb(priv->netdev, &cf);
if (!skb) {
stats->rx_dropped++;
return;
}
memcpy(cf, &tmp_cf, sizeof(*cf));
if (new_state != old_state) {
if (es->status &
(M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
if (!priv->can.restart_ms)
kvaser_usb_leaf_simple_cmd_async(priv,
CMD_STOP_CHIP);
netif_carrier_off(priv->netdev);
}
if (priv->can.restart_ms &&
old_state >= CAN_STATE_BUS_OFF &&
new_state < CAN_STATE_BUS_OFF) {
cf->can_id |= CAN_ERR_RESTARTED;
netif_carrier_on(priv->netdev);
}
}
switch (dev->card_data.leaf.family) {
case KVASER_LEAF:
if (es->leaf.error_factor) {
cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
if (es->leaf.error_factor & M16C_EF_ACKE)
cf->data[3] = CAN_ERR_PROT_LOC_ACK;
if (es->leaf.error_factor & M16C_EF_CRCE)
cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
if (es->leaf.error_factor & M16C_EF_FORME)
cf->data[2] |= CAN_ERR_PROT_FORM;
if (es->leaf.error_factor & M16C_EF_STFE)
cf->data[2] |= CAN_ERR_PROT_STUFF;
if (es->leaf.error_factor & M16C_EF_BITE0)
cf->data[2] |= CAN_ERR_PROT_BIT0;
if (es->leaf.error_factor & M16C_EF_BITE1)
cf->data[2] |= CAN_ERR_PROT_BIT1;
if (es->leaf.error_factor & M16C_EF_TRE)
cf->data[2] |= CAN_ERR_PROT_TX;
}
break;
case KVASER_USBCAN:
if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR)
cf->can_id |= CAN_ERR_BUSERROR;
break;
}
cf->data[6] = es->txerr;
cf->data[7] = es->rxerr;
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
netif_rx(skb);
}
/* For USBCAN, report an error to userspace if the channel's error counters
 * have changed, or if we're the only channel seeing a bus error state.
*/
static void
kvaser_usb_leaf_usbcan_conditionally_rx_error(const struct kvaser_usb *dev,
struct kvaser_usb_err_summary *es)
{
struct kvaser_usb_net_priv *priv;
unsigned int channel;
bool report_error;
channel = es->channel;
if (channel >= dev->nchannels) {
dev_err(&dev->intf->dev,
"Invalid channel number (%d)\n", channel);
return;
}
priv = dev->nets[channel];
report_error = false;
if (es->txerr != priv->bec.txerr) {
es->usbcan.error_state |= USBCAN_ERROR_STATE_TX_ERROR;
report_error = true;
}
if (es->rxerr != priv->bec.rxerr) {
es->usbcan.error_state |= USBCAN_ERROR_STATE_RX_ERROR;
report_error = true;
}
if ((es->status & M16C_STATE_BUS_ERROR) &&
!(es->usbcan.other_ch_status & M16C_STATE_BUS_ERROR)) {
es->usbcan.error_state |= USBCAN_ERROR_STATE_BUSERROR;
report_error = true;
}
if (report_error)
kvaser_usb_leaf_rx_error(dev, es);
}
static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev,
const struct kvaser_cmd *cmd)
{
struct kvaser_usb_err_summary es = { };
switch (cmd->id) {
/* Sometimes errors are sent as unsolicited chip state events */
case CMD_CHIP_STATE_EVENT:
es.channel = cmd->u.usbcan.chip_state_event.channel;
es.status = cmd->u.usbcan.chip_state_event.status;
es.txerr = cmd->u.usbcan.chip_state_event.tx_errors_count;
es.rxerr = cmd->u.usbcan.chip_state_event.rx_errors_count;
kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es);
break;
case CMD_CAN_ERROR_EVENT:
es.channel = 0;
es.status = cmd->u.usbcan.error_event.status_ch0;
es.txerr = cmd->u.usbcan.error_event.tx_errors_count_ch0;
es.rxerr = cmd->u.usbcan.error_event.rx_errors_count_ch0;
es.usbcan.other_ch_status =
cmd->u.usbcan.error_event.status_ch1;
kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es);
/* The USBCAN firmware supports up to 2 channels.
* Now that ch0 was checked, check if ch1 has any errors.
*/
if (dev->nchannels == MAX_USBCAN_NET_DEVICES) {
es.channel = 1;
es.status = cmd->u.usbcan.error_event.status_ch1;
es.txerr =
cmd->u.usbcan.error_event.tx_errors_count_ch1;
es.rxerr =
cmd->u.usbcan.error_event.rx_errors_count_ch1;
es.usbcan.other_ch_status =
cmd->u.usbcan.error_event.status_ch0;
kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es);
}
break;
default:
dev_err(&dev->intf->dev, "Invalid cmd id (%d)\n", cmd->id);
}
}
static void kvaser_usb_leaf_leaf_rx_error(const struct kvaser_usb *dev,
const struct kvaser_cmd *cmd)
{
struct kvaser_usb_err_summary es = { };
switch (cmd->id) {
case CMD_CAN_ERROR_EVENT:
es.channel = cmd->u.leaf.error_event.channel;
es.status = cmd->u.leaf.error_event.status;
es.txerr = cmd->u.leaf.error_event.tx_errors_count;
es.rxerr = cmd->u.leaf.error_event.rx_errors_count;
es.leaf.error_factor = cmd->u.leaf.error_event.error_factor;
break;
case CMD_LEAF_LOG_MESSAGE:
es.channel = cmd->u.leaf.log_message.channel;
es.status = cmd->u.leaf.log_message.data[0];
es.txerr = cmd->u.leaf.log_message.data[2];
es.rxerr = cmd->u.leaf.log_message.data[3];
es.leaf.error_factor = cmd->u.leaf.log_message.data[1];
break;
case CMD_CHIP_STATE_EVENT:
es.channel = cmd->u.leaf.chip_state_event.channel;
es.status = cmd->u.leaf.chip_state_event.status;
es.txerr = cmd->u.leaf.chip_state_event.tx_errors_count;
es.rxerr = cmd->u.leaf.chip_state_event.rx_errors_count;
es.leaf.error_factor = 0;
break;
default:
dev_err(&dev->intf->dev, "Invalid cmd id (%d)\n", cmd->id);
return;
}
kvaser_usb_leaf_rx_error(dev, &es);
}
static void kvaser_usb_leaf_rx_can_err(const struct kvaser_usb_net_priv *priv,
const struct kvaser_cmd *cmd)
{
if (cmd->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME |
MSG_FLAG_NERR)) {
struct net_device_stats *stats = &priv->netdev->stats;
netdev_err(priv->netdev, "Unknown error (flags: 0x%02x)\n",
cmd->u.rx_can_header.flag);
stats->rx_errors++;
return;
}
if (cmd->u.rx_can_header.flag & MSG_FLAG_OVERRUN)
kvaser_usb_can_rx_over_error(priv->netdev);
}
static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
const struct kvaser_cmd *cmd)
{
struct kvaser_usb_net_priv *priv;
struct can_frame *cf;
struct sk_buff *skb;
struct net_device_stats *stats;
u8 channel = cmd->u.rx_can_header.channel;
const u8 *rx_data = NULL; /* GCC */
if (channel >= dev->nchannels) {
dev_err(&dev->intf->dev,
"Invalid channel number (%d)\n", channel);
return;
}
priv = dev->nets[channel];
stats = &priv->netdev->stats;
if ((cmd->u.rx_can_header.flag & MSG_FLAG_ERROR_FRAME) &&
(dev->card_data.leaf.family == KVASER_LEAF &&
cmd->id == CMD_LEAF_LOG_MESSAGE)) {
kvaser_usb_leaf_leaf_rx_error(dev, cmd);
return;
} else if (cmd->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME |
MSG_FLAG_NERR |
MSG_FLAG_OVERRUN)) {
kvaser_usb_leaf_rx_can_err(priv, cmd);
return;
} else if (cmd->u.rx_can_header.flag & ~MSG_FLAG_REMOTE_FRAME) {
netdev_warn(priv->netdev,
"Unhandled frame (flags: 0x%02x)\n",
cmd->u.rx_can_header.flag);
return;
}
switch (dev->card_data.leaf.family) {
case KVASER_LEAF:
rx_data = cmd->u.leaf.rx_can.data;
break;
case KVASER_USBCAN:
rx_data = cmd->u.usbcan.rx_can.data;
break;
}
skb = alloc_can_skb(priv->netdev, &cf);
if (!skb) {
stats->rx_dropped++;
return;
}
if (dev->card_data.leaf.family == KVASER_LEAF && cmd->id ==
CMD_LEAF_LOG_MESSAGE) {
cf->can_id = le32_to_cpu(cmd->u.leaf.log_message.id);
if (cf->can_id & KVASER_EXTENDED_FRAME)
cf->can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
else
cf->can_id &= CAN_SFF_MASK;
cf->can_dlc = get_can_dlc(cmd->u.leaf.log_message.dlc);
if (cmd->u.leaf.log_message.flags & MSG_FLAG_REMOTE_FRAME)
cf->can_id |= CAN_RTR_FLAG;
else
memcpy(cf->data, &cmd->u.leaf.log_message.data,
cf->can_dlc);
} else {
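/* Rebuild the 11-bit base id from rx_data[0..1] (5+6 bits); for
 * extended frames the remaining 18 bits follow in rx_data[2..4].
 */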
cf->can_id = ((rx_data[0] & 0x1f) << 6) | (rx_data[1] & 0x3f);
if (cmd->id == CMD_RX_EXT_MESSAGE) {
cf->can_id <<= 18;
cf->can_id |= ((rx_data[2] & 0x0f) << 14) |
((rx_data[3] & 0xff) << 6) |
(rx_data[4] & 0x3f);
cf->can_id |= CAN_EFF_FLAG;
}
cf->can_dlc = get_can_dlc(rx_data[5]);
if (cmd->u.rx_can_header.flag & MSG_FLAG_REMOTE_FRAME)
cf->can_id |= CAN_RTR_FLAG;
else
memcpy(cf->data, &rx_data[6], cf->can_dlc);
}
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
netif_rx(skb);
}
static void kvaser_usb_leaf_start_chip_reply(const struct kvaser_usb *dev,
const struct kvaser_cmd *cmd)
{
struct kvaser_usb_net_priv *priv;
u8 channel = cmd->u.simple.channel;
if (channel >= dev->nchannels) {
dev_err(&dev->intf->dev,
"Invalid channel number (%d)\n", channel);
return;
}
priv = dev->nets[channel];
if (completion_done(&priv->start_comp) &&
netif_queue_stopped(priv->netdev)) {
netif_wake_queue(priv->netdev);
} else {
netif_start_queue(priv->netdev);
complete(&priv->start_comp);
}
}
static void kvaser_usb_leaf_stop_chip_reply(const struct kvaser_usb *dev,
const struct kvaser_cmd *cmd)
{
struct kvaser_usb_net_priv *priv;
u8 channel = cmd->u.simple.channel;
if (channel >= dev->nchannels) {
dev_err(&dev->intf->dev,
"Invalid channel number (%d)\n", channel);
return;
}
priv = dev->nets[channel];
complete(&priv->stop_comp);
}
static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
const struct kvaser_cmd *cmd)
{
switch (cmd->id) {
case CMD_START_CHIP_REPLY:
kvaser_usb_leaf_start_chip_reply(dev, cmd);
break;
case CMD_STOP_CHIP_REPLY:
kvaser_usb_leaf_stop_chip_reply(dev, cmd);
break;
case CMD_RX_STD_MESSAGE:
case CMD_RX_EXT_MESSAGE:
kvaser_usb_leaf_rx_can_msg(dev, cmd);
break;
case CMD_LEAF_LOG_MESSAGE:
if (dev->card_data.leaf.family != KVASER_LEAF)
goto warn;
kvaser_usb_leaf_rx_can_msg(dev, cmd);
break;
case CMD_CHIP_STATE_EVENT:
case CMD_CAN_ERROR_EVENT:
if (dev->card_data.leaf.family == KVASER_LEAF)
kvaser_usb_leaf_leaf_rx_error(dev, cmd);
else
kvaser_usb_leaf_usbcan_rx_error(dev, cmd);
break;
case CMD_TX_ACKNOWLEDGE:
kvaser_usb_leaf_tx_acknowledge(dev, cmd);
break;
/* Ignored commands */
case CMD_USBCAN_CLOCK_OVERFLOW_EVENT:
if (dev->card_data.leaf.family != KVASER_USBCAN)
goto warn;
break;
case CMD_FLUSH_QUEUE_REPLY:
if (dev->card_data.leaf.family != KVASER_LEAF)
goto warn;
break;
default:
warn:
dev_warn(&dev->intf->dev, "Unhandled command (%d)\n", cmd->id);
break;
}
}
static void kvaser_usb_leaf_read_bulk_callback(struct kvaser_usb *dev,
void *buf, int len)
{
struct kvaser_cmd *cmd;
int pos = 0;
while (pos <= len - CMD_HEADER_LEN) {
cmd = buf + pos;
/* The Kvaser firmware can only read and write commands that
* does not cross the USB's endpoint wMaxPacketSize boundary.
* If a follow-up command crosses such boundary, firmware puts
* a placeholder zero-length command in its place then aligns
* the real command to the next max packet size.
*
* Handle such cases or we're going to miss a significant
* number of events in case of a heavy rx load on the bus.
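*
* For example (illustrative numbers): with a 64-byte wMaxPacketSize,
* a zero-length command at offset 60 makes us round up to offset 64,
* where the firmware placed the real follow-up command.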
*/
if (cmd->len == 0) {
pos = round_up(pos, le16_to_cpu(dev->bulk_in->wMaxPacketSize));
continue;
}
if (pos + cmd->len > len) {
dev_err_ratelimited(&dev->intf->dev, "Format error\n");
break;
}
kvaser_usb_leaf_handle_command(dev, cmd);
pos += cmd->len;
}
}
static int kvaser_usb_leaf_set_opt_mode(const struct kvaser_usb_net_priv *priv)
{
struct kvaser_cmd *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->id = CMD_SET_CTRL_MODE;
cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_ctrl_mode);
cmd->u.ctrl_mode.tid = 0xff;
cmd->u.ctrl_mode.channel = priv->channel;
if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
cmd->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_SILENT;
else
cmd->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_NORMAL;
rc = kvaser_usb_send_cmd(priv->dev, cmd, cmd->len);
kfree(cmd);
return rc;
}
static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv)
{
int err;
init_completion(&priv->start_comp);
err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_START_CHIP,
priv->channel);
if (err)
return err;
if (!wait_for_completion_timeout(&priv->start_comp,
msecs_to_jiffies(KVASER_USB_TIMEOUT)))
return -ETIMEDOUT;
return 0;
}
static int kvaser_usb_leaf_stop_chip(struct kvaser_usb_net_priv *priv)
{
int err;
init_completion(&priv->stop_comp);
err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_STOP_CHIP,
priv->channel);
if (err)
return err;
if (!wait_for_completion_timeout(&priv->stop_comp,
msecs_to_jiffies(KVASER_USB_TIMEOUT)))
return -ETIMEDOUT;
return 0;
}
static int kvaser_usb_leaf_reset_chip(struct kvaser_usb *dev, int channel)
{
return kvaser_usb_leaf_send_simple_cmd(dev, CMD_RESET_CHIP, channel);
}
static int kvaser_usb_leaf_flush_queue(struct kvaser_usb_net_priv *priv)
{
struct kvaser_cmd *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->id = CMD_FLUSH_QUEUE;
cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_flush_queue);
cmd->u.flush_queue.channel = priv->channel;
cmd->u.flush_queue.flags = 0x00;
rc = kvaser_usb_send_cmd(priv->dev, cmd, cmd->len);
kfree(cmd);
return rc;
}
static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev)
{
struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
dev->cfg = &kvaser_usb_leaf_dev_cfg;
card_data->ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
return 0;
}
static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
.name = "kvaser_usb",
.tseg1_min = KVASER_USB_TSEG1_MIN,
.tseg1_max = KVASER_USB_TSEG1_MAX,
.tseg2_min = KVASER_USB_TSEG2_MIN,
.tseg2_max = KVASER_USB_TSEG2_MAX,
.sjw_max = KVASER_USB_SJW_MAX,
.brp_min = KVASER_USB_BRP_MIN,
.brp_max = KVASER_USB_BRP_MAX,
.brp_inc = KVASER_USB_BRP_INC,
};
static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
struct can_bittiming *bt = &priv->can.bittiming;
struct kvaser_usb *dev = priv->dev;
struct kvaser_cmd *cmd;
int rc;
cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->id = CMD_SET_BUS_PARAMS;
cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_busparams);
cmd->u.busparams.channel = priv->channel;
cmd->u.busparams.tid = 0xff;
cmd->u.busparams.bitrate = cpu_to_le32(bt->bitrate);
cmd->u.busparams.sjw = bt->sjw;
cmd->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1;
cmd->u.busparams.tseg2 = bt->phase_seg2;
if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
cmd->u.busparams.no_samp = 3;
else
cmd->u.busparams.no_samp = 1;
rc = kvaser_usb_send_cmd(dev, cmd, cmd->len);
kfree(cmd);
return rc;
}
static int kvaser_usb_leaf_set_mode(struct net_device *netdev,
enum can_mode mode)
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
int err;
switch (mode) {
case CAN_MODE_START:
err = kvaser_usb_leaf_simple_cmd_async(priv, CMD_START_CHIP);
if (err)
return err;
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int kvaser_usb_leaf_get_berr_counter(const struct net_device *netdev,
struct can_berr_counter *bec)
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
*bec = priv->bec;
return 0;
}
static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev)
{
const struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
int i;
iface_desc = &dev->intf->altsetting[0];
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
if (!dev->bulk_in && usb_endpoint_is_bulk_in(endpoint))
dev->bulk_in = endpoint;
if (!dev->bulk_out && usb_endpoint_is_bulk_out(endpoint))
dev->bulk_out = endpoint;
/* use the first bulk-in and first bulk-out endpoints found */
if (dev->bulk_in && dev->bulk_out)
return 0;
}
return -ENODEV;
}
const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = {
.dev_set_mode = kvaser_usb_leaf_set_mode,
.dev_set_bittiming = kvaser_usb_leaf_set_bittiming,
.dev_set_data_bittiming = NULL,
.dev_get_berr_counter = kvaser_usb_leaf_get_berr_counter,
.dev_setup_endpoints = kvaser_usb_leaf_setup_endpoints,
.dev_init_card = kvaser_usb_leaf_init_card,
.dev_get_software_info = kvaser_usb_leaf_get_software_info,
.dev_get_software_details = NULL,
.dev_get_card_info = kvaser_usb_leaf_get_card_info,
.dev_get_capabilities = NULL,
.dev_set_opt_mode = kvaser_usb_leaf_set_opt_mode,
.dev_start_chip = kvaser_usb_leaf_start_chip,
.dev_stop_chip = kvaser_usb_leaf_stop_chip,
.dev_reset_chip = kvaser_usb_leaf_reset_chip,
.dev_flush_queue = kvaser_usb_leaf_flush_queue,
.dev_read_bulk_callback = kvaser_usb_leaf_read_bulk_callback,
.dev_frame_to_cmd = kvaser_usb_leaf_frame_to_cmd,
};
static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg = {
.clock = {
.freq = CAN_USB_CLOCK,
},
.timestamp_freq = 1,
.bittiming_const = &kvaser_usb_leaf_bittiming_const,
};
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_1331_0 |
crossvul-cpp_data_bad_295_0 | /* $OpenBSD: auth2-gss.c,v 1.28 2018/07/10 09:13:30 djm Exp $ */
/*
* Copyright (c) 2001-2003 Simon Wilkinson. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef GSSAPI
#include <sys/types.h>
#include "xmalloc.h"
#include "sshkey.h"
#include "hostfile.h"
#include "auth.h"
#include "ssh2.h"
#include "log.h"
#include "dispatch.h"
#include "sshbuf.h"
#include "ssherr.h"
#include "servconf.h"
#include "packet.h"
#include "ssh-gss.h"
#include "monitor_wrap.h"
extern ServerOptions options;
static int input_gssapi_token(int type, u_int32_t plen, struct ssh *ssh);
static int input_gssapi_mic(int type, u_int32_t plen, struct ssh *ssh);
static int input_gssapi_exchange_complete(int type, u_int32_t plen, struct ssh *ssh);
static int input_gssapi_errtok(int, u_int32_t, struct ssh *);
/*
* We only support those mechanisms that we know about (i.e. ones where we
* know how to check the local user, kuserok and the like).
*/
static int
userauth_gssapi(struct ssh *ssh)
{
Authctxt *authctxt = ssh->authctxt;
gss_OID_desc goid = {0, NULL};
Gssctxt *ctxt = NULL;
int r, present;
u_int mechs;
OM_uint32 ms;
size_t len;
u_char *doid = NULL;
if (!authctxt->valid || authctxt->user == NULL)
return (0);
if ((r = sshpkt_get_u32(ssh, &mechs)) != 0)
fatal("%s: %s", __func__, ssh_err(r));
if (mechs == 0) {
debug("Mechanism negotiation is not supported");
return (0);
}
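/*
* Each candidate arrives as a DER-encoded OID: a type octet
* (SSH_GSS_OIDTYPE, 0x06), a length octet, then the OID body.  For
* example, the Kerberos V5 mechanism 1.2.840.113554.1.2.2 is the
* 11-byte string 06 09 2a 86 48 86 f7 12 01 02 02.
*/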
do {
mechs--;
free(doid);
present = 0;
if ((r = sshpkt_get_string(ssh, &doid, &len)) != 0)
fatal("%s: %s", __func__, ssh_err(r));
if (len > 2 && doid[0] == SSH_GSS_OIDTYPE &&
doid[1] == len - 2) {
goid.elements = doid + 2;
goid.length = len - 2;
ssh_gssapi_test_oid_supported(&ms, &goid, &present);
} else {
logit("Badly formed OID received");
}
} while (mechs > 0 && !present);
if (!present) {
free(doid);
authctxt->server_caused_failure = 1;
return (0);
}
if (GSS_ERROR(PRIVSEP(ssh_gssapi_server_ctx(&ctxt, &goid)))) {
if (ctxt != NULL)
ssh_gssapi_delete_ctx(&ctxt);
free(doid);
authctxt->server_caused_failure = 1;
return (0);
}
authctxt->methoddata = (void *)ctxt;
/* Return the OID that we received */
if ((r = sshpkt_start(ssh, SSH2_MSG_USERAUTH_GSSAPI_RESPONSE)) != 0 ||
(r = sshpkt_put_string(ssh, doid, len)) != 0 ||
(r = sshpkt_send(ssh)) != 0)
fatal("%s: %s", __func__, ssh_err(r));
free(doid);
ssh_dispatch_set(ssh, SSH2_MSG_USERAUTH_GSSAPI_TOKEN, &input_gssapi_token);
ssh_dispatch_set(ssh, SSH2_MSG_USERAUTH_GSSAPI_ERRTOK, &input_gssapi_errtok);
authctxt->postponed = 1;
return (0);
}
static int
input_gssapi_token(int type, u_int32_t plen, struct ssh *ssh)
{
Authctxt *authctxt = ssh->authctxt;
Gssctxt *gssctxt;
gss_buffer_desc send_tok = GSS_C_EMPTY_BUFFER;
gss_buffer_desc recv_tok;
OM_uint32 maj_status, min_status, flags;
u_char *p;
size_t len;
int r;
if (authctxt == NULL || (authctxt->methoddata == NULL && !use_privsep))
fatal("No authentication or GSSAPI context");
gssctxt = authctxt->methoddata;
if ((r = sshpkt_get_string(ssh, &p, &len)) != 0 ||
(r = sshpkt_get_end(ssh)) != 0)
fatal("%s: %s", __func__, ssh_err(r));
recv_tok.value = p;
recv_tok.length = len;
maj_status = PRIVSEP(ssh_gssapi_accept_ctx(gssctxt, &recv_tok,
&send_tok, &flags));
free(p);
if (GSS_ERROR(maj_status)) {
if (send_tok.length != 0) {
if ((r = sshpkt_start(ssh,
SSH2_MSG_USERAUTH_GSSAPI_ERRTOK)) != 0 ||
(r = sshpkt_put_string(ssh, send_tok.value,
send_tok.length)) != 0 ||
(r = sshpkt_send(ssh)) != 0)
fatal("%s: %s", __func__, ssh_err(r));
}
authctxt->postponed = 0;
ssh_dispatch_set(ssh, SSH2_MSG_USERAUTH_GSSAPI_TOKEN, NULL);
userauth_finish(ssh, 0, "gssapi-with-mic", NULL);
} else {
if (send_tok.length != 0) {
if ((r = sshpkt_start(ssh,
SSH2_MSG_USERAUTH_GSSAPI_TOKEN)) != 0 ||
(r = sshpkt_put_string(ssh, send_tok.value,
send_tok.length)) != 0 ||
(r = sshpkt_send(ssh)) != 0)
fatal("%s: %s", __func__, ssh_err(r));
}
if (maj_status == GSS_S_COMPLETE) {
ssh_dispatch_set(ssh, SSH2_MSG_USERAUTH_GSSAPI_TOKEN, NULL);
if (flags & GSS_C_INTEG_FLAG)
ssh_dispatch_set(ssh, SSH2_MSG_USERAUTH_GSSAPI_MIC,
&input_gssapi_mic);
else
ssh_dispatch_set(ssh,
SSH2_MSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE,
&input_gssapi_exchange_complete);
}
}
gss_release_buffer(&min_status, &send_tok);
return 0;
}
static int
input_gssapi_errtok(int type, u_int32_t plen, struct ssh *ssh)
{
Authctxt *authctxt = ssh->authctxt;
Gssctxt *gssctxt;
gss_buffer_desc send_tok = GSS_C_EMPTY_BUFFER;
gss_buffer_desc recv_tok;
OM_uint32 maj_status;
int r;
u_char *p;
size_t len;
if (authctxt == NULL || (authctxt->methoddata == NULL && !use_privsep))
fatal("No authentication or GSSAPI context");
gssctxt = authctxt->methoddata;
if ((r = sshpkt_get_string(ssh, &p, &len)) != 0 ||
(r = sshpkt_get_end(ssh)) != 0)
fatal("%s: %s", __func__, ssh_err(r));
recv_tok.value = p;
recv_tok.length = len;
/* Push the error token into GSSAPI to see what it says */
maj_status = PRIVSEP(ssh_gssapi_accept_ctx(gssctxt, &recv_tok,
&send_tok, NULL));
free(recv_tok.value);
/* We can't return anything to the client, even if we wanted to */
ssh_dispatch_set(ssh, SSH2_MSG_USERAUTH_GSSAPI_TOKEN, NULL);
ssh_dispatch_set(ssh, SSH2_MSG_USERAUTH_GSSAPI_ERRTOK, NULL);
/* The client will have already moved on to the next auth */
gss_release_buffer(&maj_status, &send_tok);
return 0;
}
/*
* This is called when the client thinks we've completed authentication.
* It should only be enabled in the dispatch handler by the function above,
* which only enables it once the GSSAPI exchange is complete.
*/
static int
input_gssapi_exchange_complete(int type, u_int32_t plen, struct ssh *ssh)
{
Authctxt *authctxt = ssh->authctxt;
int r, authenticated;
const char *displayname;
if (authctxt == NULL || (authctxt->methoddata == NULL && !use_privsep))
fatal("No authentication or GSSAPI context");
/*
* We don't need to check the status, because we're only enabled in
* the dispatcher once the exchange is complete
*/
if ((r = sshpkt_get_end(ssh)) != 0)
fatal("%s: %s", __func__, ssh_err(r));
authenticated = PRIVSEP(ssh_gssapi_userok(authctxt->user));
if ((!use_privsep || mm_is_monitor()) &&
(displayname = ssh_gssapi_displayname()) != NULL)
auth2_record_info(authctxt, "%s", displayname);
authctxt->postponed = 0;
ssh_dispatch_set(ssh, SSH2_MSG_USERAUTH_GSSAPI_TOKEN, NULL);
ssh_dispatch_set(ssh, SSH2_MSG_USERAUTH_GSSAPI_ERRTOK, NULL);
ssh_dispatch_set(ssh, SSH2_MSG_USERAUTH_GSSAPI_MIC, NULL);
ssh_dispatch_set(ssh, SSH2_MSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE, NULL);
userauth_finish(ssh, authenticated, "gssapi-with-mic", NULL);
return 0;
}
static int
input_gssapi_mic(int type, u_int32_t plen, struct ssh *ssh)
{
Authctxt *authctxt = ssh->authctxt;
Gssctxt *gssctxt;
int r, authenticated = 0;
struct sshbuf *b;
gss_buffer_desc mic, gssbuf;
const char *displayname;
u_char *p;
size_t len;
if (authctxt == NULL || (authctxt->methoddata == NULL && !use_privsep))
fatal("No authentication or GSSAPI context");
gssctxt = authctxt->methoddata;
if ((r = sshpkt_get_string(ssh, &p, &len)) != 0)
fatal("%s: %s", __func__, ssh_err(r));
if ((b = sshbuf_new()) == NULL)
fatal("%s: sshbuf_new failed", __func__);
mic.value = p;
mic.length = len;
ssh_gssapi_buildmic(b, authctxt->user, authctxt->service,
"gssapi-with-mic");
if ((gssbuf.value = sshbuf_mutable_ptr(b)) == NULL)
fatal("%s: sshbuf_mutable_ptr failed", __func__);
gssbuf.length = sshbuf_len(b);
if (!GSS_ERROR(PRIVSEP(ssh_gssapi_checkmic(gssctxt, &gssbuf, &mic))))
authenticated = PRIVSEP(ssh_gssapi_userok(authctxt->user));
else
logit("GSSAPI MIC check failed");
sshbuf_free(b);
free(mic.value);
if ((!use_privsep || mm_is_monitor()) &&
(displayname = ssh_gssapi_displayname()) != NULL)
auth2_record_info(authctxt, "%s", displayname);
authctxt->postponed = 0;
ssh_dispatch_set(ssh, SSH2_MSG_USERAUTH_GSSAPI_TOKEN, NULL);
ssh_dispatch_set(ssh, SSH2_MSG_USERAUTH_GSSAPI_ERRTOK, NULL);
ssh_dispatch_set(ssh, SSH2_MSG_USERAUTH_GSSAPI_MIC, NULL);
ssh_dispatch_set(ssh, SSH2_MSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE, NULL);
userauth_finish(ssh, authenticated, "gssapi-with-mic", NULL);
return 0;
}
Authmethod method_gssapi = {
"gssapi-with-mic",
userauth_gssapi,
&options.gss_authentication
};
#endif /* GSSAPI */
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_295_0 |
crossvul-cpp_data_bad_4240_3 | // SPDX-License-Identifier: GPL-2.0
/*
* This is a maximally equidistributed combined Tausworthe generator
* based on code from GNU Scientific Library 1.5 (30 Jun 2004)
*
* lfsr113 version:
*
* x_n = (s1_n ^ s2_n ^ s3_n ^ s4_n)
*
* s1_{n+1} = (((s1_n & 4294967294) << 18) ^ (((s1_n << 6) ^ s1_n) >> 13))
* s2_{n+1} = (((s2_n & 4294967288) << 2) ^ (((s2_n << 2) ^ s2_n) >> 27))
* s3_{n+1} = (((s3_n & 4294967280) << 7) ^ (((s3_n << 13) ^ s3_n) >> 21))
* s4_{n+1} = (((s4_n & 4294967168) << 13) ^ (((s4_n << 3) ^ s4_n) >> 12))
*
* The period of this generator is about 2^113 (see erratum paper).
*
* From: P. L'Ecuyer, "Maximally Equidistributed Combined Tausworthe
* Generators", Mathematics of Computation, 65, 213 (1996), 203--213:
* http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps
* ftp://ftp.iro.umontreal.ca/pub/simulation/lecuyer/papers/tausme.ps
*
* There is an erratum in the paper "Tables of Maximally Equidistributed
* Combined LFSR Generators", Mathematics of Computation, 68, 225 (1999),
* 261--269: http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme2.ps
*
* ... the k_j most significant bits of z_j must be non-zero,
* for each j. (Note: this restriction also applies to the
* computer code given in [4], but was mistakenly not mentioned
* in that paper.)
*
* This affects the seeding procedure by imposing the requirement
* s1 > 1, s2 > 7, s3 > 15, s4 > 127.
*/
#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <asm/unaligned.h>
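/*
 * Illustrative sketch (not part of the original file): the s1 component
 * of the lfsr113 recurrence from the header comment, written long-hand.
 * prandom_u32_state() below computes the same step for all four
 * components via its TAUSWORTHE() macro.
 */
static inline u32 lfsr113_s1_step_example(u32 s1)
{
	/* s1_{n+1} = ((s1 & 4294967294) << 18) ^ (((s1 << 6) ^ s1) >> 13) */
	return ((s1 & 4294967294U) << 18) ^ (((s1 << 6) ^ s1) >> 13);
}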
#ifdef CONFIG_RANDOM32_SELFTEST
static void __init prandom_state_selftest(void);
#else
static inline void prandom_state_selftest(void)
{
}
#endif
static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
/**
* prandom_u32_state - seeded pseudo-random number generator.
* @state: pointer to state structure holding seeded state.
*
* This is used for pseudo-randomness with no outside seeding.
* For more random results, use prandom_u32().
*/
u32 prandom_u32_state(struct rnd_state *state)
{
#define TAUSWORTHE(s, a, b, c, d) ((s & c) << d) ^ (((s << a) ^ s) >> b)
state->s1 = TAUSWORTHE(state->s1, 6U, 13U, 4294967294U, 18U);
state->s2 = TAUSWORTHE(state->s2, 2U, 27U, 4294967288U, 2U);
state->s3 = TAUSWORTHE(state->s3, 13U, 21U, 4294967280U, 7U);
state->s4 = TAUSWORTHE(state->s4, 3U, 12U, 4294967168U, 13U);
return (state->s1 ^ state->s2 ^ state->s3 ^ state->s4);
}
EXPORT_SYMBOL(prandom_u32_state);
/**
* prandom_u32 - pseudo random number generator
*
* A 32 bit pseudo-random number is generated using a fast
* algorithm suitable for simulation. This algorithm is NOT
* considered safe for cryptographic use.
*/
u32 prandom_u32(void)
{
struct rnd_state *state = &get_cpu_var(net_rand_state);
u32 res;
res = prandom_u32_state(state);
put_cpu_var(net_rand_state);
return res;
}
EXPORT_SYMBOL(prandom_u32);
/**
* prandom_bytes_state - get the requested number of pseudo-random bytes
*
* @state: pointer to state structure holding seeded state.
* @buf: where to copy the pseudo-random bytes to
* @bytes: the requested number of bytes
*
* This is used for pseudo-randomness with no outside seeding.
* For more random results, use prandom_bytes().
*/
void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes)
{
u8 *ptr = buf;
while (bytes >= sizeof(u32)) {
put_unaligned(prandom_u32_state(state), (u32 *) ptr);
ptr += sizeof(u32);
bytes -= sizeof(u32);
}
if (bytes > 0) {
u32 rem = prandom_u32_state(state);
do {
*ptr++ = (u8) rem;
bytes--;
rem >>= BITS_PER_BYTE;
} while (bytes > 0);
}
}
EXPORT_SYMBOL(prandom_bytes_state);
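/*
 * Usage sketch (illustrative, not part of the original file): filling a
 * buffer whose size is not a multiple of sizeof(u32), so the tail loop
 * in prandom_bytes_state() above is exercised.  Assumes @state was
 * already seeded, e.g. by prandom_seed_full_state().
 */
static inline void prandom_bytes_state_example(struct rnd_state *state)
{
	u8 nonce[7];	/* 4 bytes from the main loop, 3 from one tail draw */

	prandom_bytes_state(state, nonce, sizeof(nonce));
}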
/**
* prandom_bytes - get the requested number of pseudo-random bytes
* @buf: where to copy the pseudo-random bytes to
* @bytes: the requested number of bytes
*/
void prandom_bytes(void *buf, size_t bytes)
{
struct rnd_state *state = &get_cpu_var(net_rand_state);
prandom_bytes_state(state, buf, bytes);
put_cpu_var(net_rand_state);
}
EXPORT_SYMBOL(prandom_bytes);
static void prandom_warmup(struct rnd_state *state)
{
/* Calling RNG ten times to satisfy recurrence condition */
prandom_u32_state(state);
prandom_u32_state(state);
prandom_u32_state(state);
prandom_u32_state(state);
prandom_u32_state(state);
prandom_u32_state(state);
prandom_u32_state(state);
prandom_u32_state(state);
prandom_u32_state(state);
prandom_u32_state(state);
}
static u32 __extract_hwseed(void)
{
unsigned int val = 0;
(void)(arch_get_random_seed_int(&val) ||
arch_get_random_int(&val));
return val;
}
static void prandom_seed_early(struct rnd_state *state, u32 seed,
bool mix_with_hwseed)
{
#define LCG(x) ((x) * 69069U) /* super-duper LCG */
#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
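/*
 * __seed(x, m) pins each component to at least m, which is exactly the
 * erratum requirement quoted in the header comment:
 * s1 > 1, s2 > 7, s3 > 15, s4 > 127.
 */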
state->s1 = __seed(HWSEED() ^ LCG(seed), 2U);
state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U);
state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U);
state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
}
/**
* prandom_seed - add entropy to pseudo random number generator
* @entropy: entropy value
*
* Add some additional entropy to the prandom pool.
*/
void prandom_seed(u32 entropy)
{
int i;
/*
* No locking on the CPUs, but then somewhat random results are, well,
* expected.
*/
for_each_possible_cpu(i) {
struct rnd_state *state = &per_cpu(net_rand_state, i);
state->s1 = __seed(state->s1 ^ entropy, 2U);
prandom_warmup(state);
}
}
EXPORT_SYMBOL(prandom_seed);
/*
* Generate some initially weak seeding values so the prandom_u32()
* engine can start up.
*/
static int __init prandom_init(void)
{
int i;
prandom_state_selftest();
for_each_possible_cpu(i) {
struct rnd_state *state = &per_cpu(net_rand_state, i);
u32 weak_seed = (i + jiffies) ^ random_get_entropy();
prandom_seed_early(state, weak_seed, true);
prandom_warmup(state);
}
return 0;
}
core_initcall(prandom_init);
static void __prandom_timer(struct timer_list *unused);
static DEFINE_TIMER(seed_timer, __prandom_timer);
static void __prandom_timer(struct timer_list *unused)
{
u32 entropy;
unsigned long expires;
get_random_bytes(&entropy, sizeof(entropy));
prandom_seed(entropy);
/* reseed every ~60 seconds, in [40 .. 80) interval with slack */
expires = 40 + prandom_u32_max(40);
seed_timer.expires = jiffies + msecs_to_jiffies(expires * MSEC_PER_SEC);
add_timer(&seed_timer);
}
static void __init __prandom_start_seed_timer(void)
{
seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC);
add_timer(&seed_timer);
}
void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
{
int i;
for_each_possible_cpu(i) {
struct rnd_state *state = per_cpu_ptr(pcpu_state, i);
u32 seeds[4];
get_random_bytes(&seeds, sizeof(seeds));
state->s1 = __seed(seeds[0], 2U);
state->s2 = __seed(seeds[1], 8U);
state->s3 = __seed(seeds[2], 16U);
state->s4 = __seed(seeds[3], 128U);
prandom_warmup(state);
}
}
EXPORT_SYMBOL(prandom_seed_full_state);
/*
* Generate better values after the core random number generator
* is fully initialized.
*/
static void __prandom_reseed(bool late)
{
unsigned long flags;
static bool latch = false;
static DEFINE_SPINLOCK(lock);
/* Asking for random bytes might result in bytes getting
* moved into the nonblocking pool and thus marking it
* as initialized. In this case we would double back into
* this function and attempt to do a late reseed.
* Ignore the pointless attempt to reseed again if we're
* already waiting for bytes when the nonblocking pool
* got initialized.
*/
/* only allow initial seeding (late == false) once */
if (!spin_trylock_irqsave(&lock, flags))
return;
if (latch && !late)
goto out;
latch = true;
prandom_seed_full_state(&net_rand_state);
out:
spin_unlock_irqrestore(&lock, flags);
}
void prandom_reseed_late(void)
{
__prandom_reseed(true);
}
static int __init prandom_reseed(void)
{
__prandom_reseed(false);
__prandom_start_seed_timer();
return 0;
}
late_initcall(prandom_reseed);
#ifdef CONFIG_RANDOM32_SELFTEST
static struct prandom_test1 {
u32 seed;
u32 result;
} test1[] = {
{ 1U, 3484351685U },
{ 2U, 2623130059U },
{ 3U, 3125133893U },
{ 4U, 984847254U },
};
static struct prandom_test2 {
u32 seed;
u32 iteration;
u32 result;
} test2[] = {
/* Test cases against taus113 from GSL library. */
{ 931557656U, 959U, 2975593782U },
{ 1339693295U, 876U, 3887776532U },
{ 1545556285U, 961U, 1615538833U },
{ 601730776U, 723U, 1776162651U },
{ 1027516047U, 687U, 511983079U },
{ 416526298U, 700U, 916156552U },
{ 1395522032U, 652U, 2222063676U },
{ 366221443U, 617U, 2992857763U },
{ 1539836965U, 714U, 3783265725U },
{ 556206671U, 994U, 799626459U },
{ 684907218U, 799U, 367789491U },
{ 2121230701U, 931U, 2115467001U },
{ 1668516451U, 644U, 3620590685U },
{ 768046066U, 883U, 2034077390U },
{ 1989159136U, 833U, 1195767305U },
{ 536585145U, 996U, 3577259204U },
{ 1008129373U, 642U, 1478080776U },
{ 1740775604U, 939U, 1264980372U },
{ 1967883163U, 508U, 10734624U },
{ 1923019697U, 730U, 3821419629U },
{ 442079932U, 560U, 3440032343U },
{ 1961302714U, 845U, 841962572U },
{ 2030205964U, 962U, 1325144227U },
{ 1160407529U, 507U, 240940858U },
{ 635482502U, 779U, 4200489746U },
{ 1252788931U, 699U, 867195434U },
{ 1961817131U, 719U, 668237657U },
{ 1071468216U, 983U, 917876630U },
{ 1281848367U, 932U, 1003100039U },
{ 582537119U, 780U, 1127273778U },
{ 1973672777U, 853U, 1071368872U },
{ 1896756996U, 762U, 1127851055U },
{ 847917054U, 500U, 1717499075U },
{ 1240520510U, 951U, 2849576657U },
{ 1685071682U, 567U, 1961810396U },
{ 1516232129U, 557U, 3173877U },
{ 1208118903U, 612U, 1613145022U },
{ 1817269927U, 693U, 4279122573U },
{ 1510091701U, 717U, 638191229U },
{ 365916850U, 807U, 600424314U },
{ 399324359U, 702U, 1803598116U },
{ 1318480274U, 779U, 2074237022U },
{ 697758115U, 840U, 1483639402U },
{ 1696507773U, 840U, 577415447U },
{ 2081979121U, 981U, 3041486449U },
{ 955646687U, 742U, 3846494357U },
{ 1250683506U, 749U, 836419859U },
{ 595003102U, 534U, 366794109U },
{ 47485338U, 558U, 3521120834U },
{ 619433479U, 610U, 3991783875U },
{ 704096520U, 518U, 4139493852U },
{ 1712224984U, 606U, 2393312003U },
{ 1318233152U, 922U, 3880361134U },
{ 855572992U, 761U, 1472974787U },
{ 64721421U, 703U, 683860550U },
{ 678931758U, 840U, 380616043U },
{ 692711973U, 778U, 1382361947U },
{ 677703619U, 530U, 2826914161U },
{ 92393223U, 586U, 1522128471U },
{ 1222592920U, 743U, 3466726667U },
{ 358288986U, 695U, 1091956998U },
{ 1935056945U, 958U, 514864477U },
{ 735675993U, 990U, 1294239989U },
{ 1560089402U, 897U, 2238551287U },
{ 70616361U, 829U, 22483098U },
{ 368234700U, 731U, 2913875084U },
{ 20221190U, 879U, 1564152970U },
{ 539444654U, 682U, 1835141259U },
{ 1314987297U, 840U, 1801114136U },
{ 2019295544U, 645U, 3286438930U },
{ 469023838U, 716U, 1637918202U },
{ 1843754496U, 653U, 2562092152U },
{ 400672036U, 809U, 4264212785U },
{ 404722249U, 965U, 2704116999U },
{ 600702209U, 758U, 584979986U },
{ 519953954U, 667U, 2574436237U },
{ 1658071126U, 694U, 2214569490U },
{ 420480037U, 749U, 3430010866U },
{ 690103647U, 969U, 3700758083U },
{ 1029424799U, 937U, 3787746841U },
{ 2012608669U, 506U, 3362628973U },
{ 1535432887U, 998U, 42610943U },
{ 1330635533U, 857U, 3040806504U },
{ 1223800550U, 539U, 3954229517U },
{ 1322411537U, 680U, 3223250324U },
{ 1877847898U, 945U, 2915147143U },
{ 1646356099U, 874U, 965988280U },
{ 805687536U, 744U, 4032277920U },
{ 1948093210U, 633U, 1346597684U },
{ 392609744U, 783U, 1636083295U },
{ 690241304U, 770U, 1201031298U },
{ 1360302965U, 696U, 1665394461U },
{ 1220090946U, 780U, 1316922812U },
{ 447092251U, 500U, 3438743375U },
{ 1613868791U, 592U, 828546883U },
{ 523430951U, 548U, 2552392304U },
{ 726692899U, 810U, 1656872867U },
{ 1364340021U, 836U, 3710513486U },
{ 1986257729U, 931U, 935013962U },
{ 407983964U, 921U, 728767059U },
};
static void __init prandom_state_selftest(void)
{
int i, j, errors = 0, runs = 0;
bool error = false;
for (i = 0; i < ARRAY_SIZE(test1); i++) {
struct rnd_state state;
prandom_seed_early(&state, test1[i].seed, false);
prandom_warmup(&state);
if (test1[i].result != prandom_u32_state(&state))
error = true;
}
if (error)
pr_warn("prandom: seed boundary self test failed\n");
else
pr_info("prandom: seed boundary self test passed\n");
for (i = 0; i < ARRAY_SIZE(test2); i++) {
struct rnd_state state;
prandom_seed_early(&state, test2[i].seed, false);
prandom_warmup(&state);
for (j = 0; j < test2[i].iteration - 1; j++)
prandom_u32_state(&state);
if (test2[i].result != prandom_u32_state(&state))
errors++;
runs++;
cond_resched();
}
if (errors)
pr_warn("prandom: %d/%d self tests failed\n", errors, runs);
else
pr_info("prandom: %d self tests passed\n", runs);
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_4240_3 |
crossvul-cpp_data_good_1791_0 | /*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth SCO sockets. */
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/sco.h>
static bool disable_esco;
static const struct proto_ops sco_sock_ops;
static struct bt_sock_list sco_sk_list = {
.lock = __RW_LOCK_UNLOCKED(sco_sk_list.lock)
};
/* ---- SCO connections ---- */
struct sco_conn {
struct hci_conn *hcon;
spinlock_t lock;
struct sock *sk;
unsigned int mtu;
};
#define sco_conn_lock(c) spin_lock(&c->lock);
#define sco_conn_unlock(c) spin_unlock(&c->lock);
static void sco_sock_close(struct sock *sk);
static void sco_sock_kill(struct sock *sk);
/* ----- SCO socket info ----- */
#define sco_pi(sk) ((struct sco_pinfo *) sk)
struct sco_pinfo {
struct bt_sock bt;
bdaddr_t src;
bdaddr_t dst;
__u32 flags;
__u16 setting;
struct sco_conn *conn;
};
/* ---- SCO timers ---- */
#define SCO_CONN_TIMEOUT (HZ * 40)
#define SCO_DISCONN_TIMEOUT (HZ * 2)
static void sco_sock_timeout(unsigned long arg)
{
struct sock *sk = (struct sock *)arg;
BT_DBG("sock %p state %d", sk, sk->sk_state);
bh_lock_sock(sk);
sk->sk_err = ETIMEDOUT;
sk->sk_state_change(sk);
bh_unlock_sock(sk);
sco_sock_kill(sk);
sock_put(sk);
}
static void sco_sock_set_timer(struct sock *sk, long timeout)
{
BT_DBG("sock %p state %d timeout %ld", sk, sk->sk_state, timeout);
sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
}
static void sco_sock_clear_timer(struct sock *sk)
{
BT_DBG("sock %p state %d", sk, sk->sk_state);
sk_stop_timer(sk, &sk->sk_timer);
}
/* ---- SCO connections ---- */
static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
{
struct hci_dev *hdev = hcon->hdev;
struct sco_conn *conn = hcon->sco_data;
if (conn)
return conn;
conn = kzalloc(sizeof(struct sco_conn), GFP_KERNEL);
if (!conn)
return NULL;
spin_lock_init(&conn->lock);
hcon->sco_data = conn;
conn->hcon = hcon;
if (hdev->sco_mtu > 0)
conn->mtu = hdev->sco_mtu;
else
conn->mtu = 60;
BT_DBG("hcon %p conn %p", hcon, conn);
return conn;
}
/* Delete channel.
* Must be called on the locked socket. */
static void sco_chan_del(struct sock *sk, int err)
{
struct sco_conn *conn;
conn = sco_pi(sk)->conn;
BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
if (conn) {
sco_conn_lock(conn);
conn->sk = NULL;
sco_pi(sk)->conn = NULL;
sco_conn_unlock(conn);
if (conn->hcon)
hci_conn_drop(conn->hcon);
}
sk->sk_state = BT_CLOSED;
sk->sk_err = err;
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_ZAPPED);
}
static void sco_conn_del(struct hci_conn *hcon, int err)
{
struct sco_conn *conn = hcon->sco_data;
struct sock *sk;
if (!conn)
return;
BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Kill socket */
sco_conn_lock(conn);
sk = conn->sk;
sco_conn_unlock(conn);
if (sk) {
sock_hold(sk);
bh_lock_sock(sk);
sco_sock_clear_timer(sk);
sco_chan_del(sk, err);
bh_unlock_sock(sk);
sco_sock_kill(sk);
sock_put(sk);
}
hcon->sco_data = NULL;
kfree(conn);
}
static void __sco_chan_add(struct sco_conn *conn, struct sock *sk,
struct sock *parent)
{
BT_DBG("conn %p", conn);
sco_pi(sk)->conn = conn;
conn->sk = sk;
if (parent)
bt_accept_enqueue(parent, sk);
}
static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
struct sock *parent)
{
int err = 0;
sco_conn_lock(conn);
if (conn->sk)
err = -EBUSY;
else
__sco_chan_add(conn, sk, parent);
sco_conn_unlock(conn);
return err;
}
static int sco_connect(struct sock *sk)
{
struct sco_conn *conn;
struct hci_conn *hcon;
struct hci_dev *hdev;
int err, type;
BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst);
hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src);
if (!hdev)
return -EHOSTUNREACH;
hci_dev_lock(hdev);
if (lmp_esco_capable(hdev) && !disable_esco)
type = ESCO_LINK;
else
type = SCO_LINK;
if (sco_pi(sk)->setting == BT_VOICE_TRANSPARENT &&
(!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev))) {
err = -EOPNOTSUPP;
goto done;
}
hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst,
sco_pi(sk)->setting);
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
goto done;
}
conn = sco_conn_add(hcon);
if (!conn) {
hci_conn_drop(hcon);
err = -ENOMEM;
goto done;
}
/* Update source addr of the socket */
bacpy(&sco_pi(sk)->src, &hcon->src);
err = sco_chan_add(conn, sk, NULL);
if (err)
goto done;
if (hcon->state == BT_CONNECTED) {
sco_sock_clear_timer(sk);
sk->sk_state = BT_CONNECTED;
} else {
sk->sk_state = BT_CONNECT;
sco_sock_set_timer(sk, sk->sk_sndtimeo);
}
done:
hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
}
static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
{
struct sco_conn *conn = sco_pi(sk)->conn;
struct sk_buff *skb;
int err;
/* Check outgoing MTU */
if (len > conn->mtu)
return -EINVAL;
BT_DBG("sk %p len %d", sk, len);
skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb)
return err;
if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
kfree_skb(skb);
return -EFAULT;
}
hci_send_sco(conn->hcon, skb);
return len;
}
static void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
{
struct sock *sk;
sco_conn_lock(conn);
sk = conn->sk;
sco_conn_unlock(conn);
if (!sk)
goto drop;
BT_DBG("sk %p len %d", sk, skb->len);
if (sk->sk_state != BT_CONNECTED)
goto drop;
if (!sock_queue_rcv_skb(sk, skb))
return;
drop:
kfree_skb(skb);
}
/* -------- Socket interface ---------- */
static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba)
{
struct sock *sk;
sk_for_each(sk, &sco_sk_list.head) {
if (sk->sk_state != BT_LISTEN)
continue;
if (!bacmp(&sco_pi(sk)->src, ba))
return sk;
}
return NULL;
}
/* Find socket listening on source bdaddr.
* Returns closest match.
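* For example, a socket bound to the local adapter's own address wins
* over one bound to BDADDR_ANY.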
*/
static struct sock *sco_get_sock_listen(bdaddr_t *src)
{
struct sock *sk = NULL, *sk1 = NULL;
read_lock(&sco_sk_list.lock);
sk_for_each(sk, &sco_sk_list.head) {
if (sk->sk_state != BT_LISTEN)
continue;
/* Exact match. */
if (!bacmp(&sco_pi(sk)->src, src))
break;
/* Closest match */
if (!bacmp(&sco_pi(sk)->src, BDADDR_ANY))
sk1 = sk;
}
read_unlock(&sco_sk_list.lock);
return sk ? sk : sk1;
}
static void sco_sock_destruct(struct sock *sk)
{
BT_DBG("sk %p", sk);
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_write_queue);
}
static void sco_sock_cleanup_listen(struct sock *parent)
{
struct sock *sk;
BT_DBG("parent %p", parent);
/* Close not yet accepted channels */
while ((sk = bt_accept_dequeue(parent, NULL))) {
sco_sock_close(sk);
sco_sock_kill(sk);
}
parent->sk_state = BT_CLOSED;
sock_set_flag(parent, SOCK_ZAPPED);
}
/* Kill socket (only if zapped and orphan)
* Must be called on unlocked socket.
*/
static void sco_sock_kill(struct sock *sk)
{
if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
return;
BT_DBG("sk %p state %d", sk, sk->sk_state);
/* Kill poor orphan */
bt_sock_unlink(&sco_sk_list, sk);
sock_set_flag(sk, SOCK_DEAD);
sock_put(sk);
}
static void __sco_sock_close(struct sock *sk)
{
BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
switch (sk->sk_state) {
case BT_LISTEN:
sco_sock_cleanup_listen(sk);
break;
case BT_CONNECTED:
case BT_CONFIG:
if (sco_pi(sk)->conn->hcon) {
sk->sk_state = BT_DISCONN;
sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
sco_conn_lock(sco_pi(sk)->conn);
hci_conn_drop(sco_pi(sk)->conn->hcon);
sco_pi(sk)->conn->hcon = NULL;
sco_conn_unlock(sco_pi(sk)->conn);
} else
sco_chan_del(sk, ECONNRESET);
break;
case BT_CONNECT2:
case BT_CONNECT:
case BT_DISCONN:
sco_chan_del(sk, ECONNRESET);
break;
default:
sock_set_flag(sk, SOCK_ZAPPED);
break;
}
}
/* Must be called on unlocked socket. */
static void sco_sock_close(struct sock *sk)
{
sco_sock_clear_timer(sk);
lock_sock(sk);
__sco_sock_close(sk);
release_sock(sk);
sco_sock_kill(sk);
}
static void sco_sock_init(struct sock *sk, struct sock *parent)
{
BT_DBG("sk %p", sk);
if (parent) {
sk->sk_type = parent->sk_type;
bt_sk(sk)->flags = bt_sk(parent)->flags;
security_sk_clone(parent, sk);
}
}
static struct proto sco_proto = {
.name = "SCO",
.owner = THIS_MODULE,
.obj_size = sizeof(struct sco_pinfo)
};
static struct sock *sco_sock_alloc(struct net *net, struct socket *sock,
int proto, gfp_t prio, int kern)
{
struct sock *sk;
sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto, kern);
if (!sk)
return NULL;
sock_init_data(sock, sk);
INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
sk->sk_destruct = sco_sock_destruct;
sk->sk_sndtimeo = SCO_CONN_TIMEOUT;
sock_reset_flag(sk, SOCK_ZAPPED);
sk->sk_protocol = proto;
sk->sk_state = BT_OPEN;
sco_pi(sk)->setting = BT_VOICE_CVSD_16BIT;
setup_timer(&sk->sk_timer, sco_sock_timeout, (unsigned long)sk);
bt_sock_link(&sco_sk_list, sk);
return sk;
}
static int sco_sock_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
BT_DBG("sock %p", sock);
sock->state = SS_UNCONNECTED;
if (sock->type != SOCK_SEQPACKET)
return -ESOCKTNOSUPPORT;
sock->ops = &sco_sock_ops;
sk = sco_sock_alloc(net, sock, protocol, GFP_ATOMIC, kern);
if (!sk)
return -ENOMEM;
sco_sock_init(sk, NULL);
return 0;
}
static int sco_sock_bind(struct socket *sock, struct sockaddr *addr,
int addr_len)
{
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr);
if (!addr || addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
if (addr_len < sizeof(struct sockaddr_sco))
return -EINVAL;
lock_sock(sk);
if (sk->sk_state != BT_OPEN) {
err = -EBADFD;
goto done;
}
if (sk->sk_type != SOCK_SEQPACKET) {
err = -EINVAL;
goto done;
}
bacpy(&sco_pi(sk)->src, &sa->sco_bdaddr);
sk->sk_state = BT_BOUND;
done:
release_sock(sk);
return err;
}
static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
int err;
BT_DBG("sk %p", sk);
if (alen < sizeof(struct sockaddr_sco) ||
addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND)
return -EBADFD;
if (sk->sk_type != SOCK_SEQPACKET)
return -EINVAL;
lock_sock(sk);
/* Set destination address */
bacpy(&sco_pi(sk)->dst, &sa->sco_bdaddr);
err = sco_connect(sk);
if (err)
goto done;
err = bt_sock_wait_state(sk, BT_CONNECTED,
sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
release_sock(sk);
return err;
}
static int sco_sock_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
bdaddr_t *src = &sco_pi(sk)->src;
int err = 0;
BT_DBG("sk %p backlog %d", sk, backlog);
lock_sock(sk);
if (sk->sk_state != BT_BOUND) {
err = -EBADFD;
goto done;
}
if (sk->sk_type != SOCK_SEQPACKET) {
err = -EINVAL;
goto done;
}
write_lock(&sco_sk_list.lock);
if (__sco_get_sock_listen_by_addr(src)) {
err = -EADDRINUSE;
goto unlock;
}
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
sk->sk_state = BT_LISTEN;
unlock:
write_unlock(&sco_sk_list.lock);
done:
release_sock(sk);
return err;
}
static int sco_sock_accept(struct socket *sock, struct socket *newsock,
int flags)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct sock *sk = sock->sk, *ch;
long timeo;
int err = 0;
lock_sock(sk);
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
while (1) {
if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
break;
}
ch = bt_accept_dequeue(sk, newsock);
if (ch)
break;
if (!timeo) {
err = -EAGAIN;
break;
}
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
break;
}
release_sock(sk);
timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
lock_sock(sk);
}
remove_wait_queue(sk_sleep(sk), &wait);
if (err)
goto done;
newsock->state = SS_CONNECTED;
BT_DBG("new socket %p", ch);
done:
release_sock(sk);
return err;
}
static int sco_sock_getname(struct socket *sock, struct sockaddr *addr,
int *len, int peer)
{
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
BT_DBG("sock %p, sk %p", sock, sk);
addr->sa_family = AF_BLUETOOTH;
*len = sizeof(struct sockaddr_sco);
if (peer)
bacpy(&sa->sco_bdaddr, &sco_pi(sk)->dst);
else
bacpy(&sa->sco_bdaddr, &sco_pi(sk)->src);
return 0;
}
static int sco_sock_sendmsg(struct socket *sock, struct msghdr *msg,
size_t len)
{
struct sock *sk = sock->sk;
int err;
BT_DBG("sock %p, sk %p", sock, sk);
err = sock_error(sk);
if (err)
return err;
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
lock_sock(sk);
if (sk->sk_state == BT_CONNECTED)
err = sco_send_frame(sk, msg, len);
else
err = -ENOTCONN;
release_sock(sk);
return err;
}
static void sco_conn_defer_accept(struct hci_conn *conn, u16 setting)
{
struct hci_dev *hdev = conn->hdev;
BT_DBG("conn %p", conn);
conn->state = BT_CONFIG;
if (!lmp_esco_capable(hdev)) {
struct hci_cp_accept_conn_req cp;
bacpy(&cp.bdaddr, &conn->dst);
cp.role = 0x00; /* Ignored */
hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
} else {
struct hci_cp_accept_sync_conn_req cp;
bacpy(&cp.bdaddr, &conn->dst);
cp.pkt_type = cpu_to_le16(conn->pkt_type);
cp.tx_bandwidth = cpu_to_le32(0x00001f40);
cp.rx_bandwidth = cpu_to_le32(0x00001f40);
cp.content_format = cpu_to_le16(setting);
switch (setting & SCO_AIRMODE_MASK) {
case SCO_AIRMODE_TRANSP:
if (conn->pkt_type & ESCO_2EV3)
cp.max_latency = cpu_to_le16(0x0008);
else
cp.max_latency = cpu_to_le16(0x000D);
cp.retrans_effort = 0x02;
break;
case SCO_AIRMODE_CVSD:
cp.max_latency = cpu_to_le16(0xffff);
cp.retrans_effort = 0xff;
break;
}
hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
sizeof(cp), &cp);
}
}
static int sco_sock_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags)
{
struct sock *sk = sock->sk;
struct sco_pinfo *pi = sco_pi(sk);
lock_sock(sk);
if (sk->sk_state == BT_CONNECT2 &&
test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
sco_conn_defer_accept(pi->conn->hcon, pi->setting);
sk->sk_state = BT_CONFIG;
release_sock(sk);
return 0;
}
release_sock(sk);
return bt_sock_recvmsg(sock, msg, len, flags);
}
static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
int len, err = 0;
struct bt_voice voice;
u32 opt;
BT_DBG("sk %p", sk);
lock_sock(sk);
switch (optname) {
case BT_DEFER_SETUP:
if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
err = -EINVAL;
break;
}
if (get_user(opt, (u32 __user *) optval)) {
err = -EFAULT;
break;
}
if (opt)
set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
else
clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
break;
case BT_VOICE:
if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND &&
sk->sk_state != BT_CONNECT2) {
err = -EINVAL;
break;
}
voice.setting = sco_pi(sk)->setting;
len = min_t(unsigned int, sizeof(voice), optlen);
if (copy_from_user((char *)&voice, optval, len)) {
err = -EFAULT;
break;
}
/* Explicitly check for these values */
if (voice.setting != BT_VOICE_TRANSPARENT &&
voice.setting != BT_VOICE_CVSD_16BIT) {
err = -EINVAL;
break;
}
sco_pi(sk)->setting = voice.setting;
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int sco_sock_getsockopt_old(struct socket *sock, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct sco_options opts;
struct sco_conninfo cinfo;
int len, err = 0;
BT_DBG("sk %p", sk);
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
switch (optname) {
case SCO_OPTIONS:
if (sk->sk_state != BT_CONNECTED &&
!(sk->sk_state == BT_CONNECT2 &&
test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) {
err = -ENOTCONN;
break;
}
opts.mtu = sco_pi(sk)->conn->mtu;
BT_DBG("mtu %d", opts.mtu);
len = min_t(unsigned int, len, sizeof(opts));
if (copy_to_user(optval, (char *)&opts, len))
err = -EFAULT;
break;
case SCO_CONNINFO:
if (sk->sk_state != BT_CONNECTED &&
!(sk->sk_state == BT_CONNECT2 &&
test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) {
err = -ENOTCONN;
break;
}
memset(&cinfo, 0, sizeof(cinfo));
cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
len = min_t(unsigned int, len, sizeof(cinfo));
if (copy_to_user(optval, (char *)&cinfo, len))
err = -EFAULT;
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int sco_sock_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
int len, err = 0;
struct bt_voice voice;
BT_DBG("sk %p", sk);
if (level == SOL_SCO)
return sco_sock_getsockopt_old(sock, optname, optval, optlen);
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
switch (optname) {
case BT_DEFER_SETUP:
if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
err = -EINVAL;
break;
}
if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
(u32 __user *)optval))
err = -EFAULT;
break;
case BT_VOICE:
voice.setting = sco_pi(sk)->setting;
len = min_t(unsigned int, len, sizeof(voice));
if (copy_to_user(optval, (char *)&voice, len))
err = -EFAULT;
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int sco_sock_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sock %p, sk %p", sock, sk);
if (!sk)
return 0;
sock_hold(sk);
lock_sock(sk);
if (!sk->sk_shutdown) {
sk->sk_shutdown = SHUTDOWN_MASK;
sco_sock_clear_timer(sk);
__sco_sock_close(sk);
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
!(current->flags & PF_EXITING))
err = bt_sock_wait_state(sk, BT_CLOSED,
sk->sk_lingertime);
}
release_sock(sk);
sock_put(sk);
return err;
}
static int sco_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sock %p, sk %p", sock, sk);
if (!sk)
return 0;
sco_sock_close(sk);
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
!(current->flags & PF_EXITING)) {
lock_sock(sk);
err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
release_sock(sk);
}
sock_orphan(sk);
sco_sock_kill(sk);
return err;
}
static void sco_conn_ready(struct sco_conn *conn)
{
struct sock *parent;
struct sock *sk = conn->sk;
BT_DBG("conn %p", conn);
if (sk) {
sco_sock_clear_timer(sk);
bh_lock_sock(sk);
sk->sk_state = BT_CONNECTED;
sk->sk_state_change(sk);
bh_unlock_sock(sk);
} else {
sco_conn_lock(conn);
if (!conn->hcon) {
sco_conn_unlock(conn);
return;
}
parent = sco_get_sock_listen(&conn->hcon->src);
if (!parent) {
sco_conn_unlock(conn);
return;
}
bh_lock_sock(parent);
sk = sco_sock_alloc(sock_net(parent), NULL,
BTPROTO_SCO, GFP_ATOMIC, 0);
if (!sk) {
bh_unlock_sock(parent);
sco_conn_unlock(conn);
return;
}
sco_sock_init(sk, parent);
bacpy(&sco_pi(sk)->src, &conn->hcon->src);
bacpy(&sco_pi(sk)->dst, &conn->hcon->dst);
hci_conn_hold(conn->hcon);
__sco_chan_add(conn, sk, parent);
if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
sk->sk_state = BT_CONNECT2;
else
sk->sk_state = BT_CONNECTED;
/* Wake up parent */
parent->sk_data_ready(parent);
bh_unlock_sock(parent);
sco_conn_unlock(conn);
}
}
/* ----- SCO interface with lower layer (HCI) ----- */
int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
{
struct sock *sk;
int lm = 0;
BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
/* Find listening sockets */
read_lock(&sco_sk_list.lock);
sk_for_each(sk, &sco_sk_list.head) {
if (sk->sk_state != BT_LISTEN)
continue;
if (!bacmp(&sco_pi(sk)->src, &hdev->bdaddr) ||
!bacmp(&sco_pi(sk)->src, BDADDR_ANY)) {
lm |= HCI_LM_ACCEPT;
if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
*flags |= HCI_PROTO_DEFER;
break;
}
}
read_unlock(&sco_sk_list.lock);
return lm;
}
static void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
{
if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
return;
BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
if (!status) {
struct sco_conn *conn;
conn = sco_conn_add(hcon);
if (conn)
sco_conn_ready(conn);
} else
sco_conn_del(hcon, bt_to_errno(status));
}
static void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
{
if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
return;
BT_DBG("hcon %p reason %d", hcon, reason);
sco_conn_del(hcon, bt_to_errno(reason));
}
void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
{
struct sco_conn *conn = hcon->sco_data;
if (!conn)
goto drop;
BT_DBG("conn %p len %d", conn, skb->len);
if (skb->len) {
sco_recv_frame(conn, skb);
return;
}
drop:
kfree_skb(skb);
}
static struct hci_cb sco_cb = {
.name = "SCO",
.connect_cfm = sco_connect_cfm,
.disconn_cfm = sco_disconn_cfm,
};
static int sco_debugfs_show(struct seq_file *f, void *p)
{
struct sock *sk;
read_lock(&sco_sk_list.lock);
sk_for_each(sk, &sco_sk_list.head) {
seq_printf(f, "%pMR %pMR %d\n", &sco_pi(sk)->src,
&sco_pi(sk)->dst, sk->sk_state);
}
read_unlock(&sco_sk_list.lock);
return 0;
}
static int sco_debugfs_open(struct inode *inode, struct file *file)
{
return single_open(file, sco_debugfs_show, inode->i_private);
}
static const struct file_operations sco_debugfs_fops = {
.open = sco_debugfs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static struct dentry *sco_debugfs;
static const struct proto_ops sco_sock_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.release = sco_sock_release,
.bind = sco_sock_bind,
.connect = sco_sock_connect,
.listen = sco_sock_listen,
.accept = sco_sock_accept,
.getname = sco_sock_getname,
.sendmsg = sco_sock_sendmsg,
.recvmsg = sco_sock_recvmsg,
.poll = bt_sock_poll,
.ioctl = bt_sock_ioctl,
.mmap = sock_no_mmap,
.socketpair = sock_no_socketpair,
.shutdown = sco_sock_shutdown,
.setsockopt = sco_sock_setsockopt,
.getsockopt = sco_sock_getsockopt
};
static const struct net_proto_family sco_sock_family_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.create = sco_sock_create,
};
int __init sco_init(void)
{
int err;
BUILD_BUG_ON(sizeof(struct sockaddr_sco) > sizeof(struct sockaddr));
err = proto_register(&sco_proto, 0);
if (err < 0)
return err;
err = bt_sock_register(BTPROTO_SCO, &sco_sock_family_ops);
if (err < 0) {
BT_ERR("SCO socket registration failed");
goto error;
}
err = bt_procfs_init(&init_net, "sco", &sco_sk_list, NULL);
if (err < 0) {
BT_ERR("Failed to create SCO proc file");
bt_sock_unregister(BTPROTO_SCO);
goto error;
}
BT_INFO("SCO socket layer initialized");
hci_register_cb(&sco_cb);
if (IS_ERR_OR_NULL(bt_debugfs))
return 0;
sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs,
NULL, &sco_debugfs_fops);
return 0;
error:
proto_unregister(&sco_proto);
return err;
}
void sco_exit(void)
{
bt_procfs_cleanup(&init_net, "sco");
debugfs_remove(sco_debugfs);
hci_unregister_cb(&sco_cb);
bt_sock_unregister(BTPROTO_SCO);
proto_unregister(&sco_proto);
}
module_param(disable_esco, bool, 0644);
MODULE_PARM_DESC(disable_esco, "Disable eSCO connection creation");
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_1791_0 |
crossvul-cpp_data_good_5695_0 | /*
* net/tipc/socket.c: TIPC socket API
*
* Copyright (c) 2001-2007, 2012 Ericsson AB
* Copyright (c) 2004-2008, 2010-2012, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "core.h"
#include "port.h"
#include <linux/export.h>
#include <net/sock.h>
#define SS_LISTENING -1 /* socket is listening */
#define SS_READY -2 /* socket is connectionless */
#define CONN_OVERLOAD_LIMIT ((TIPC_FLOW_CONTROL_WIN * 2 + 1) * \
SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
struct tipc_sock {
struct sock sk;
struct tipc_port *p;
struct tipc_portid peer_name;
unsigned int conn_timeout;
};
#define tipc_sk(sk) ((struct tipc_sock *)(sk))
#define tipc_sk_port(sk) (tipc_sk(sk)->p)
#define tipc_rx_ready(sock) (!skb_queue_empty(&sock->sk->sk_receive_queue) || \
(sock->state == SS_DISCONNECTING))
static int backlog_rcv(struct sock *sk, struct sk_buff *skb);
static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf);
static void wakeupdispatch(struct tipc_port *tport);
static void tipc_data_ready(struct sock *sk, int len);
static void tipc_write_space(struct sock *sk);
static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
static struct proto tipc_proto;
static int sockets_enabled;
/*
* Revised TIPC socket locking policy:
*
* Most socket operations take the standard socket lock when they start
* and hold it until they finish (or until they need to sleep). Acquiring
* this lock grants the owner exclusive access to the fields of the socket
* data structures, with the exception of the backlog queue. A few socket
* operations can be done without taking the socket lock because they only
* read socket information that never changes during the life of the socket.
*
* Socket operations may acquire the lock for the associated TIPC port if they
* need to perform an operation on the port. If any routine needs to acquire
* both the socket lock and the port lock it must take the socket lock first
* to avoid the risk of deadlock.
*
* The dispatcher handling incoming messages cannot grab the socket lock in
 * the standard fashion, since it is invoked at the BH level and cannot block.
* Instead, it checks to see if the socket lock is currently owned by someone,
* and either handles the message itself or adds it to the socket's backlog
* queue; in the latter case the queued message is processed once the process
* owning the socket lock releases it.
*
* NOTE: Releasing the socket lock while an operation is sleeping overcomes
* the problem of a blocked socket operation preventing any other operations
* from occurring. However, applications must be careful if they have
* multiple threads trying to send (or receive) on the same socket, as these
* operations might interfere with each other. For example, doing a connect
* and a receive at the same time might allow the receive to consume the
* ACK message meant for the connect. While additional work could be done
 * to try to overcome this, it doesn't seem worthwhile at present.
*
* NOTE: Releasing the socket lock while an operation is sleeping also ensures
* that another operation that must be performed in a non-blocking manner is
* not delayed for very long because the lock has already been taken.
*
* NOTE: This code assumes that certain fields of a port/socket pair are
* constant over its lifetime; such fields can be examined without taking
* the socket lock and/or port lock, and do not need to be re-read even
* after resuming processing after waiting. These fields include:
* - socket type
* - pointer to socket sk structure (aka tipc_sock structure)
* - pointer to port structure
* - port reference
*/
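/*
 * Illustrative sketch of the lock-ordering rule above. This is a hedged
 * example, not code used by TIPC itself; the names follow the surrounding
 * file. A routine that needs both locks must take them in this order:
 *
 *	lock_sock(sk);			(socket lock first)
 *	spin_lock_bh(tp_ptr->lock);	(then the port lock)
 *	... operate on socket and port fields ...
 *	spin_unlock_bh(tp_ptr->lock);
 *	release_sock(sk);
 *
 * Taking the port lock first invites the deadlock the policy warns about.
 */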
/**
* advance_rx_queue - discard first buffer in socket receive queue
*
* Caller must hold socket lock
*/
static void advance_rx_queue(struct sock *sk)
{
kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
}
/**
* reject_rx_queue - reject all buffers in socket receive queue
*
* Caller must hold socket lock
*/
static void reject_rx_queue(struct sock *sk)
{
struct sk_buff *buf;
while ((buf = __skb_dequeue(&sk->sk_receive_queue)))
tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
}
/**
* tipc_create - create a TIPC socket
* @net: network namespace (must be default network)
* @sock: pre-allocated socket structure
* @protocol: protocol indicator (must be 0)
* @kern: caused by kernel or by userspace?
*
* This routine creates additional data structures used by the TIPC socket,
* initializes them, and links them together.
*
* Returns 0 on success, errno otherwise
*/
static int tipc_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
const struct proto_ops *ops;
socket_state state;
struct sock *sk;
struct tipc_port *tp_ptr;
/* Validate arguments */
if (unlikely(protocol != 0))
return -EPROTONOSUPPORT;
switch (sock->type) {
case SOCK_STREAM:
ops = &stream_ops;
state = SS_UNCONNECTED;
break;
case SOCK_SEQPACKET:
ops = &packet_ops;
state = SS_UNCONNECTED;
break;
case SOCK_DGRAM:
case SOCK_RDM:
ops = &msg_ops;
state = SS_READY;
break;
default:
return -EPROTOTYPE;
}
/* Allocate socket's protocol area */
sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
if (sk == NULL)
return -ENOMEM;
/* Allocate TIPC port for socket to use */
tp_ptr = tipc_createport_raw(sk, &dispatch, &wakeupdispatch,
TIPC_LOW_IMPORTANCE);
if (unlikely(!tp_ptr)) {
sk_free(sk);
return -ENOMEM;
}
/* Finish initializing socket data structures */
sock->ops = ops;
sock->state = state;
sock_init_data(sock, sk);
sk->sk_backlog_rcv = backlog_rcv;
sk->sk_data_ready = tipc_data_ready;
sk->sk_write_space = tipc_write_space;
tipc_sk(sk)->p = tp_ptr;
tipc_sk(sk)->conn_timeout = CONN_TIMEOUT_DEFAULT;
spin_unlock_bh(tp_ptr->lock);
if (sock->state == SS_READY) {
tipc_set_portunreturnable(tp_ptr->ref, 1);
if (sock->type == SOCK_DGRAM)
tipc_set_portunreliable(tp_ptr->ref, 1);
}
return 0;
}
/**
* release - destroy a TIPC socket
* @sock: socket to destroy
*
* This routine cleans up any messages that are still queued on the socket.
* For DGRAM and RDM socket types, all queued messages are rejected.
* For SEQPACKET and STREAM socket types, the first message is rejected
* and any others are discarded. (If the first message on a STREAM socket
* is partially-read, it is discarded and the next one is rejected instead.)
*
* NOTE: Rejected messages are not necessarily returned to the sender! They
* are returned or discarded according to the "destination droppable" setting
* specified for the message by the sender.
*
* Returns 0 on success, errno otherwise
*/
static int release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct tipc_port *tport;
struct sk_buff *buf;
int res;
/*
* Exit if socket isn't fully initialized (occurs when a failed accept()
* releases a pre-allocated child socket that was never used)
*/
if (sk == NULL)
return 0;
tport = tipc_sk_port(sk);
lock_sock(sk);
/*
* Reject all unreceived messages, except on an active connection
* (which disconnects locally & sends a 'FIN+' to peer)
*/
while (sock->state != SS_DISCONNECTING) {
buf = __skb_dequeue(&sk->sk_receive_queue);
if (buf == NULL)
break;
if (TIPC_SKB_CB(buf)->handle != 0)
kfree_skb(buf);
else {
if ((sock->state == SS_CONNECTING) ||
(sock->state == SS_CONNECTED)) {
sock->state = SS_DISCONNECTING;
tipc_disconnect(tport->ref);
}
tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
}
}
/*
* Delete TIPC port; this ensures no more messages are queued
* (also disconnects an active connection & sends a 'FIN-' to peer)
*/
res = tipc_deleteport(tport->ref);
/* Discard any remaining (connection-based) messages in receive queue */
__skb_queue_purge(&sk->sk_receive_queue);
/* Reject any messages that accumulated in backlog queue */
sock->state = SS_DISCONNECTING;
release_sock(sk);
sock_put(sk);
sock->sk = NULL;
return res;
}
/**
 * bind - associate or disassociate TIPC name(s) with a socket
* @sock: socket structure
* @uaddr: socket address describing name(s) and desired operation
* @uaddr_len: size of socket address data structure
*
* Name and name sequence binding is indicated using a positive scope value;
* a negative scope value unbinds the specified name. Specifying no name
* (i.e. a socket address length of 0) unbinds all names from the socket.
*
* Returns 0 on success, errno otherwise
*
* NOTE: This routine doesn't need to take the socket lock since it doesn't
* access any non-constant socket information.
*/
static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
{
struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
u32 portref = tipc_sk_port(sock->sk)->ref;
if (unlikely(!uaddr_len))
return tipc_withdraw(portref, 0, NULL);
if (uaddr_len < sizeof(struct sockaddr_tipc))
return -EINVAL;
if (addr->family != AF_TIPC)
return -EAFNOSUPPORT;
if (addr->addrtype == TIPC_ADDR_NAME)
addr->addr.nameseq.upper = addr->addr.nameseq.lower;
else if (addr->addrtype != TIPC_ADDR_NAMESEQ)
return -EAFNOSUPPORT;
if (addr->addr.nameseq.type < TIPC_RESERVED_TYPES)
return -EACCES;
return (addr->scope > 0) ?
tipc_publish(portref, addr->scope, &addr->addr.nameseq) :
tipc_withdraw(portref, -addr->scope, &addr->addr.nameseq);
}
/**
* get_name - get port ID of socket or peer socket
* @sock: socket structure
* @uaddr: area for returned socket address
* @uaddr_len: area for returned length of socket address
* @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
*
* Returns 0 on success, errno otherwise
*
* NOTE: This routine doesn't need to take the socket lock since it only
* accesses socket information that is unchanging (or which changes in
* a completely predictable manner).
*/
static int get_name(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
struct tipc_sock *tsock = tipc_sk(sock->sk);
memset(addr, 0, sizeof(*addr));
if (peer) {
if ((sock->state != SS_CONNECTED) &&
((peer != 2) || (sock->state != SS_DISCONNECTING)))
return -ENOTCONN;
addr->addr.id.ref = tsock->peer_name.ref;
addr->addr.id.node = tsock->peer_name.node;
} else {
addr->addr.id.ref = tsock->p->ref;
addr->addr.id.node = tipc_own_addr;
}
*uaddr_len = sizeof(*addr);
addr->addrtype = TIPC_ADDR_ID;
addr->family = AF_TIPC;
addr->scope = 0;
addr->addr.name.domain = 0;
return 0;
}
/**
* poll - read and possibly block on pollmask
* @file: file structure associated with the socket
* @sock: socket for which to calculate the poll bits
 * @wait: poll table supplied by the VFS poll machinery
*
* Returns pollmask value
*
* COMMENTARY:
* It appears that the usual socket locking mechanisms are not useful here
* since the pollmask info is potentially out-of-date the moment this routine
* exits. TCP and other protocols seem to rely on higher level poll routines
* to handle any preventable race conditions, so TIPC will do the same ...
*
* TIPC sets the returned events as follows:
*
* socket state flags set
* ------------ ---------
* unconnected no read flags
* POLLOUT if port is not congested
*
* connecting POLLIN/POLLRDNORM if ACK/NACK in rx queue
* no write flags
*
* connected POLLIN/POLLRDNORM if data in rx queue
* POLLOUT if port is not congested
*
* disconnecting POLLIN/POLLRDNORM/POLLHUP
* no write flags
*
* listening POLLIN if SYN in rx queue
* no write flags
*
* ready POLLIN/POLLRDNORM if data in rx queue
* [connectionless] POLLOUT (since port cannot be congested)
*
* IMPORTANT: The fact that a read or write operation is indicated does NOT
* imply that the operation will succeed, merely that it should be performed
* and will not block.
*/
static unsigned int poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
u32 mask = 0;
sock_poll_wait(file, sk_sleep(sk), wait);
switch ((int)sock->state) {
case SS_UNCONNECTED:
if (!tipc_sk_port(sk)->congested)
mask |= POLLOUT;
break;
case SS_READY:
case SS_CONNECTED:
if (!tipc_sk_port(sk)->congested)
mask |= POLLOUT;
/* fall thru' */
case SS_CONNECTING:
case SS_LISTENING:
if (!skb_queue_empty(&sk->sk_receive_queue))
mask |= (POLLIN | POLLRDNORM);
break;
case SS_DISCONNECTING:
mask = (POLLIN | POLLRDNORM | POLLHUP);
break;
}
return mask;
}
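/*
 * Userspace illustration of the state table above (a minimal sketch, not
 * part of the kernel sources; 'sd' is assumed to be an AF_TIPC socket
 * descriptor):
 *
 *	struct pollfd pfd = { .fd = sd, .events = POLLIN | POLLOUT };
 *	if (poll(&pfd, 1, 1000) > 0) {
 *		if (pfd.revents & POLLIN)
 *			... data (or a FIN, with POLLHUP) is waiting ...
 *		if (pfd.revents & POLLOUT)
 *			... the port is not congested, a send won't block ...
 *	}
 */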
/**
* dest_name_check - verify user is permitted to send to specified port name
* @dest: destination address
* @m: descriptor for message to be sent
*
* Prevents restricted configuration commands from being issued by
* unauthorized users.
*
* Returns 0 if permission is granted, otherwise errno
*/
static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
{
struct tipc_cfg_msg_hdr hdr;
if (likely(dest->addr.name.name.type >= TIPC_RESERVED_TYPES))
return 0;
if (likely(dest->addr.name.name.type == TIPC_TOP_SRV))
return 0;
if (likely(dest->addr.name.name.type != TIPC_CFG_SRV))
return -EACCES;
if (!m->msg_iovlen || (m->msg_iov[0].iov_len < sizeof(hdr)))
return -EMSGSIZE;
if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr)))
return -EFAULT;
if ((ntohs(hdr.tcm_type) & 0xC000) && (!capable(CAP_NET_ADMIN)))
return -EACCES;
return 0;
}
/**
* send_msg - send message in connectionless manner
* @iocb: if NULL, indicates that socket lock is already held
* @sock: socket structure
* @m: message to send
* @total_len: length of message
*
 * Message must have a destination specified explicitly.
* Used for SOCK_RDM and SOCK_DGRAM messages,
* and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
* (Note: 'SYN+' is prohibited on SOCK_STREAM.)
*
* Returns the number of bytes sent on success, or errno otherwise
*/
static int send_msg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len)
{
struct sock *sk = sock->sk;
struct tipc_port *tport = tipc_sk_port(sk);
struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
int needs_conn;
long timeout_val;
int res = -EINVAL;
if (unlikely(!dest))
return -EDESTADDRREQ;
if (unlikely((m->msg_namelen < sizeof(*dest)) ||
(dest->family != AF_TIPC)))
return -EINVAL;
if (total_len > TIPC_MAX_USER_MSG_SIZE)
return -EMSGSIZE;
if (iocb)
lock_sock(sk);
needs_conn = (sock->state != SS_READY);
if (unlikely(needs_conn)) {
if (sock->state == SS_LISTENING) {
res = -EPIPE;
goto exit;
}
if (sock->state != SS_UNCONNECTED) {
res = -EISCONN;
goto exit;
}
if ((tport->published) ||
((sock->type == SOCK_STREAM) && (total_len != 0))) {
res = -EOPNOTSUPP;
goto exit;
}
if (dest->addrtype == TIPC_ADDR_NAME) {
tport->conn_type = dest->addr.name.name.type;
tport->conn_instance = dest->addr.name.name.instance;
}
/* Abort any pending connection attempts (very unlikely) */
reject_rx_queue(sk);
}
timeout_val = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
do {
if (dest->addrtype == TIPC_ADDR_NAME) {
res = dest_name_check(dest, m);
if (res)
break;
res = tipc_send2name(tport->ref,
&dest->addr.name.name,
dest->addr.name.domain,
m->msg_iovlen,
m->msg_iov,
total_len);
} else if (dest->addrtype == TIPC_ADDR_ID) {
res = tipc_send2port(tport->ref,
&dest->addr.id,
m->msg_iovlen,
m->msg_iov,
total_len);
} else if (dest->addrtype == TIPC_ADDR_MCAST) {
if (needs_conn) {
res = -EOPNOTSUPP;
break;
}
res = dest_name_check(dest, m);
if (res)
break;
res = tipc_multicast(tport->ref,
&dest->addr.nameseq,
m->msg_iovlen,
m->msg_iov,
total_len);
}
if (likely(res != -ELINKCONG)) {
if (needs_conn && (res >= 0))
sock->state = SS_CONNECTING;
break;
}
if (timeout_val <= 0L) {
res = timeout_val ? timeout_val : -EWOULDBLOCK;
break;
}
release_sock(sk);
timeout_val = wait_event_interruptible_timeout(*sk_sleep(sk),
!tport->congested, timeout_val);
lock_sock(sk);
} while (1);
exit:
if (iocb)
release_sock(sk);
return res;
}
/**
* send_packet - send a connection-oriented message
* @iocb: if NULL, indicates that socket lock is already held
* @sock: socket structure
* @m: message to send
* @total_len: length of message
*
* Used for SOCK_SEQPACKET messages and SOCK_STREAM data.
*
* Returns the number of bytes sent on success, or errno otherwise
*/
static int send_packet(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len)
{
struct sock *sk = sock->sk;
struct tipc_port *tport = tipc_sk_port(sk);
struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
long timeout_val;
int res;
/* Handle implied connection establishment */
if (unlikely(dest))
return send_msg(iocb, sock, m, total_len);
if (total_len > TIPC_MAX_USER_MSG_SIZE)
return -EMSGSIZE;
if (iocb)
lock_sock(sk);
timeout_val = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
do {
if (unlikely(sock->state != SS_CONNECTED)) {
if (sock->state == SS_DISCONNECTING)
res = -EPIPE;
else
res = -ENOTCONN;
break;
}
res = tipc_send(tport->ref, m->msg_iovlen, m->msg_iov,
total_len);
if (likely(res != -ELINKCONG))
break;
if (timeout_val <= 0L) {
res = timeout_val ? timeout_val : -EWOULDBLOCK;
break;
}
release_sock(sk);
timeout_val = wait_event_interruptible_timeout(*sk_sleep(sk),
(!tport->congested || !tport->connected), timeout_val);
lock_sock(sk);
} while (1);
if (iocb)
release_sock(sk);
return res;
}
/**
* send_stream - send stream-oriented data
* @iocb: (unused)
* @sock: socket structure
* @m: data to send
* @total_len: total length of data to be sent
*
* Used for SOCK_STREAM data.
*
* Returns the number of bytes sent on success (or partial success),
* or errno if no data sent
*/
static int send_stream(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len)
{
struct sock *sk = sock->sk;
struct tipc_port *tport = tipc_sk_port(sk);
struct msghdr my_msg;
struct iovec my_iov;
struct iovec *curr_iov;
int curr_iovlen;
char __user *curr_start;
u32 hdr_size;
int curr_left;
int bytes_to_send;
int bytes_sent;
int res;
lock_sock(sk);
/* Handle special cases where there is no connection */
if (unlikely(sock->state != SS_CONNECTED)) {
if (sock->state == SS_UNCONNECTED) {
res = send_packet(NULL, sock, m, total_len);
goto exit;
} else if (sock->state == SS_DISCONNECTING) {
res = -EPIPE;
goto exit;
} else {
res = -ENOTCONN;
goto exit;
}
}
if (unlikely(m->msg_name)) {
res = -EISCONN;
goto exit;
}
if (total_len > (unsigned int)INT_MAX) {
res = -EMSGSIZE;
goto exit;
}
/*
* Send each iovec entry using one or more messages
*
* Note: This algorithm is good for the most likely case
* (i.e. one large iovec entry), but could be improved to pass sets
* of small iovec entries into send_packet().
*/
curr_iov = m->msg_iov;
curr_iovlen = m->msg_iovlen;
my_msg.msg_iov = &my_iov;
my_msg.msg_iovlen = 1;
my_msg.msg_flags = m->msg_flags;
my_msg.msg_name = NULL;
bytes_sent = 0;
hdr_size = msg_hdr_sz(&tport->phdr);
while (curr_iovlen--) {
curr_start = curr_iov->iov_base;
curr_left = curr_iov->iov_len;
while (curr_left) {
bytes_to_send = tport->max_pkt - hdr_size;
if (bytes_to_send > TIPC_MAX_USER_MSG_SIZE)
bytes_to_send = TIPC_MAX_USER_MSG_SIZE;
if (curr_left < bytes_to_send)
bytes_to_send = curr_left;
my_iov.iov_base = curr_start;
my_iov.iov_len = bytes_to_send;
res = send_packet(NULL, sock, &my_msg, bytes_to_send);
if (res < 0) {
if (bytes_sent)
res = bytes_sent;
goto exit;
}
curr_left -= bytes_to_send;
curr_start += bytes_to_send;
bytes_sent += bytes_to_send;
}
curr_iov++;
}
res = bytes_sent;
exit:
release_sock(sk);
return res;
}
/**
* auto_connect - complete connection setup to a remote port
* @sock: socket structure
* @msg: peer's response message
*
* Returns 0 on success, errno otherwise
*/
static int auto_connect(struct socket *sock, struct tipc_msg *msg)
{
struct tipc_sock *tsock = tipc_sk(sock->sk);
struct tipc_port *p_ptr;
tsock->peer_name.ref = msg_origport(msg);
tsock->peer_name.node = msg_orignode(msg);
p_ptr = tipc_port_deref(tsock->p->ref);
if (!p_ptr)
return -EINVAL;
__tipc_connect(tsock->p->ref, p_ptr, &tsock->peer_name);
if (msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE)
return -EINVAL;
msg_set_importance(&p_ptr->phdr, (u32)msg_importance(msg));
sock->state = SS_CONNECTED;
return 0;
}
/**
* set_orig_addr - capture sender's address for received message
* @m: descriptor for message info
* @msg: received message header
*
* Note: Address is not captured if not requested by receiver.
*/
static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
{
struct sockaddr_tipc *addr = (struct sockaddr_tipc *)m->msg_name;
if (addr) {
addr->family = AF_TIPC;
addr->addrtype = TIPC_ADDR_ID;
memset(&addr->addr, 0, sizeof(addr->addr));
addr->addr.id.ref = msg_origport(msg);
addr->addr.id.node = msg_orignode(msg);
addr->addr.name.domain = 0; /* could leave uninitialized */
addr->scope = 0; /* could leave uninitialized */
m->msg_namelen = sizeof(struct sockaddr_tipc);
}
}
/**
* anc_data_recv - optionally capture ancillary data for received message
* @m: descriptor for message info
* @msg: received message header
* @tport: TIPC port associated with message
*
* Note: Ancillary data is not captured if not requested by receiver.
*
* Returns 0 if successful, otherwise errno
*/
static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
struct tipc_port *tport)
{
u32 anc_data[3];
u32 err;
u32 dest_type;
int has_name;
int res;
if (likely(m->msg_controllen == 0))
return 0;
/* Optionally capture errored message object(s) */
err = msg ? msg_errcode(msg) : 0;
if (unlikely(err)) {
anc_data[0] = err;
anc_data[1] = msg_data_sz(msg);
res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
if (res)
return res;
if (anc_data[1]) {
res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
msg_data(msg));
if (res)
return res;
}
}
/* Optionally capture message destination object */
dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
switch (dest_type) {
case TIPC_NAMED_MSG:
has_name = 1;
anc_data[0] = msg_nametype(msg);
anc_data[1] = msg_namelower(msg);
anc_data[2] = msg_namelower(msg);
break;
case TIPC_MCAST_MSG:
has_name = 1;
anc_data[0] = msg_nametype(msg);
anc_data[1] = msg_namelower(msg);
anc_data[2] = msg_nameupper(msg);
break;
case TIPC_CONN_MSG:
has_name = (tport->conn_type != 0);
anc_data[0] = tport->conn_type;
anc_data[1] = tport->conn_instance;
anc_data[2] = tport->conn_instance;
break;
default:
has_name = 0;
}
if (has_name) {
res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
if (res)
return res;
}
return 0;
}
/**
* recv_msg - receive packet-oriented message
* @iocb: (unused)
* @m: descriptor for message info
* @buf_len: total size of user buffer area
* @flags: receive flags
*
* Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
* If the complete message doesn't fit in user area, truncate it.
*
* Returns size of returned message data, errno otherwise
*/
static int recv_msg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t buf_len, int flags)
{
struct sock *sk = sock->sk;
struct tipc_port *tport = tipc_sk_port(sk);
struct sk_buff *buf;
struct tipc_msg *msg;
long timeout;
unsigned int sz;
u32 err;
int res;
/* Catch invalid receive requests */
if (unlikely(!buf_len))
return -EINVAL;
lock_sock(sk);
if (unlikely(sock->state == SS_UNCONNECTED)) {
res = -ENOTCONN;
goto exit;
}
/* will be updated in set_orig_addr() if needed */
m->msg_namelen = 0;
timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
restart:
/* Look for a message in receive queue; wait if necessary */
while (skb_queue_empty(&sk->sk_receive_queue)) {
if (sock->state == SS_DISCONNECTING) {
res = -ENOTCONN;
goto exit;
}
if (timeout <= 0L) {
res = timeout ? timeout : -EWOULDBLOCK;
goto exit;
}
release_sock(sk);
timeout = wait_event_interruptible_timeout(*sk_sleep(sk),
tipc_rx_ready(sock),
timeout);
lock_sock(sk);
}
/* Look at first message in receive queue */
buf = skb_peek(&sk->sk_receive_queue);
msg = buf_msg(buf);
sz = msg_data_sz(msg);
err = msg_errcode(msg);
/* Discard an empty non-errored message & try again */
if ((!sz) && (!err)) {
advance_rx_queue(sk);
goto restart;
}
/* Capture sender's address (optional) */
set_orig_addr(m, msg);
/* Capture ancillary data (optional) */
res = anc_data_recv(m, msg, tport);
if (res)
goto exit;
/* Capture message data (if valid) & compute return value (always) */
if (!err) {
if (unlikely(buf_len < sz)) {
sz = buf_len;
m->msg_flags |= MSG_TRUNC;
}
res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg),
m->msg_iov, sz);
if (res)
goto exit;
res = sz;
} else {
if ((sock->state == SS_READY) ||
((err == TIPC_CONN_SHUTDOWN) || m->msg_control))
res = 0;
else
res = -ECONNRESET;
}
/* Consume received message (optional) */
if (likely(!(flags & MSG_PEEK))) {
if ((sock->state != SS_READY) &&
(++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
tipc_acknowledge(tport->ref, tport->conn_unacked);
advance_rx_queue(sk);
}
exit:
release_sock(sk);
return res;
}
/**
* recv_stream - receive stream-oriented data
* @iocb: (unused)
* @m: descriptor for message info
* @buf_len: total size of user buffer area
* @flags: receive flags
*
 * Used for SOCK_STREAM messages only. If not enough data is available,
 * it will optionally wait for more; it never truncates data.
*
* Returns size of returned message data, errno otherwise
*/
static int recv_stream(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t buf_len, int flags)
{
struct sock *sk = sock->sk;
struct tipc_port *tport = tipc_sk_port(sk);
struct sk_buff *buf;
struct tipc_msg *msg;
long timeout;
unsigned int sz;
int sz_to_copy, target, needed;
int sz_copied = 0;
u32 err;
int res = 0;
/* Catch invalid receive attempts */
if (unlikely(!buf_len))
return -EINVAL;
lock_sock(sk);
if (unlikely((sock->state == SS_UNCONNECTED) ||
(sock->state == SS_CONNECTING))) {
res = -ENOTCONN;
goto exit;
}
/* will be updated in set_orig_addr() if needed */
m->msg_namelen = 0;
target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
restart:
/* Look for a message in receive queue; wait if necessary */
while (skb_queue_empty(&sk->sk_receive_queue)) {
if (sock->state == SS_DISCONNECTING) {
res = -ENOTCONN;
goto exit;
}
if (timeout <= 0L) {
res = timeout ? timeout : -EWOULDBLOCK;
goto exit;
}
release_sock(sk);
timeout = wait_event_interruptible_timeout(*sk_sleep(sk),
tipc_rx_ready(sock),
timeout);
lock_sock(sk);
}
/* Look at first message in receive queue */
buf = skb_peek(&sk->sk_receive_queue);
msg = buf_msg(buf);
sz = msg_data_sz(msg);
err = msg_errcode(msg);
/* Discard an empty non-errored message & try again */
if ((!sz) && (!err)) {
advance_rx_queue(sk);
goto restart;
}
/* Optionally capture sender's address & ancillary data of first msg */
if (sz_copied == 0) {
set_orig_addr(m, msg);
res = anc_data_recv(m, msg, tport);
if (res)
goto exit;
}
/* Capture message data (if valid) & compute return value (always) */
if (!err) {
u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle);
sz -= offset;
needed = (buf_len - sz_copied);
sz_to_copy = (sz <= needed) ? sz : needed;
res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg) + offset,
m->msg_iov, sz_to_copy);
if (res)
goto exit;
sz_copied += sz_to_copy;
if (sz_to_copy < sz) {
if (!(flags & MSG_PEEK))
TIPC_SKB_CB(buf)->handle =
(void *)(unsigned long)(offset + sz_to_copy);
goto exit;
}
} else {
if (sz_copied != 0)
goto exit; /* can't add error msg to valid data */
if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
res = 0;
else
res = -ECONNRESET;
}
/* Consume received message (optional) */
if (likely(!(flags & MSG_PEEK))) {
if (unlikely(++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
tipc_acknowledge(tport->ref, tport->conn_unacked);
advance_rx_queue(sk);
}
/* Loop around if more data is required */
if ((sz_copied < buf_len) && /* didn't get all requested data */
(!skb_queue_empty(&sk->sk_receive_queue) ||
(sz_copied < target)) && /* and more is ready or required */
(!(flags & MSG_PEEK)) && /* and aren't just peeking at data */
(!err)) /* and haven't reached a FIN */
goto restart;
exit:
release_sock(sk);
return sz_copied ? sz_copied : res;
}
/**
* tipc_write_space - wake up thread if port congestion is released
* @sk: socket
*/
static void tipc_write_space(struct sock *sk)
{
struct socket_wq *wq;
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
if (wq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
POLLWRNORM | POLLWRBAND);
rcu_read_unlock();
}
/**
* tipc_data_ready - wake up threads to indicate messages have been received
* @sk: socket
* @len: the length of messages
*/
static void tipc_data_ready(struct sock *sk, int len)
{
struct socket_wq *wq;
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
if (wq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
POLLRDNORM | POLLRDBAND);
rcu_read_unlock();
}
/**
* filter_connect - Handle all incoming messages for a connection-based socket
* @tsock: TIPC socket
 * @buf: pointer to the received message buffer
 *
 * Returns a TIPC error status code; a socket error status is also
 * recorded when an error is encountered
*/
static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
{
struct socket *sock = tsock->sk.sk_socket;
struct tipc_msg *msg = buf_msg(*buf);
struct sock *sk = &tsock->sk;
u32 retval = TIPC_ERR_NO_PORT;
int res;
if (msg_mcast(msg))
return retval;
switch ((int)sock->state) {
case SS_CONNECTED:
/* Accept only connection-based messages sent by peer */
if (msg_connected(msg) && tipc_port_peer_msg(tsock->p, msg)) {
if (unlikely(msg_errcode(msg))) {
sock->state = SS_DISCONNECTING;
__tipc_disconnect(tsock->p);
}
retval = TIPC_OK;
}
break;
case SS_CONNECTING:
/* Accept only ACK or NACK message */
if (unlikely(msg_errcode(msg))) {
sock->state = SS_DISCONNECTING;
sk->sk_err = -ECONNREFUSED;
retval = TIPC_OK;
break;
}
if (unlikely(!msg_connected(msg)))
break;
res = auto_connect(sock, msg);
if (res) {
sock->state = SS_DISCONNECTING;
sk->sk_err = res;
retval = TIPC_OK;
break;
}
/* If an incoming message is an 'ACK-', it should be
* discarded here because it doesn't contain useful
* data. In addition, we should try to wake up
* connect() routine if sleeping.
*/
if (msg_data_sz(msg) == 0) {
kfree_skb(*buf);
*buf = NULL;
if (waitqueue_active(sk_sleep(sk)))
wake_up_interruptible(sk_sleep(sk));
}
retval = TIPC_OK;
break;
case SS_LISTENING:
case SS_UNCONNECTED:
/* Accept only SYN message */
if (!msg_connected(msg) && !(msg_errcode(msg)))
retval = TIPC_OK;
break;
case SS_DISCONNECTING:
break;
default:
pr_err("Unknown socket state %u\n", sock->state);
}
return retval;
}
/**
* rcvbuf_limit - get proper overload limit of socket receive queue
* @sk: socket
* @buf: message
*
 * For all connection-oriented messages, irrespective of importance,
 * the default overload value (i.e. 67MB) is set as the limit.
 *
 * For all connectionless messages, by default the new queue limits are
 * as below:
*
* TIPC_LOW_IMPORTANCE (5MB)
* TIPC_MEDIUM_IMPORTANCE (10MB)
* TIPC_HIGH_IMPORTANCE (20MB)
* TIPC_CRITICAL_IMPORTANCE (40MB)
*
* Returns overload limit according to corresponding message importance
*/
static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
{
struct tipc_msg *msg = buf_msg(buf);
unsigned int limit;
if (msg_connected(msg))
limit = CONN_OVERLOAD_LIMIT;
else
limit = sk->sk_rcvbuf << (msg_importance(msg) + 5);
return limit;
}
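/*
 * Worked example of the connectionless arithmetic above: the shift of
 * (importance + 5) makes the limit 32, 64, 128 or 256 times sk_rcvbuf
 * for TIPC_LOW/MEDIUM/HIGH/CRITICAL_IMPORTANCE respectively, which is
 * where the 5/10/20/40MB figures quoted above come from for the default
 * sk_rcvbuf value assumed there.
 */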
/**
* filter_rcv - validate incoming message
* @sk: socket
* @buf: message
*
* Enqueues message on receive queue if acceptable; optionally handles
* disconnect indication for a connected socket.
*
* Called with socket lock already taken; port lock may also be taken.
*
* Returns TIPC error status code (TIPC_OK if message is not to be rejected)
*/
static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
{
struct socket *sock = sk->sk_socket;
struct tipc_msg *msg = buf_msg(buf);
unsigned int limit = rcvbuf_limit(sk, buf);
u32 res = TIPC_OK;
/* Reject message if it is wrong sort of message for socket */
if (msg_type(msg) > TIPC_DIRECT_MSG)
return TIPC_ERR_NO_PORT;
if (sock->state == SS_READY) {
if (msg_connected(msg))
return TIPC_ERR_NO_PORT;
} else {
res = filter_connect(tipc_sk(sk), &buf);
if (res != TIPC_OK || buf == NULL)
return res;
}
/* Reject message if there isn't room to queue it */
if (sk_rmem_alloc_get(sk) + buf->truesize >= limit)
return TIPC_ERR_OVERLOAD;
/* Enqueue message */
TIPC_SKB_CB(buf)->handle = 0;
__skb_queue_tail(&sk->sk_receive_queue, buf);
skb_set_owner_r(buf, sk);
sk->sk_data_ready(sk, 0);
return TIPC_OK;
}
/**
* backlog_rcv - handle incoming message from backlog queue
* @sk: socket
* @buf: message
*
* Caller must hold socket lock, but not port lock.
*
* Returns 0
*/
static int backlog_rcv(struct sock *sk, struct sk_buff *buf)
{
u32 res;
res = filter_rcv(sk, buf);
if (res)
tipc_reject_msg(buf, res);
return 0;
}
/**
* dispatch - handle incoming message
* @tport: TIPC port that received message
* @buf: message
*
* Called with port lock already taken.
*
* Returns TIPC error status code (TIPC_OK if message is not to be rejected)
*/
static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
{
struct sock *sk = (struct sock *)tport->usr_handle;
u32 res;
/*
* Process message if socket is unlocked; otherwise add to backlog queue
*
* This code is based on sk_receive_skb(), but must be distinct from it
* since a TIPC-specific filter/reject mechanism is utilized
*/
bh_lock_sock(sk);
if (!sock_owned_by_user(sk)) {
res = filter_rcv(sk, buf);
} else {
if (sk_add_backlog(sk, buf, rcvbuf_limit(sk, buf)))
res = TIPC_ERR_OVERLOAD;
else
res = TIPC_OK;
}
bh_unlock_sock(sk);
return res;
}
/**
* wakeupdispatch - wake up port after congestion
* @tport: port to wakeup
*
* Called with port lock already taken.
*/
static void wakeupdispatch(struct tipc_port *tport)
{
struct sock *sk = (struct sock *)tport->usr_handle;
sk->sk_write_space(sk);
}
/**
* connect - establish a connection to another TIPC port
* @sock: socket structure
* @dest: socket address for destination port
* @destlen: size of socket address data structure
* @flags: file-related flags associated with socket
*
* Returns 0 on success, errno otherwise
*/
static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
int flags)
{
struct sock *sk = sock->sk;
struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
struct msghdr m = {NULL,};
unsigned int timeout;
int res;
lock_sock(sk);
/* For now, TIPC does not allow use of connect() with DGRAM/RDM types */
if (sock->state == SS_READY) {
res = -EOPNOTSUPP;
goto exit;
}
/*
* Reject connection attempt using multicast address
*
* Note: send_msg() validates the rest of the address fields,
* so there's no need to do it here
*/
if (dst->addrtype == TIPC_ADDR_MCAST) {
res = -EINVAL;
goto exit;
}
timeout = (flags & O_NONBLOCK) ? 0 : tipc_sk(sk)->conn_timeout;
switch (sock->state) {
case SS_UNCONNECTED:
/* Send a 'SYN-' to destination */
m.msg_name = dest;
m.msg_namelen = destlen;
/* If connect is in non-blocking case, set MSG_DONTWAIT to
* indicate send_msg() is never blocked.
*/
if (!timeout)
m.msg_flags = MSG_DONTWAIT;
res = send_msg(NULL, sock, &m, 0);
if ((res < 0) && (res != -EWOULDBLOCK))
goto exit;
/* Just entered SS_CONNECTING state; the only
* difference is that return value in non-blocking
* case is EINPROGRESS, rather than EALREADY.
*/
res = -EINPROGRESS;
break;
case SS_CONNECTING:
res = -EALREADY;
break;
case SS_CONNECTED:
res = -EISCONN;
break;
default:
res = -EINVAL;
goto exit;
}
if (sock->state == SS_CONNECTING) {
if (!timeout)
goto exit;
/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
release_sock(sk);
res = wait_event_interruptible_timeout(*sk_sleep(sk),
sock->state != SS_CONNECTING,
timeout ? (long)msecs_to_jiffies(timeout)
: MAX_SCHEDULE_TIMEOUT);
lock_sock(sk);
if (res <= 0) {
if (res == 0)
res = -ETIMEDOUT;
else
; /* leave "res" unchanged */
goto exit;
}
}
if (unlikely(sock->state == SS_DISCONNECTING))
res = sock_error(sk);
else
res = 0;
exit:
release_sock(sk);
return res;
}
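/*
 * Userspace illustration of a blocking connect to a named TIPC service
 * (a hedged sketch only; the service type/instance values are made up,
 * and field names follow the sockaddr_tipc layout in <linux/tipc.h>):
 *
 *	struct sockaddr_tipc sa = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAME,
 *	};
 *	sa.addr.name.name.type = 1000;		(assumed service type)
 *	sa.addr.name.name.instance = 1;		(assumed instance)
 *	connect(sd, (struct sockaddr *)&sa, sizeof(sa));
 */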
/**
* listen - allow socket to listen for incoming connections
* @sock: socket structure
* @len: (unused)
*
* Returns 0 on success, errno otherwise
*/
static int listen(struct socket *sock, int len)
{
struct sock *sk = sock->sk;
int res;
lock_sock(sk);
if (sock->state != SS_UNCONNECTED)
res = -EINVAL;
else {
sock->state = SS_LISTENING;
res = 0;
}
release_sock(sk);
return res;
}
/**
* accept - wait for connection request
* @sock: listening socket
* @newsock: new socket that is to be connected
* @flags: file-related flags associated with socket
*
* Returns 0 on success, errno otherwise
*/
static int accept(struct socket *sock, struct socket *new_sock, int flags)
{
struct sock *new_sk, *sk = sock->sk;
struct sk_buff *buf;
struct tipc_sock *new_tsock;
struct tipc_port *new_tport;
struct tipc_msg *msg;
u32 new_ref;
int res;
lock_sock(sk);
if (sock->state != SS_LISTENING) {
res = -EINVAL;
goto exit;
}
while (skb_queue_empty(&sk->sk_receive_queue)) {
if (flags & O_NONBLOCK) {
res = -EWOULDBLOCK;
goto exit;
}
release_sock(sk);
res = wait_event_interruptible(*sk_sleep(sk),
(!skb_queue_empty(&sk->sk_receive_queue)));
lock_sock(sk);
if (res)
goto exit;
}
buf = skb_peek(&sk->sk_receive_queue);
res = tipc_create(sock_net(sock->sk), new_sock, 0, 0);
if (res)
goto exit;
new_sk = new_sock->sk;
new_tsock = tipc_sk(new_sk);
new_tport = new_tsock->p;
new_ref = new_tport->ref;
msg = buf_msg(buf);
/* we lock on new_sk; but lockdep sees the lock on sk */
lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
/*
* Reject any stray messages received by new socket
* before the socket lock was taken (very, very unlikely)
*/
reject_rx_queue(new_sk);
/* Connect new socket to its peer */
new_tsock->peer_name.ref = msg_origport(msg);
new_tsock->peer_name.node = msg_orignode(msg);
tipc_connect(new_ref, &new_tsock->peer_name);
new_sock->state = SS_CONNECTED;
tipc_set_portimportance(new_ref, msg_importance(msg));
if (msg_named(msg)) {
new_tport->conn_type = msg_nametype(msg);
new_tport->conn_instance = msg_nameinst(msg);
}
/*
 * Respond to 'SYN-' by discarding it & returning 'ACK-'.
* Respond to 'SYN+' by queuing it on new socket.
*/
if (!msg_data_sz(msg)) {
struct msghdr m = {NULL,};
advance_rx_queue(sk);
send_packet(NULL, new_sock, &m, 0);
} else {
__skb_dequeue(&sk->sk_receive_queue);
__skb_queue_head(&new_sk->sk_receive_queue, buf);
skb_set_owner_r(buf, new_sk);
}
release_sock(new_sk);
exit:
release_sock(sk);
return res;
}
/**
* shutdown - shutdown socket connection
* @sock: socket structure
* @how: direction to close (must be SHUT_RDWR)
*
* Terminates connection (if necessary), then purges socket's receive queue.
*
* Returns 0 on success, errno otherwise
*/
static int shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
struct tipc_port *tport = tipc_sk_port(sk);
struct sk_buff *buf;
int res;
if (how != SHUT_RDWR)
return -EINVAL;
lock_sock(sk);
switch (sock->state) {
case SS_CONNECTING:
case SS_CONNECTED:
restart:
/* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
buf = __skb_dequeue(&sk->sk_receive_queue);
if (buf) {
if (TIPC_SKB_CB(buf)->handle != 0) {
kfree_skb(buf);
goto restart;
}
tipc_disconnect(tport->ref);
tipc_reject_msg(buf, TIPC_CONN_SHUTDOWN);
} else {
tipc_shutdown(tport->ref);
}
sock->state = SS_DISCONNECTING;
/* fall through */
case SS_DISCONNECTING:
/* Discard any unreceived messages */
__skb_queue_purge(&sk->sk_receive_queue);
/* Wake up anyone sleeping in poll */
sk->sk_state_change(sk);
res = 0;
break;
default:
res = -ENOTCONN;
}
release_sock(sk);
return res;
}
/**
* setsockopt - set socket option
* @sock: socket structure
* @lvl: option level
* @opt: option identifier
* @ov: pointer to new option value
* @ol: length of option value
*
* For stream sockets only, accepts and ignores all IPPROTO_TCP options
* (to ease compatibility).
*
* Returns 0 on success, errno otherwise
*/
static int setsockopt(struct socket *sock,
int lvl, int opt, char __user *ov, unsigned int ol)
{
struct sock *sk = sock->sk;
struct tipc_port *tport = tipc_sk_port(sk);
u32 value;
int res;
if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
return 0;
if (lvl != SOL_TIPC)
return -ENOPROTOOPT;
if (ol < sizeof(value))
return -EINVAL;
res = get_user(value, (u32 __user *)ov);
if (res)
return res;
lock_sock(sk);
switch (opt) {
case TIPC_IMPORTANCE:
res = tipc_set_portimportance(tport->ref, value);
break;
case TIPC_SRC_DROPPABLE:
if (sock->type != SOCK_STREAM)
res = tipc_set_portunreliable(tport->ref, value);
else
res = -ENOPROTOOPT;
break;
case TIPC_DEST_DROPPABLE:
res = tipc_set_portunreturnable(tport->ref, value);
break;
case TIPC_CONN_TIMEOUT:
tipc_sk(sk)->conn_timeout = value;
/* no need to set "res", since already 0 at this point */
break;
default:
res = -EINVAL;
}
release_sock(sk);
return res;
}
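/*
 * Userspace illustration of the TIPC_CONN_TIMEOUT case above (a sketch,
 * not part of this file). The value is in milliseconds, matching
 * CONN_TIMEOUT_DEFAULT and the msecs_to_jiffies() conversion done in
 * connect():
 *
 *	__u32 tmo = 30000;	(30 second connect timeout)
 *	setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &tmo, sizeof(tmo));
 */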
/**
* getsockopt - get socket option
* @sock: socket structure
* @lvl: option level
* @opt: option identifier
* @ov: receptacle for option value
* @ol: receptacle for length of option value
*
* For stream sockets only, returns 0 length result for all IPPROTO_TCP options
* (to ease compatibility).
*
* Returns 0 on success, errno otherwise
*/
static int getsockopt(struct socket *sock,
int lvl, int opt, char __user *ov, int __user *ol)
{
struct sock *sk = sock->sk;
struct tipc_port *tport = tipc_sk_port(sk);
int len;
u32 value;
int res;
if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
return put_user(0, ol);
if (lvl != SOL_TIPC)
return -ENOPROTOOPT;
res = get_user(len, ol);
if (res)
return res;
lock_sock(sk);
switch (opt) {
case TIPC_IMPORTANCE:
res = tipc_portimportance(tport->ref, &value);
break;
case TIPC_SRC_DROPPABLE:
res = tipc_portunreliable(tport->ref, &value);
break;
case TIPC_DEST_DROPPABLE:
res = tipc_portunreturnable(tport->ref, &value);
break;
case TIPC_CONN_TIMEOUT:
value = tipc_sk(sk)->conn_timeout;
/* no need to set "res", since already 0 at this point */
break;
case TIPC_NODE_RECVQ_DEPTH:
value = 0; /* was tipc_queue_size, now obsolete */
break;
case TIPC_SOCK_RECVQ_DEPTH:
value = skb_queue_len(&sk->sk_receive_queue);
break;
default:
res = -EINVAL;
}
release_sock(sk);
if (res)
return res; /* "get" failed */
if (len < sizeof(value))
return -EINVAL;
if (copy_to_user(ov, &value, sizeof(value)))
return -EFAULT;
return put_user(sizeof(value), ol);
}
/* Protocol switches for the various types of TIPC sockets */
static const struct proto_ops msg_ops = {
.owner = THIS_MODULE,
.family = AF_TIPC,
.release = release,
.bind = bind,
.connect = connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = get_name,
.poll = poll,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = shutdown,
.setsockopt = setsockopt,
.getsockopt = getsockopt,
.sendmsg = send_msg,
.recvmsg = recv_msg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage
};
static const struct proto_ops packet_ops = {
.owner = THIS_MODULE,
.family = AF_TIPC,
.release = release,
.bind = bind,
.connect = connect,
.socketpair = sock_no_socketpair,
.accept = accept,
.getname = get_name,
.poll = poll,
.ioctl = sock_no_ioctl,
.listen = listen,
.shutdown = shutdown,
.setsockopt = setsockopt,
.getsockopt = getsockopt,
.sendmsg = send_packet,
.recvmsg = recv_msg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage
};
static const struct proto_ops stream_ops = {
.owner = THIS_MODULE,
.family = AF_TIPC,
.release = release,
.bind = bind,
.connect = connect,
.socketpair = sock_no_socketpair,
.accept = accept,
.getname = get_name,
.poll = poll,
.ioctl = sock_no_ioctl,
.listen = listen,
.shutdown = shutdown,
.setsockopt = setsockopt,
.getsockopt = getsockopt,
.sendmsg = send_stream,
.recvmsg = recv_stream,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage
};
static const struct net_proto_family tipc_family_ops = {
.owner = THIS_MODULE,
.family = AF_TIPC,
.create = tipc_create
};
static struct proto tipc_proto = {
.name = "TIPC",
.owner = THIS_MODULE,
.obj_size = sizeof(struct tipc_sock)
};
/**
* tipc_socket_init - initialize TIPC socket interface
*
* Returns 0 on success, errno otherwise
*/
int tipc_socket_init(void)
{
int res;
res = proto_register(&tipc_proto, 1);
if (res) {
pr_err("Failed to register TIPC protocol type\n");
goto out;
}
res = sock_register(&tipc_family_ops);
if (res) {
pr_err("Failed to register TIPC socket type\n");
proto_unregister(&tipc_proto);
goto out;
}
sockets_enabled = 1;
out:
return res;
}
/**
* tipc_socket_stop - stop TIPC socket interface
*/
void tipc_socket_stop(void)
{
if (!sockets_enabled)
return;
sockets_enabled = 0;
sock_unregister(tipc_family_ops.family);
proto_unregister(&tipc_proto);
}
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_5695_0 |
crossvul-cpp_data_good_5614_0 | /***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2013, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
/***
RECEIVING COOKIE INFORMATION
============================
struct CookieInfo *cookie_init(char *file);
Inits a cookie struct to store data in a local file. This is always
called before any cookies are set.
int cookies_set(struct CookieInfo *cookie, char *cookie_line);
The 'cookie_line' parameter is a full "Set-cookie:" line as
received from a server.
The function needs to replace previously stored lines that this new
line supersedes.
It may remove lines that are expired.
It should return an indication of success/error.
SENDING COOKIE INFORMATION
==========================
struct Cookies *cookie_getlist(struct CookieInfo *cookie,
char *host, char *path, bool secure);
For a given host and path, return a linked list of cookies that
the client should send to the server right now. The secure
boolean tells the function whether the connection is secure or
not.
It shall only return cookies that haven't expired.
Example set of cookies:
Set-cookie: PRODUCTINFO=webxpress; domain=.fidelity.com; path=/; secure
Set-cookie: PERSONALIZE=none;expires=Monday, 13-Jun-1988 03:04:55 GMT;
domain=.fidelity.com; path=/ftgw; secure
Set-cookie: FidHist=none;expires=Monday, 13-Jun-1988 03:04:55 GMT;
domain=.fidelity.com; path=/; secure
Set-cookie: FidOrder=none;expires=Monday, 13-Jun-1988 03:04:55 GMT;
domain=.fidelity.com; path=/; secure
Set-cookie: DisPend=none;expires=Monday, 13-Jun-1988 03:04:55 GMT;
domain=.fidelity.com; path=/; secure
Set-cookie: FidDis=none;expires=Monday, 13-Jun-1988 03:04:55 GMT;
domain=.fidelity.com; path=/; secure
Set-cookie:
Session_Key@6791a9e0-901a-11d0-a1c8-9b012c88aa77=none;expires=Monday,
13-Jun-1988 03:04:55 GMT; domain=.fidelity.com; path=/; secure
****/
#include "curl_setup.h"
#if !defined(CURL_DISABLE_HTTP) && !defined(CURL_DISABLE_COOKIES)
#define _MPRINTF_REPLACE
#include <curl/mprintf.h>
#include "urldata.h"
#include "cookie.h"
#include "strequal.h"
#include "strtok.h"
#include "sendf.h"
#include "curl_memory.h"
#include "share.h"
#include "strtoofft.h"
#include "rawstr.h"
#include "curl_memrchr.h"
/* The last #include file should be: */
#include "memdebug.h"
static void freecookie(struct Cookie *co)
{
if(co->expirestr)
free(co->expirestr);
if(co->domain)
free(co->domain);
if(co->path)
free(co->path);
if(co->name)
free(co->name);
if(co->value)
free(co->value);
if(co->maxage)
free(co->maxage);
if(co->version)
free(co->version);
free(co);
}
static bool tailmatch(const char *cooke_domain, const char *hostname)
{
size_t cookie_domain_len = strlen(cooke_domain);
size_t hostname_len = strlen(hostname);
if(hostname_len < cookie_domain_len)
return FALSE;
if(!Curl_raw_equal(cooke_domain, hostname+hostname_len-cookie_domain_len))
return FALSE;
/* A lead char of cookie_domain is not '.'.
RFC6265 4.1.2.3. The Domain Attribute says:
For example, if the value of the Domain attribute is
"example.com", the user agent will include the cookie in the Cookie
header when making HTTP requests to example.com, www.example.com, and
www.corp.example.com.
*/
if(hostname_len == cookie_domain_len)
return TRUE;
if('.' == *(hostname + hostname_len - cookie_domain_len - 1))
return TRUE;
return FALSE;
}
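/*
 * Behaviour sketch for tailmatch(), derived from the checks above and
 * shown only as illustration:
 *
 *	tailmatch("example.com", "example.com")     -> TRUE  (exact match)
 *	tailmatch("example.com", "www.example.com") -> TRUE  (dot boundary)
 *	tailmatch("ample.com", "example.com")       -> FALSE (no dot before suffix)
 */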
/*
* Load cookies from all given cookie files (CURLOPT_COOKIEFILE).
*/
void Curl_cookie_loadfiles(struct SessionHandle *data)
{
struct curl_slist *list = data->change.cookielist;
if(list) {
Curl_share_lock(data, CURL_LOCK_DATA_COOKIE, CURL_LOCK_ACCESS_SINGLE);
while(list) {
data->cookies = Curl_cookie_init(data,
list->data,
data->cookies,
data->set.cookiesession);
list = list->next;
}
curl_slist_free_all(data->change.cookielist); /* clean up list */
data->change.cookielist = NULL; /* don't do this again! */
Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
}
}
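/*
 * Application-side trigger for the loading above, using the public
 * libcurl option (illustrative only; the file name is an assumption):
 *
 *	curl_easy_setopt(curl, CURLOPT_COOKIEFILE, "cookies.txt");
 *
 * Each such call appends an entry to data->change.cookielist, which this
 * function drains before the next transfer.
 */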
/*
* strstore() makes a strdup() on the 'newstr' and if '*str' is non-NULL
* that will be freed before the allocated string is stored there.
*
* It is meant to easily replace strdup()
*/
static void strstore(char **str, const char *newstr)
{
if(*str)
free(*str);
*str = strdup(newstr);
}
/****************************************************************************
*
* Curl_cookie_add()
*
* Add a single cookie line to the cookie keeping object.
*
***************************************************************************/
struct Cookie *
Curl_cookie_add(struct SessionHandle *data,
/* The 'data' pointer here may be NULL at times, and thus
must only be used very carefully for things that can deal
with data being NULL. Such as infof() and similar */
struct CookieInfo *c,
bool httpheader, /* TRUE if HTTP header-style line */
char *lineptr, /* first character of the line */
const char *domain, /* default domain */
const char *path) /* full path used when this cookie is set,
used to get default path for the cookie
unless set */
{
struct Cookie *clist;
char name[MAX_NAME];
struct Cookie *co;
struct Cookie *lastc=NULL;
time_t now = time(NULL);
bool replace_old = FALSE;
bool badcookie = FALSE; /* cookies are good by default. mmmmm yummy */
#ifdef CURL_DISABLE_VERBOSE_STRINGS
(void)data;
#endif
/* First, alloc and init a new struct for it */
co = calloc(1, sizeof(struct Cookie));
if(!co)
return NULL; /* bail out if we're this low on memory */
if(httpheader) {
/* This line was read off an HTTP header */
const char *ptr;
const char *semiptr;
char *what;
what = malloc(MAX_COOKIE_LINE);
if(!what) {
free(co);
return NULL;
}
semiptr=strchr(lineptr, ';'); /* first, find a semicolon */
while(*lineptr && ISBLANK(*lineptr))
lineptr++;
ptr = lineptr;
do {
/* we have a <what>=<this> pair or a stand-alone word here */
name[0]=what[0]=0; /* init the buffers */
if(1 <= sscanf(ptr, "%" MAX_NAME_TXT "[^;\r\n =]=%"
MAX_COOKIE_LINE_TXT "[^;\r\n]",
name, what)) {
/* Use strstore() below to properly deal with received cookie
headers that have the same string property set more than once,
and then we use the last one. */
const char *whatptr;
bool done = FALSE;
bool sep;
size_t len=strlen(what);
const char *endofn = &ptr[ strlen(name) ];
/* skip trailing spaces in name */
while(*endofn && ISBLANK(*endofn))
endofn++;
/* name ends with a '=' ? */
sep = (*endofn == '=')?TRUE:FALSE;
/* Strip off trailing whitespace from the 'what' */
while(len && ISBLANK(what[len-1])) {
what[len-1]=0;
len--;
}
/* Skip leading whitespace from the 'what' */
whatptr=what;
while(*whatptr && ISBLANK(*whatptr))
whatptr++;
if(!len) {
/* this was a "<name>=" with no content, and we must allow
'secure' and 'httponly' specified this weirdly */
done = TRUE;
if(Curl_raw_equal("secure", name))
co->secure = TRUE;
else if(Curl_raw_equal("httponly", name))
co->httponly = TRUE;
else if(sep)
/* there was a '=' so we're not done parsing this field */
done = FALSE;
}
if(done)
;
else if(Curl_raw_equal("path", name)) {
strstore(&co->path, whatptr);
if(!co->path) {
badcookie = TRUE; /* out of memory bad */
break;
}
}
else if(Curl_raw_equal("domain", name)) {
/* note that this name may or may not have a preceding dot, but
we don't care about that, we treat the names the same anyway */
const char *domptr=whatptr;
const char *nextptr;
int dotcount=1;
/* Count the dots, we need to make sure that there are enough
of them. */
if('.' == whatptr[0])
/* don't count the initial dot, assume it */
domptr++;
do {
nextptr = strchr(domptr, '.');
if(nextptr) {
if(domptr != nextptr)
dotcount++;
domptr = nextptr+1;
}
} while(nextptr);
/* The original Netscape cookie spec defined that this domain name
MUST have three dots (or two if one of the seven holy TLDs),
but it seems that these kinds of cookies are in use "out there"
so we cannot be that strict. I've therefore lowered the check
to require at least two dots. */
if(dotcount < 2) {
/* Received and skipped a cookie with a domain using too few
dots. */
badcookie=TRUE; /* mark this as a bad cookie */
infof(data, "skipped cookie with illegal dotcount domain: %s\n",
whatptr);
}
else {
/* Now, we make sure that our host is within the given domain,
or the given domain is not valid and thus cannot be set. */
if('.' == whatptr[0])
whatptr++; /* ignore preceding dot */
if(!domain || tailmatch(whatptr, domain)) {
const char *tailptr=whatptr;
if(tailptr[0] == '.')
tailptr++;
strstore(&co->domain, tailptr); /* don't prefix w/dots
internally */
if(!co->domain) {
badcookie = TRUE;
break;
}
co->tailmatch=TRUE; /* we always do that if the domain name was
given */
}
else {
/* we did not get a tailmatch and then the attempted set domain
is not a domain to which the current host belongs. Mark as
bad. */
badcookie=TRUE;
infof(data, "skipped cookie with bad tailmatch domain: %s\n",
whatptr);
}
}
}
else if(Curl_raw_equal("version", name)) {
strstore(&co->version, whatptr);
if(!co->version) {
badcookie = TRUE;
break;
}
}
else if(Curl_raw_equal("max-age", name)) {
/* Defined in RFC2109:
Optional. The Max-Age attribute defines the lifetime of the
cookie, in seconds. The delta-seconds value is a decimal non-
negative integer. After delta-seconds seconds elapse, the
client should discard the cookie. A value of zero means the
cookie should be discarded immediately.
*/
strstore(&co->maxage, whatptr);
if(!co->maxage) {
badcookie = TRUE;
break;
}
co->expires =
strtol((*co->maxage=='\"')?&co->maxage[1]:&co->maxage[0],NULL,10)
+ (long)now;
}
else if(Curl_raw_equal("expires", name)) {
strstore(&co->expirestr, whatptr);
if(!co->expirestr) {
badcookie = TRUE;
break;
}
/* Note that if the date couldn't get parsed for whatever reason,
the cookie will be treated as a session cookie */
co->expires = curl_getdate(what, &now);
/* Session cookies have expires set to 0 so if we get that back
from the date parser let's add a second to make it a
non-session cookie */
if(co->expires == 0)
co->expires = 1;
else if(co->expires < 0)
co->expires = 0;
}
else if(!co->name) {
co->name = strdup(name);
co->value = strdup(whatptr);
if(!co->name || !co->value) {
badcookie = TRUE;
break;
}
}
/*
else this is the second (or more) name we don't know
about! */
}
else {
/* this is an "illegal" <what>=<this> pair */
}
if(!semiptr || !*semiptr) {
/* we already know there are no more cookies */
semiptr = NULL;
continue;
}
ptr=semiptr+1;
while(*ptr && ISBLANK(*ptr))
ptr++;
semiptr=strchr(ptr, ';'); /* now, find the next semicolon */
if(!semiptr && *ptr)
/* There are no more semicolons, but there's a final name=value pair
coming up */
semiptr=strchr(ptr, '\0');
} while(semiptr);
if(!badcookie && !co->domain) {
if(domain) {
/* no domain was given in the header line, set the default */
co->domain=strdup(domain);
if(!co->domain)
badcookie = TRUE;
}
}
if(!badcookie && !co->path && path) {
/* No path was given in the header line, set the default.
Note that the passed-in path to this function MAY have a '?' and
following part that MUST not be stored as part of the path. */
char *queryp = strchr(path, '?');
/* queryp is where the interesting part of the path ends, so now we
want to find the last slash before it */
char *endslash;
if(!queryp)
endslash = strrchr(path, '/');
else
endslash = memrchr(path, '/', (size_t)(queryp - path));
if(endslash) {
size_t pathlen = (size_t)(endslash-path+1); /* include ending slash */
co->path=malloc(pathlen+1); /* one extra for the zero byte */
if(co->path) {
memcpy(co->path, path, pathlen);
co->path[pathlen]=0; /* zero terminate */
}
else
badcookie = TRUE;
}
}
free(what);
if(badcookie || !co->name) {
/* we didn't get a cookie name, or we got a bad one;
this is an illegal line, bail out */
freecookie(co);
return NULL;
}
}
else {
/* This line is NOT an HTTP-header-style line; we do offer support for
reading the odd Netscape cookies-file format here (an example line is
sketched just below) */
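/* A Netscape-format line holds seven TAB-separated fields, matching
the switch() cases below (example shown for illustration only):
.example.com<TAB>TRUE<TAB>/<TAB>FALSE<TAB>0<TAB>name<TAB>value
i.e. domain, tailmatch flag, path, secure flag, expiry time in epoch
seconds, cookie name, and cookie value. */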
char *ptr;
char *firstptr;
char *tok_buf=NULL;
int fields;
/* IE introduced HTTP-only cookies to prevent XSS attacks. Cookies
marked with httpOnly after the domain name are not accessible
from javascripts, but since curl does not operate at javascript
level, we include them anyway. In Firefox's cookie files, these
lines are preceded with #HttpOnly_ and then everything is
as usual, so we skip 10 characters of the line..
*/
if(strncmp(lineptr, "#HttpOnly_", 10) == 0) {
lineptr += 10;
co->httponly = TRUE;
}
if(lineptr[0]=='#') {
/* don't even try the comments */
free(co);
return NULL;
}
/* strip off the possible end-of-line characters */
ptr=strchr(lineptr, '\r');
if(ptr)
*ptr=0; /* clear it */
ptr=strchr(lineptr, '\n');
if(ptr)
*ptr=0; /* clear it */
firstptr=strtok_r(lineptr, "\t", &tok_buf); /* tokenize it on the TAB */
/* Here's a quick check to eliminate normal HTTP-headers from this */
if(!firstptr || strchr(firstptr, ':')) {
free(co);
return NULL;
}
/* Now loop through the fields and init the struct we already have
allocated */
for(ptr=firstptr, fields=0; ptr && !badcookie;
ptr=strtok_r(NULL, "\t", &tok_buf), fields++) {
switch(fields) {
case 0:
if(ptr[0]=='.') /* skip preceding dots */
ptr++;
co->domain = strdup(ptr);
if(!co->domain)
badcookie = TRUE;
break;
case 1:
/* This field got its explanation on the 23rd of May 2001 by
Andrés García:
flag: A TRUE/FALSE value indicating if all machines within a given
domain can access the variable. This value is set automatically by
the browser, depending on the value you set for the domain.
As far as I can see, it is set to true when the cookie says
.domain.com and to false when the domain is complete www.domain.com
*/
co->tailmatch = Curl_raw_equal(ptr, "TRUE")?TRUE:FALSE;
break;
case 2:
/* It turns out that sometimes the file format allows the path
field to remain unfilled; we try to detect this and work
around it! Andrés García made us aware of this... */
if(strcmp("TRUE", ptr) && strcmp("FALSE", ptr)) {
/* only if the path doesn't look like a boolean option! */
co->path = strdup(ptr);
if(!co->path)
badcookie = TRUE;
break;
}
/* this doesn't look like a path, make one up! */
co->path = strdup("/");
if(!co->path)
badcookie = TRUE;
fields++; /* count this field and fall through to the secure field */
/* FALLTHROUGH */
case 3:
co->secure = Curl_raw_equal(ptr, "TRUE")?TRUE:FALSE;
break;
case 4:
co->expires = curlx_strtoofft(ptr, NULL, 10);
break;
case 5:
co->name = strdup(ptr);
if(!co->name)
badcookie = TRUE;
break;
case 6:
co->value = strdup(ptr);
if(!co->value)
badcookie = TRUE;
break;
}
}
if(6 == fields) {
/* we got a cookie with blank contents, fix it */
co->value = strdup("");
if(!co->value)
badcookie = TRUE;
else
fields++;
}
if(!badcookie && (7 != fields))
/* we did not find a sufficient number of fields */
badcookie = TRUE;
if(badcookie) {
freecookie(co);
return NULL;
}
}
if(!c->running && /* read from a file */
c->newsession && /* clean session cookies */
!co->expires) { /* this is a session cookie since it doesn't expire! */
freecookie(co);
return NULL;
}
co->livecookie = c->running;
/* now, we have parsed the incoming line, we must now check if this
supersedes an already existing cookie, which it may do if the previous
one has the same domain and path as this one */
clist = c->cookies;
replace_old = FALSE;
while(clist) {
if(Curl_raw_equal(clist->name, co->name)) {
/* the names are identical */
if(clist->domain && co->domain) {
if(Curl_raw_equal(clist->domain, co->domain))
/* The domains are identical */
replace_old=TRUE;
}
else if(!clist->domain && !co->domain)
replace_old = TRUE;
if(replace_old) {
/* the domains were identical */
if(clist->path && co->path) {
if(Curl_raw_equal(clist->path, co->path)) {
replace_old = TRUE;
}
else
replace_old = FALSE;
}
else if(!clist->path && !co->path)
replace_old = TRUE;
else
replace_old = FALSE;
}
if(replace_old && !co->livecookie && clist->livecookie) {
/* Both cookies matched fine, except that the already present
cookie is "live", which means it was set from a header, while
the new one isn't "live" and thus only read from a file. We let
live cookies stay alive */
/* Free the newcomer and get out of here! */
freecookie(co);
return NULL;
}
if(replace_old) {
co->next = clist->next; /* get the next-pointer first */
/* then free all the old pointers */
free(clist->name);
if(clist->value)
free(clist->value);
if(clist->domain)
free(clist->domain);
if(clist->path)
free(clist->path);
if(clist->expirestr)
free(clist->expirestr);
if(clist->version)
free(clist->version);
if(clist->maxage)
free(clist->maxage);
*clist = *co; /* then store all the new data */
free(co); /* free the newly alloced memory */
co = clist; /* point to the previous struct instead */
/* We have replaced a cookie, now skip the rest of the list but
make sure the 'lastc' pointer is properly set */
do {
lastc = clist;
clist = clist->next;
} while(clist);
break;
}
}
lastc = clist;
clist = clist->next;
}
if(c->running)
/* Only show this when NOT reading the cookies from a file */
infof(data, "%s cookie %s=\"%s\" for domain %s, path %s, "
"expire %" FORMAT_OFF_T "\n",
replace_old?"Replaced":"Added", co->name, co->value,
co->domain, co->path, co->expires);
if(!replace_old) {
/* then make the last item point on this new one */
if(lastc)
lastc->next = co;
else
c->cookies = co;
c->numcookies++; /* one more cookie in the jar */
}
return co;
}
/*****************************************************************************
*
* Curl_cookie_init()
*
* Inits a cookie struct to read data from a local file. This is always
* called before any cookies are set. File may be NULL.
*
* If 'newsession' is TRUE, discard all "session cookies" on read from file.
*
****************************************************************************/
struct CookieInfo *Curl_cookie_init(struct SessionHandle *data,
const char *file,
struct CookieInfo *inc,
bool newsession)
{
struct CookieInfo *c;
FILE *fp;
bool fromfile=TRUE;
if(NULL == inc) {
/* we didn't get a struct, create one */
c = calloc(1, sizeof(struct CookieInfo));
if(!c)
return NULL; /* failed to get memory */
c->filename = strdup(file?file:"none"); /* copy the name just in case */
}
else {
/* we got an already existing one, use that */
c = inc;
}
c->running = FALSE; /* this is not running, this is init */
if(file && strequal(file, "-")) {
fp = stdin;
fromfile=FALSE;
}
else if(file && !*file) {
/* points to a "" string */
fp = NULL;
}
else
fp = file?fopen(file, "r"):NULL;
c->newsession = newsession; /* new session? */
if(fp) {
char *lineptr;
bool headerline;
char *line = malloc(MAX_COOKIE_LINE);
if(line) {
while(fgets(line, MAX_COOKIE_LINE, fp)) {
if(checkprefix("Set-Cookie:", line)) {
/* This is a cookie line, get it! */
lineptr=&line[11];
headerline=TRUE;
}
else {
lineptr=line;
headerline=FALSE;
}
while(*lineptr && ISBLANK(*lineptr))
lineptr++;
Curl_cookie_add(data, c, headerline, lineptr, NULL, NULL);
}
free(line); /* free the line buffer */
}
if(fromfile)
fclose(fp);
}
c->running = TRUE; /* now, we're running */
return c;
}
/* sort this so that the longest path comes before the shorter path */
static int cookie_sort(const void *p1, const void *p2)
{
struct Cookie *c1 = *(struct Cookie **)p1;
struct Cookie *c2 = *(struct Cookie **)p2;
size_t l1, l2;
/* 1 - compare cookie path lengths */
l1 = c1->path ? strlen(c1->path) : 0;
l2 = c2->path ? strlen(c2->path) : 0;
if(l1 != l2)
return (l2 > l1) ? 1 : -1 ; /* avoid size_t <=> int conversions */
/* 2 - compare cookie domain lengths */
l1 = c1->domain ? strlen(c1->domain) : 0;
l2 = c2->domain ? strlen(c2->domain) : 0;
if(l1 != l2)
return (l2 > l1) ? 1 : -1 ; /* avoid size_t <=> int conversions */
/* 3 - compare cookie names */
if(c1->name && c2->name)
return strcmp(c1->name, c2->name);
/* sorry, can't be more deterministic */
return 0;
}
/*****************************************************************************
*
* Curl_cookie_getlist()
*
* For a given host and path, return a linked list of cookies that the
* client should send to the server if used now. The secure boolean
* tells the function whether a secure connection is in use or not.
*
* It shall only return cookies that haven't expired.
*
****************************************************************************/
struct Cookie *Curl_cookie_getlist(struct CookieInfo *c,
const char *host, const char *path,
bool secure)
{
struct Cookie *newco;
struct Cookie *co;
time_t now = time(NULL);
struct Cookie *mainco=NULL;
size_t matches = 0;
if(!c || !c->cookies)
return NULL; /* no cookie struct or no cookies in the struct */
co = c->cookies;
while(co) {
/* only process this cookie if it is not expired or had no expire
date AND that if the cookie requires we're secure we must only
continue if we are! */
if((!co->expires || (co->expires > now)) &&
(co->secure?secure:TRUE)) {
/* now check if the domain is correct */
if(!co->domain ||
(co->tailmatch && tailmatch(co->domain, host)) ||
(!co->tailmatch && Curl_raw_equal(host, co->domain)) ) {
/* the right part of the host matches the domain stuff in the
cookie data */
/* now check the left part of the path with the cookies path
requirement */
if(!co->path ||
/* not using checkprefix() because matching should be
case-sensitive */
!strncmp(co->path, path, strlen(co->path)) ) {
/* and now, we know this is a match and we should create an
entry for the return-linked-list */
newco = malloc(sizeof(struct Cookie));
if(newco) {
/* first, copy the whole source cookie: */
memcpy(newco, co, sizeof(struct Cookie));
/* then modify our next */
newco->next = mainco;
/* point the main to us */
mainco = newco;
matches++;
}
else {
fail:
/* failure, clear up the allocated chain and return NULL */
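/* (this label is also the target of the 'goto fail' below, taken
when the sort-array allocation fails) */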
while(mainco) {
co = mainco->next;
free(mainco);
mainco = co;
}
return NULL;
}
}
}
}
co = co->next;
}
if(matches) {
/* Now we need to make sure that if a name appears more than once,
the version with the longest specified path comes first. To do this
swiftly, we just sort them all based on path length. */
struct Cookie **array;
size_t i;
/* alloc an array and store all cookie pointers */
array = malloc(sizeof(struct Cookie *) * matches);
if(!array)
goto fail;
co = mainco;
for(i=0; co; co = co->next)
array[i++] = co;
/* now sort the cookie pointers in path length order */
qsort(array, matches, sizeof(struct Cookie *), cookie_sort);
/* remake the linked list order according to the new order */
mainco = array[0]; /* start here */
for(i=0; i<matches-1; i++)
array[i]->next = array[i+1];
array[matches-1]->next = NULL; /* terminate the list */
free(array); /* remove the temporary data again */
}
return mainco; /* return the new list */
}
/*****************************************************************************
*
* Curl_cookie_clearall()
*
* Clear all existing cookies and reset the counter.
*
****************************************************************************/
void Curl_cookie_clearall(struct CookieInfo *cookies)
{
if(cookies) {
Curl_cookie_freelist(cookies->cookies, TRUE);
cookies->cookies = NULL;
cookies->numcookies = 0;
}
}
/*****************************************************************************
*
* Curl_cookie_freelist()
*
* Free a list of cookies previously returned by Curl_cookie_getlist().
*
* The 'cookiestoo' argument tells this function whether to just free the
* list or actually also free all cookies within the list as well.
*
****************************************************************************/
void Curl_cookie_freelist(struct Cookie *co, bool cookiestoo)
{
struct Cookie *next;
if(co) {
while(co) {
next = co->next;
if(cookiestoo)
freecookie(co);
else
free(co); /* we only free the struct since the "members" are all just
pointed out in the main cookie list! */
co = next;
}
}
}
/*****************************************************************************
*
* Curl_cookie_clearsess()
*
* Free all session cookies in the cookies list.
*
****************************************************************************/
void Curl_cookie_clearsess(struct CookieInfo *cookies)
{
struct Cookie *first, *curr, *next, *prev = NULL;
if(!cookies || !cookies->cookies)
return;
first = curr = prev = cookies->cookies;
for(; curr; curr = next) {
next = curr->next;
if(!curr->expires) {
if(first == curr)
first = next;
if(prev == curr)
prev = next;
else
prev->next = next;
freecookie(curr);
cookies->numcookies--;
}
else
prev = curr;
}
cookies->cookies = first;
}
/*****************************************************************************
*
* Curl_cookie_cleanup()
*
* Free a "cookie object" previously created with Curl_cookie_init().
*
****************************************************************************/
void Curl_cookie_cleanup(struct CookieInfo *c)
{
struct Cookie *co;
struct Cookie *next;
if(c) {
if(c->filename)
free(c->filename);
co = c->cookies;
while(co) {
next = co->next;
freecookie(co);
co = next;
}
free(c); /* free the base struct as well */
}
}
/* get_netscape_format()
*
* Formats a string for a Netscape cookie file, w/o a newline at the end.
*
* Returns a malloc'ed char * pointing to the formatted line; it has to
* be free()d by the caller.
*/
static char *get_netscape_format(const struct Cookie *co)
{
return aprintf(
"%s" /* httponly preamble */
"%s%s\t" /* domain */
"%s\t" /* tailmatch */
"%s\t" /* path */
"%s\t" /* secure */
"%" FORMAT_OFF_T "\t" /* expires */
"%s\t" /* name */
"%s", /* value */
co->httponly?"#HttpOnly_":"",
/* Make sure all domains are prefixed with a dot if they allow
tailmatching. This is Mozilla-style. */
(co->tailmatch && co->domain && co->domain[0] != '.')? ".":"",
co->domain?co->domain:"unknown",
co->tailmatch?"TRUE":"FALSE",
co->path?co->path:"/",
co->secure?"TRUE":"FALSE",
co->expires,
co->name,
co->value?co->value:"");
}
/*
* cookie_output()
*
* Writes all internally known cookies to the specified file. Specify
* "-" as file name to write to stdout.
*
* The function returns non-zero on write failure.
*/
static int cookie_output(struct CookieInfo *c, const char *dumphere)
{
struct Cookie *co;
FILE *out;
bool use_stdout=FALSE;
if((NULL == c) || (0 == c->numcookies))
/* If there are no known cookies, we don't write or even create any
destination file */
return 0;
if(strequal("-", dumphere)) {
/* use stdout */
out = stdout;
use_stdout=TRUE;
}
else {
out = fopen(dumphere, "w");
if(!out)
return 1; /* failure */
}
if(c) {
char *format_ptr;
fputs("# Netscape HTTP Cookie File\n"
"# http://curl.haxx.se/docs/http-cookies.html\n"
"# This file was generated by libcurl! Edit at your own risk.\n\n",
out);
co = c->cookies;
while(co) {
format_ptr = get_netscape_format(co);
if(format_ptr == NULL) {
fprintf(out, "#\n# Fatal libcurl error\n");
if(!use_stdout)
fclose(out);
return 1;
}
fprintf(out, "%s\n", format_ptr);
free(format_ptr);
co=co->next;
}
}
if(!use_stdout)
fclose(out);
return 0;
}
struct curl_slist *Curl_cookie_list(struct SessionHandle *data)
{
struct curl_slist *list = NULL;
struct curl_slist *beg;
struct Cookie *c;
char *line;
if((data->cookies == NULL) ||
(data->cookies->numcookies == 0))
return NULL;
c = data->cookies->cookies;
while(c) {
/* fill the list with _all_ the cookies we know */
line = get_netscape_format(c);
if(!line) {
curl_slist_free_all(list);
return NULL;
}
beg = curl_slist_append(list, line);
free(line);
if(!beg) {
curl_slist_free_all(list);
return NULL;
}
list = beg;
c = c->next;
}
return list;
}
void Curl_flush_cookies(struct SessionHandle *data, int cleanup)
{
if(data->set.str[STRING_COOKIEJAR]) {
if(data->change.cookielist) {
/* If there is a list of cookie files to read, do it first so that
we have all the told files read before we write the new jar.
Curl_cookie_loadfiles() LOCKS and UNLOCKS the share itself! */
Curl_cookie_loadfiles(data);
}
Curl_share_lock(data, CURL_LOCK_DATA_COOKIE, CURL_LOCK_ACCESS_SINGLE);
/* if we have a destination file for all the cookies to get dumped to */
if(cookie_output(data->cookies, data->set.str[STRING_COOKIEJAR]))
infof(data, "WARNING: failed to save cookies in %s\n",
data->set.str[STRING_COOKIEJAR]);
}
else {
if(cleanup && data->change.cookielist) {
/* since nothing is written, we can just free the list of cookie file
names */
curl_slist_free_all(data->change.cookielist); /* clean up list */
data->change.cookielist = NULL;
}
Curl_share_lock(data, CURL_LOCK_DATA_COOKIE, CURL_LOCK_ACCESS_SINGLE);
}
if(cleanup && (!data->share || (data->cookies != data->share->cookies))) {
Curl_cookie_cleanup(data->cookies);
}
Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
}
#endif /* CURL_DISABLE_HTTP || CURL_DISABLE_COOKIES */
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_5614_0 |
crossvul-cpp_data_good_3839_0 | /* net/atm/common.c - ATM sockets (common part for PVC and SVC) */
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/net.h> /* struct socket, struct proto_ops */
#include <linux/atm.h> /* ATM stuff */
#include <linux/atmdev.h>
#include <linux/socket.h> /* SOL_SOCKET */
#include <linux/errno.h> /* error codes */
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/time.h> /* struct timeval */
#include <linux/skbuff.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/sock.h> /* struct sock */
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/atomic.h>
#include "resources.h" /* atm_find_dev */
#include "common.h" /* prototypes */
#include "protocols.h" /* atm_init_<transport> */
#include "addr.h" /* address registry */
#include "signaling.h" /* for WAITING and sigd_attach */
struct hlist_head vcc_hash[VCC_HTABLE_SIZE];
EXPORT_SYMBOL(vcc_hash);
DEFINE_RWLOCK(vcc_sklist_lock);
EXPORT_SYMBOL(vcc_sklist_lock);
static ATOMIC_NOTIFIER_HEAD(atm_dev_notify_chain);
static void __vcc_insert_socket(struct sock *sk)
{
struct atm_vcc *vcc = atm_sk(sk);
struct hlist_head *head = &vcc_hash[vcc->vci & (VCC_HTABLE_SIZE - 1)];
sk->sk_hash = vcc->vci & (VCC_HTABLE_SIZE - 1);
sk_add_node(sk, head);
}
void vcc_insert_socket(struct sock *sk)
{
write_lock_irq(&vcc_sklist_lock);
__vcc_insert_socket(sk);
write_unlock_irq(&vcc_sklist_lock);
}
EXPORT_SYMBOL(vcc_insert_socket);
static void vcc_remove_socket(struct sock *sk)
{
write_lock_irq(&vcc_sklist_lock);
sk_del_node_init(sk);
write_unlock_irq(&vcc_sklist_lock);
}
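/* Allocate a tx skb of 'size' bytes for 'vcc'. Returns NULL when the
socket's send-buffer accounting says 'size' more bytes may not be sent
yet; otherwise retries alloc_skb() (yielding via schedule()) until it
succeeds. The skb's truesize is charged to sk_wmem_alloc. */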
static struct sk_buff *alloc_tx(struct atm_vcc *vcc, unsigned int size)
{
struct sk_buff *skb;
struct sock *sk = sk_atm(vcc);
if (sk_wmem_alloc_get(sk) && !atm_may_send(vcc, size)) {
pr_debug("Sorry: wmem_alloc = %d, size = %d, sndbuf = %d\n",
sk_wmem_alloc_get(sk), size, sk->sk_sndbuf);
return NULL;
}
while (!(skb = alloc_skb(size, GFP_KERNEL)))
schedule();
pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
atomic_add(skb->truesize, &sk->sk_wmem_alloc);
return skb;
}
static void vcc_sock_destruct(struct sock *sk)
{
if (atomic_read(&sk->sk_rmem_alloc))
printk(KERN_DEBUG "%s: rmem leakage (%d bytes) detected.\n",
__func__, atomic_read(&sk->sk_rmem_alloc));
if (atomic_read(&sk->sk_wmem_alloc))
printk(KERN_DEBUG "%s: wmem leakage (%d bytes) detected.\n",
__func__, atomic_read(&sk->sk_wmem_alloc));
}
static void vcc_def_wakeup(struct sock *sk)
{
struct socket_wq *wq;
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
if (wq_has_sleeper(wq))
wake_up(&wq->wait);
rcu_read_unlock();
}
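/* A vcc socket counts as writable while one more maximum-sized SDU
still fits under the send-buffer limit. */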
static inline int vcc_writable(struct sock *sk)
{
struct atm_vcc *vcc = atm_sk(sk);
return (vcc->qos.txtp.max_sdu +
atomic_read(&sk->sk_wmem_alloc)) <= sk->sk_sndbuf;
}
static void vcc_write_space(struct sock *sk)
{
struct socket_wq *wq;
rcu_read_lock();
if (vcc_writable(sk)) {
wq = rcu_dereference(sk->sk_wq);
if (wq_has_sleeper(wq))
wake_up_interruptible(&wq->wait);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
rcu_read_unlock();
}
static struct proto vcc_proto = {
.name = "VCC",
.owner = THIS_MODULE,
.obj_size = sizeof(struct atm_vcc),
};
int vcc_create(struct net *net, struct socket *sock, int protocol, int family)
{
struct sock *sk;
struct atm_vcc *vcc;
sock->sk = NULL;
if (sock->type == SOCK_STREAM)
return -EINVAL;
sk = sk_alloc(net, family, GFP_KERNEL, &vcc_proto);
if (!sk)
return -ENOMEM;
sock_init_data(sock, sk);
sk->sk_state_change = vcc_def_wakeup;
sk->sk_write_space = vcc_write_space;
vcc = atm_sk(sk);
vcc->dev = NULL;
memset(&vcc->local, 0, sizeof(struct sockaddr_atmsvc));
memset(&vcc->remote, 0, sizeof(struct sockaddr_atmsvc));
vcc->qos.txtp.max_sdu = 1 << 16; /* for meta VCs */
atomic_set(&sk->sk_wmem_alloc, 1);
atomic_set(&sk->sk_rmem_alloc, 0);
vcc->push = NULL;
vcc->pop = NULL;
vcc->push_oam = NULL;
vcc->vpi = vcc->vci = 0; /* no VCI/VPI yet */
vcc->atm_options = vcc->aal_options = 0;
sk->sk_destruct = vcc_sock_destruct;
return 0;
}
static void vcc_destroy_socket(struct sock *sk)
{
struct atm_vcc *vcc = atm_sk(sk);
struct sk_buff *skb;
set_bit(ATM_VF_CLOSE, &vcc->flags);
clear_bit(ATM_VF_READY, &vcc->flags);
if (vcc->dev) {
if (vcc->dev->ops->close)
vcc->dev->ops->close(vcc);
if (vcc->push)
vcc->push(vcc, NULL); /* atmarpd has no push */
while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
atm_return(vcc, skb->truesize);
kfree_skb(skb);
}
module_put(vcc->dev->ops->owner);
atm_dev_put(vcc->dev);
}
vcc_remove_socket(sk);
}
int vcc_release(struct socket *sock)
{
struct sock *sk = sock->sk;
if (sk) {
lock_sock(sk);
vcc_destroy_socket(sock->sk);
release_sock(sk);
sock_put(sk);
}
return 0;
}
void vcc_release_async(struct atm_vcc *vcc, int reply)
{
struct sock *sk = sk_atm(vcc);
set_bit(ATM_VF_CLOSE, &vcc->flags);
sk->sk_shutdown |= RCV_SHUTDOWN;
sk->sk_err = -reply;
clear_bit(ATM_VF_WAITING, &vcc->flags);
sk->sk_state_change(sk);
}
EXPORT_SYMBOL(vcc_release_async);
void vcc_process_recv_queue(struct atm_vcc *vcc)
{
struct sk_buff_head queue, *rq;
struct sk_buff *skb, *tmp;
unsigned long flags;
__skb_queue_head_init(&queue);
rq = &sk_atm(vcc)->sk_receive_queue;
spin_lock_irqsave(&rq->lock, flags);
skb_queue_splice_init(rq, &queue);
spin_unlock_irqrestore(&rq->lock, flags);
skb_queue_walk_safe(&queue, skb, tmp) {
__skb_unlink(skb, &queue);
vcc->push(vcc, skb);
}
}
EXPORT_SYMBOL(vcc_process_recv_queue);
void atm_dev_signal_change(struct atm_dev *dev, char signal)
{
pr_debug("%s signal=%d dev=%p number=%d dev->signal=%d\n",
__func__, signal, dev, dev->number, dev->signal);
/* atm driver sending invalid signal */
WARN_ON(signal < ATM_PHY_SIG_LOST || signal > ATM_PHY_SIG_FOUND);
if (dev->signal == signal)
return; /* no change */
dev->signal = signal;
atomic_notifier_call_chain(&atm_dev_notify_chain, signal, dev);
}
EXPORT_SYMBOL(atm_dev_signal_change);
void atm_dev_release_vccs(struct atm_dev *dev)
{
int i;
write_lock_irq(&vcc_sklist_lock);
for (i = 0; i < VCC_HTABLE_SIZE; i++) {
struct hlist_head *head = &vcc_hash[i];
struct hlist_node *node, *tmp;
struct sock *s;
struct atm_vcc *vcc;
sk_for_each_safe(s, node, tmp, head) {
vcc = atm_sk(s);
if (vcc->dev == dev) {
vcc_release_async(vcc, -EPIPE);
sk_del_node_init(s);
}
}
}
write_unlock_irq(&vcc_sklist_lock);
}
EXPORT_SYMBOL(atm_dev_release_vccs);
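/* Fill in AAL-dependent defaults in a traffic parameter block: pick the
maximum SDU size for the given AAL, use it when max_sdu is unset, and
reject a requested max_sdu that exceeds it. Also defaults max_cdv. */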
static int adjust_tp(struct atm_trafprm *tp, unsigned char aal)
{
int max_sdu;
if (!tp->traffic_class)
return 0;
switch (aal) {
case ATM_AAL0:
max_sdu = ATM_CELL_SIZE-1;
break;
case ATM_AAL34:
max_sdu = ATM_MAX_AAL34_PDU;
break;
default:
pr_warning("AAL problems ... (%d)\n", aal);
/* fall through */
case ATM_AAL5:
max_sdu = ATM_MAX_AAL5_PDU;
}
if (!tp->max_sdu)
tp->max_sdu = max_sdu;
else if (tp->max_sdu > max_sdu)
return -EINVAL;
if (!tp->max_cdv)
tp->max_cdv = ATM_MAX_CDV;
return 0;
}
static int check_ci(const struct atm_vcc *vcc, short vpi, int vci)
{
struct hlist_head *head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];
struct hlist_node *node;
struct sock *s;
struct atm_vcc *walk;
sk_for_each(s, node, head) {
walk = atm_sk(s);
if (walk->dev != vcc->dev)
continue;
if (test_bit(ATM_VF_ADDR, &walk->flags) && walk->vpi == vpi &&
walk->vci == vci && ((walk->qos.txtp.traffic_class !=
ATM_NONE && vcc->qos.txtp.traffic_class != ATM_NONE) ||
(walk->qos.rxtp.traffic_class != ATM_NONE &&
vcc->qos.rxtp.traffic_class != ATM_NONE)))
return -EADDRINUSE;
}
/* allow VCCs with same VPI/VCI iff they don't collide on
TX/RX (but we may refuse such sharing for other reasons,
e.g. if the protocol requires having both channels) */
return 0;
}
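/* Find a free VPI/VCI pair for 'vcc'. Fully specified requests are just
checked for collisions; wildcard requests (ATM_VPI_ANY / ATM_VCI_ANY)
are resolved by scanning circularly from the last values handed out,
which are cached in the static 'p' and 'c'. Returns -EADDRINUSE if the
whole range is taken. */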
static int find_ci(const struct atm_vcc *vcc, short *vpi, int *vci)
{
static short p; /* poor man's per-device cache */
static int c;
short old_p;
int old_c;
int err;
if (*vpi != ATM_VPI_ANY && *vci != ATM_VCI_ANY) {
err = check_ci(vcc, *vpi, *vci);
return err;
}
/* last scan may have left values out of bounds for current device */
if (*vpi != ATM_VPI_ANY)
p = *vpi;
else if (p >= 1 << vcc->dev->ci_range.vpi_bits)
p = 0;
if (*vci != ATM_VCI_ANY)
c = *vci;
else if (c < ATM_NOT_RSV_VCI || c >= 1 << vcc->dev->ci_range.vci_bits)
c = ATM_NOT_RSV_VCI;
old_p = p;
old_c = c;
do {
if (!check_ci(vcc, p, c)) {
*vpi = p;
*vci = c;
return 0;
}
if (*vci == ATM_VCI_ANY) {
c++;
if (c >= 1 << vcc->dev->ci_range.vci_bits)
c = ATM_NOT_RSV_VCI;
}
if ((c == ATM_NOT_RSV_VCI || *vci != ATM_VCI_ANY) &&
*vpi == ATM_VPI_ANY) {
p++;
if (p >= 1 << vcc->dev->ci_range.vpi_bits)
p = 0;
}
} while (old_p != p || old_c != c);
return -EADDRINUSE;
}
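/* Bind 'vcc' to device 'dev' with the (possibly wildcarded) vpi/vci:
take a module reference on the driver, reserve the channel under
vcc_sklist_lock, initialize the AAL layer and adjust the QoS traffic
parameters, then call the driver's open() hook. Undoes everything on
failure. */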
static int __vcc_connect(struct atm_vcc *vcc, struct atm_dev *dev, short vpi,
int vci)
{
struct sock *sk = sk_atm(vcc);
int error;
if ((vpi != ATM_VPI_UNSPEC && vpi != ATM_VPI_ANY &&
vpi >> dev->ci_range.vpi_bits) || (vci != ATM_VCI_UNSPEC &&
vci != ATM_VCI_ANY && vci >> dev->ci_range.vci_bits))
return -EINVAL;
if (vci > 0 && vci < ATM_NOT_RSV_VCI && !capable(CAP_NET_BIND_SERVICE))
return -EPERM;
error = -ENODEV;
if (!try_module_get(dev->ops->owner))
return error;
vcc->dev = dev;
write_lock_irq(&vcc_sklist_lock);
if (test_bit(ATM_DF_REMOVED, &dev->flags) ||
(error = find_ci(vcc, &vpi, &vci))) {
write_unlock_irq(&vcc_sklist_lock);
goto fail_module_put;
}
vcc->vpi = vpi;
vcc->vci = vci;
__vcc_insert_socket(sk);
write_unlock_irq(&vcc_sklist_lock);
switch (vcc->qos.aal) {
case ATM_AAL0:
error = atm_init_aal0(vcc);
vcc->stats = &dev->stats.aal0;
break;
case ATM_AAL34:
error = atm_init_aal34(vcc);
vcc->stats = &dev->stats.aal34;
break;
case ATM_NO_AAL:
/* ATM_AAL5 is also used in the "0 for default" case */
vcc->qos.aal = ATM_AAL5;
/* fall through */
case ATM_AAL5:
error = atm_init_aal5(vcc);
vcc->stats = &dev->stats.aal5;
break;
default:
error = -EPROTOTYPE;
}
if (!error)
error = adjust_tp(&vcc->qos.txtp, vcc->qos.aal);
if (!error)
error = adjust_tp(&vcc->qos.rxtp, vcc->qos.aal);
if (error)
goto fail;
pr_debug("VCC %d.%d, AAL %d\n", vpi, vci, vcc->qos.aal);
pr_debug(" TX: %d, PCR %d..%d, SDU %d\n",
vcc->qos.txtp.traffic_class,
vcc->qos.txtp.min_pcr,
vcc->qos.txtp.max_pcr,
vcc->qos.txtp.max_sdu);
pr_debug(" RX: %d, PCR %d..%d, SDU %d\n",
vcc->qos.rxtp.traffic_class,
vcc->qos.rxtp.min_pcr,
vcc->qos.rxtp.max_pcr,
vcc->qos.rxtp.max_sdu);
if (dev->ops->open) {
error = dev->ops->open(vcc);
if (error)
goto fail;
}
return 0;
fail:
vcc_remove_socket(sk);
fail_module_put:
module_put(dev->ops->owner);
/* ensure we get dev module ref count correct */
vcc->dev = NULL;
return error;
}
int vcc_connect(struct socket *sock, int itf, short vpi, int vci)
{
struct atm_dev *dev;
struct atm_vcc *vcc = ATM_SD(sock);
int error;
pr_debug("(vpi %d, vci %d)\n", vpi, vci);
if (sock->state == SS_CONNECTED)
return -EISCONN;
if (sock->state != SS_UNCONNECTED)
return -EINVAL;
if (!(vpi || vci))
return -EINVAL;
if (vpi != ATM_VPI_UNSPEC && vci != ATM_VCI_UNSPEC)
clear_bit(ATM_VF_PARTIAL, &vcc->flags);
else
if (test_bit(ATM_VF_PARTIAL, &vcc->flags))
return -EINVAL;
pr_debug("(TX: cl %d,bw %d-%d,sdu %d; "
"RX: cl %d,bw %d-%d,sdu %d,AAL %s%d)\n",
vcc->qos.txtp.traffic_class, vcc->qos.txtp.min_pcr,
vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_sdu,
vcc->qos.rxtp.traffic_class, vcc->qos.rxtp.min_pcr,
vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_sdu,
vcc->qos.aal == ATM_AAL5 ? "" :
vcc->qos.aal == ATM_AAL0 ? "" : " ??? code ",
vcc->qos.aal == ATM_AAL0 ? 0 : vcc->qos.aal);
if (!test_bit(ATM_VF_HASQOS, &vcc->flags))
return -EBADFD;
if (vcc->qos.txtp.traffic_class == ATM_ANYCLASS ||
vcc->qos.rxtp.traffic_class == ATM_ANYCLASS)
return -EINVAL;
if (likely(itf != ATM_ITF_ANY)) {
dev = try_then_request_module(atm_dev_lookup(itf),
"atm-device-%d", itf);
} else {
dev = NULL;
mutex_lock(&atm_dev_mutex);
if (!list_empty(&atm_devs)) {
dev = list_entry(atm_devs.next,
struct atm_dev, dev_list);
atm_dev_hold(dev);
}
mutex_unlock(&atm_dev_mutex);
}
if (!dev)
return -ENODEV;
error = __vcc_connect(vcc, dev, vpi, vci);
if (error) {
atm_dev_put(dev);
return error;
}
if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC)
set_bit(ATM_VF_PARTIAL, &vcc->flags);
if (test_bit(ATM_VF_READY, &ATM_SD(sock)->flags))
sock->state = SS_CONNECTED;
return 0;
}
int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
size_t size, int flags)
{
struct sock *sk = sock->sk;
struct atm_vcc *vcc;
struct sk_buff *skb;
int copied, error = -EINVAL;
if (sock->state != SS_CONNECTED)
return -ENOTCONN;
/* only handle MSG_DONTWAIT and MSG_PEEK */
if (flags & ~(MSG_DONTWAIT | MSG_PEEK))
return -EOPNOTSUPP;
vcc = ATM_SD(sock);
if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
test_bit(ATM_VF_CLOSE, &vcc->flags) ||
!test_bit(ATM_VF_READY, &vcc->flags))
return 0;
skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &error);
if (!skb)
return error;
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
error = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (error)
return error;
sock_recv_ts_and_drops(msg, sk, skb);
if (!(flags & MSG_PEEK)) {
pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc),
skb->truesize);
atm_return(vcc, skb->truesize);
}
skb_free_datagram(sk, skb);
return copied;
}
int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
size_t total_len)
{
struct sock *sk = sock->sk;
DEFINE_WAIT(wait);
struct atm_vcc *vcc;
struct sk_buff *skb;
int eff, error;
const void __user *buff;
int size;
lock_sock(sk);
if (sock->state != SS_CONNECTED) {
error = -ENOTCONN;
goto out;
}
if (m->msg_name) {
error = -EISCONN;
goto out;
}
if (m->msg_iovlen != 1) {
error = -ENOSYS; /* fix this later @@@ */
goto out;
}
buff = m->msg_iov->iov_base;
size = m->msg_iov->iov_len;
vcc = ATM_SD(sock);
if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
test_bit(ATM_VF_CLOSE, &vcc->flags) ||
!test_bit(ATM_VF_READY, &vcc->flags)) {
error = -EPIPE;
send_sig(SIGPIPE, current, 0);
goto out;
}
if (!size) {
error = 0;
goto out;
}
if (size < 0 || size > vcc->qos.txtp.max_sdu) {
error = -EMSGSIZE;
goto out;
}
eff = (size+3) & ~3; /* align to word boundary */
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
error = 0;
while (!(skb = alloc_tx(vcc, eff))) {
if (m->msg_flags & MSG_DONTWAIT) {
error = -EAGAIN;
break;
}
schedule();
if (signal_pending(current)) {
error = -ERESTARTSYS;
break;
}
if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
test_bit(ATM_VF_CLOSE, &vcc->flags) ||
!test_bit(ATM_VF_READY, &vcc->flags)) {
error = -EPIPE;
send_sig(SIGPIPE, current, 0);
break;
}
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
}
finish_wait(sk_sleep(sk), &wait);
if (error)
goto out;
skb->dev = NULL; /* for paths shared with net_device interfaces */
ATM_SKB(skb)->atm_options = vcc->atm_options;
if (copy_from_user(skb_put(skb, size), buff, size)) {
kfree_skb(skb);
error = -EFAULT;
goto out;
}
if (eff != size)
memset(skb->data + size, 0, eff-size);
error = vcc->dev->ops->send(vcc, skb);
error = error ? error : size;
out:
release_sock(sk);
return error;
}
unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
struct atm_vcc *vcc;
unsigned int mask;
sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
vcc = ATM_SD(sock);
/* exceptional events */
if (sk->sk_err)
mask = POLLERR;
if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
test_bit(ATM_VF_CLOSE, &vcc->flags))
mask |= POLLHUP;
/* readable? */
if (!skb_queue_empty(&sk->sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
/* writable? */
if (sock->state == SS_CONNECTING &&
test_bit(ATM_VF_WAITING, &vcc->flags))
return mask;
if (vcc->qos.txtp.traffic_class != ATM_NONE &&
vcc_writable(sk))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
return mask;
}
static int atm_change_qos(struct atm_vcc *vcc, struct atm_qos *qos)
{
int error;
/*
* Don't let the QoS change the already connected AAL type nor the
* traffic class.
*/
if (qos->aal != vcc->qos.aal ||
qos->rxtp.traffic_class != vcc->qos.rxtp.traffic_class ||
qos->txtp.traffic_class != vcc->qos.txtp.traffic_class)
return -EINVAL;
error = adjust_tp(&qos->txtp, qos->aal);
if (!error)
error = adjust_tp(&qos->rxtp, qos->aal);
if (error)
return error;
if (!vcc->dev->ops->change_qos)
return -EOPNOTSUPP;
if (sk_atm(vcc)->sk_family == AF_ATMPVC)
return vcc->dev->ops->change_qos(vcc, qos, ATM_MF_SET);
return svc_change_qos(vcc, qos);
}
static int check_tp(const struct atm_trafprm *tp)
{
/* @@@ Should be merged with adjust_tp */
if (!tp->traffic_class || tp->traffic_class == ATM_ANYCLASS)
return 0;
if (tp->traffic_class != ATM_UBR && !tp->min_pcr && !tp->pcr &&
!tp->max_pcr)
return -EINVAL;
if (tp->min_pcr == ATM_MAX_PCR)
return -EINVAL;
if (tp->min_pcr && tp->max_pcr && tp->max_pcr != ATM_MAX_PCR &&
tp->min_pcr > tp->max_pcr)
return -EINVAL;
/*
* We allow pcr to be outside [min_pcr,max_pcr], because later
* adjustment may still push it in the valid range.
*/
return 0;
}
static int check_qos(const struct atm_qos *qos)
{
int error;
if (!qos->txtp.traffic_class && !qos->rxtp.traffic_class)
return -EINVAL;
if (qos->txtp.traffic_class != qos->rxtp.traffic_class &&
qos->txtp.traffic_class && qos->rxtp.traffic_class &&
qos->txtp.traffic_class != ATM_ANYCLASS &&
qos->rxtp.traffic_class != ATM_ANYCLASS)
return -EINVAL;
error = check_tp(&qos->txtp);
if (error)
return error;
return check_tp(&qos->rxtp);
}
int vcc_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct atm_vcc *vcc;
unsigned long value;
int error;
if (__SO_LEVEL_MATCH(optname, level) && optlen != __SO_SIZE(optname))
return -EINVAL;
vcc = ATM_SD(sock);
switch (optname) {
case SO_ATMQOS:
{
struct atm_qos qos;
if (copy_from_user(&qos, optval, sizeof(qos)))
return -EFAULT;
error = check_qos(&qos);
if (error)
return error;
if (sock->state == SS_CONNECTED)
return atm_change_qos(vcc, &qos);
if (sock->state != SS_UNCONNECTED)
return -EBADFD;
vcc->qos = qos;
set_bit(ATM_VF_HASQOS, &vcc->flags);
return 0;
}
case SO_SETCLP:
if (get_user(value, (unsigned long __user *)optval))
return -EFAULT;
if (value)
vcc->atm_options |= ATM_ATMOPT_CLP;
else
vcc->atm_options &= ~ATM_ATMOPT_CLP;
return 0;
default:
if (level == SOL_SOCKET)
return -EINVAL;
break;
}
if (!vcc->dev || !vcc->dev->ops->setsockopt)
return -EINVAL;
return vcc->dev->ops->setsockopt(vcc, level, optname, optval, optlen);
}
int vcc_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct atm_vcc *vcc;
int len;
if (get_user(len, optlen))
return -EFAULT;
if (__SO_LEVEL_MATCH(optname, level) && len != __SO_SIZE(optname))
return -EINVAL;
vcc = ATM_SD(sock);
switch (optname) {
case SO_ATMQOS:
if (!test_bit(ATM_VF_HASQOS, &vcc->flags))
return -EINVAL;
return copy_to_user(optval, &vcc->qos, sizeof(vcc->qos))
? -EFAULT : 0;
case SO_SETCLP:
return put_user(vcc->atm_options & ATM_ATMOPT_CLP ? 1 : 0,
(unsigned long __user *)optval) ? -EFAULT : 0;
case SO_ATMPVC:
{
struct sockaddr_atmpvc pvc;
if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags))
return -ENOTCONN;
memset(&pvc, 0, sizeof(pvc));
pvc.sap_family = AF_ATMPVC;
pvc.sap_addr.itf = vcc->dev->number;
pvc.sap_addr.vpi = vcc->vpi;
pvc.sap_addr.vci = vcc->vci;
return copy_to_user(optval, &pvc, sizeof(pvc)) ? -EFAULT : 0;
}
default:
if (level == SOL_SOCKET)
return -EINVAL;
break;
}
if (!vcc->dev || !vcc->dev->ops->getsockopt)
return -EINVAL;
return vcc->dev->ops->getsockopt(vcc, level, optname, optval, len);
}
int register_atmdevice_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_register(&atm_dev_notify_chain, nb);
}
EXPORT_SYMBOL_GPL(register_atmdevice_notifier);
void unregister_atmdevice_notifier(struct notifier_block *nb)
{
atomic_notifier_chain_unregister(&atm_dev_notify_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_atmdevice_notifier);
static int __init atm_init(void)
{
int error;
error = proto_register(&vcc_proto, 0);
if (error < 0)
goto out;
error = atmpvc_init();
if (error < 0) {
pr_err("atmpvc_init() failed with %d\n", error);
goto out_unregister_vcc_proto;
}
error = atmsvc_init();
if (error < 0) {
pr_err("atmsvc_init() failed with %d\n", error);
goto out_atmpvc_exit;
}
error = atm_proc_init();
if (error < 0) {
pr_err("atm_proc_init() failed with %d\n", error);
goto out_atmsvc_exit;
}
error = atm_sysfs_init();
if (error < 0) {
pr_err("atm_sysfs_init() failed with %d\n", error);
goto out_atmproc_exit;
}
out:
return error;
out_atmproc_exit:
atm_proc_exit();
out_atmsvc_exit:
atmsvc_exit();
out_atmpvc_exit:
atmpvc_exit();
out_unregister_vcc_proto:
proto_unregister(&vcc_proto);
goto out;
}
static void __exit atm_exit(void)
{
atm_proc_exit();
atm_sysfs_exit();
atmsvc_exit();
atmpvc_exit();
proto_unregister(&vcc_proto);
}
subsys_initcall(atm_init);
module_exit(atm_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_ATMPVC);
MODULE_ALIAS_NETPROTO(PF_ATMSVC);
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_3839_0 |
crossvul-cpp_data_bad_3838_0 | /* net/atm/pvc.c - ATM PVC sockets */
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
#include <linux/net.h> /* struct socket, struct proto_ops */
#include <linux/atm.h> /* ATM stuff */
#include <linux/atmdev.h> /* ATM devices */
#include <linux/errno.h> /* error codes */
#include <linux/kernel.h> /* printk */
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <net/sock.h> /* for sock_no_* */
#include "resources.h" /* devs and vccs */
#include "common.h" /* common for PVCs and SVCs */
static int pvc_shutdown(struct socket *sock, int how)
{
return 0;
}
static int pvc_bind(struct socket *sock, struct sockaddr *sockaddr,
int sockaddr_len)
{
struct sock *sk = sock->sk;
struct sockaddr_atmpvc *addr;
struct atm_vcc *vcc;
int error;
if (sockaddr_len != sizeof(struct sockaddr_atmpvc))
return -EINVAL;
addr = (struct sockaddr_atmpvc *)sockaddr;
if (addr->sap_family != AF_ATMPVC)
return -EAFNOSUPPORT;
lock_sock(sk);
vcc = ATM_SD(sock);
if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) {
error = -EBADFD;
goto out;
}
if (test_bit(ATM_VF_PARTIAL, &vcc->flags)) {
if (vcc->vpi != ATM_VPI_UNSPEC)
addr->sap_addr.vpi = vcc->vpi;
if (vcc->vci != ATM_VCI_UNSPEC)
addr->sap_addr.vci = vcc->vci;
}
error = vcc_connect(sock, addr->sap_addr.itf, addr->sap_addr.vpi,
addr->sap_addr.vci);
out:
release_sock(sk);
return error;
}
static int pvc_connect(struct socket *sock, struct sockaddr *sockaddr,
int sockaddr_len, int flags)
{
return pvc_bind(sock, sockaddr, sockaddr_len);
}
static int pvc_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
int error;
lock_sock(sk);
error = vcc_setsockopt(sock, level, optname, optval, optlen);
release_sock(sk);
return error;
}
static int pvc_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
int error;
lock_sock(sk);
error = vcc_getsockopt(sock, level, optname, optval, optlen);
release_sock(sk);
return error;
}
static int pvc_getname(struct socket *sock, struct sockaddr *sockaddr,
int *sockaddr_len, int peer)
{
struct sockaddr_atmpvc *addr;
struct atm_vcc *vcc = ATM_SD(sock);
if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags))
return -ENOTCONN;
*sockaddr_len = sizeof(struct sockaddr_atmpvc);
addr = (struct sockaddr_atmpvc *)sockaddr;
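/* NOTE: the sockaddr buffer is never zeroed here, so struct padding in
sockaddr_atmpvc is copied back to userspace uninitialized - presumably
the CWE-200 information leak this "bad" sample demonstrates (the fixed
variant memset()s the structure first). */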
addr->sap_family = AF_ATMPVC;
addr->sap_addr.itf = vcc->dev->number;
addr->sap_addr.vpi = vcc->vpi;
addr->sap_addr.vci = vcc->vci;
return 0;
}
static const struct proto_ops pvc_proto_ops = {
.family = PF_ATMPVC,
.owner = THIS_MODULE,
.release = vcc_release,
.bind = pvc_bind,
.connect = pvc_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = pvc_getname,
.poll = vcc_poll,
.ioctl = vcc_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = vcc_compat_ioctl,
#endif
.listen = sock_no_listen,
.shutdown = pvc_shutdown,
.setsockopt = pvc_setsockopt,
.getsockopt = pvc_getsockopt,
.sendmsg = vcc_sendmsg,
.recvmsg = vcc_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
};
static int pvc_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
if (net != &init_net)
return -EAFNOSUPPORT;
sock->ops = &pvc_proto_ops;
return vcc_create(net, sock, protocol, PF_ATMPVC);
}
static const struct net_proto_family pvc_family_ops = {
.family = PF_ATMPVC,
.create = pvc_create,
.owner = THIS_MODULE,
};
/*
* Initialize the ATM PVC protocol family
*/
int __init atmpvc_init(void)
{
return sock_register(&pvc_family_ops);
}
void atmpvc_exit(void)
{
sock_unregister(PF_ATMPVC);
}
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_3838_0 |
crossvul-cpp_data_bad_1508_5 | /*
Copyright (C) 2009 RedHat inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <sys/statvfs.h>
#include "internal_libabrt.h"
int low_free_space(unsigned setting_MaxCrashReportsSize, const char *dump_location)
{
struct statvfs vfs;
if (statvfs(dump_location, &vfs) != 0)
{
perror_msg("statvfs('%s')", dump_location);
return 0;
}
/* Check that at least MaxCrashReportsSize/4 MBs are free */
/* fs_free_mb_x4 ~= vfs.f_bfree * vfs.f_bsize * 4, expressed in MBytes.
* Need to neither overflow nor round f_bfree down too much. */
unsigned long fs_free_mb_x4 = ((unsigned long long)vfs.f_bfree / (1024/4)) * vfs.f_bsize / 1024;
if (fs_free_mb_x4 < setting_MaxCrashReportsSize)
{
error_msg("Only %luMiB is available on %s",
fs_free_mb_x4 / 4, dump_location);
return 1;
}
return 0;
}
/* rhbz#539551: "abrt going crazy when crashing process is respawned".
* Check total size of problem dirs, if it overflows,
* delete oldest/biggest dirs.
*/
void trim_problem_dirs(const char *dirname, double cap_size, const char *exclude_path)
{
const char *excluded_basename = NULL;
if (exclude_path)
{
unsigned len_dirname = strlen(dirname);
/* Trim trailing '/'s, but don't trim the name "/" to "" */
while (len_dirname > 1 && dirname[len_dirname-1] == '/')
len_dirname--;
if (strncmp(dirname, exclude_path, len_dirname) == 0
&& exclude_path[len_dirname] == '/'
) {
/* exclude_path is "dirname/something" */
excluded_basename = exclude_path + len_dirname + 1;
}
}
log_debug("excluded_basename:'%s'", excluded_basename);
int count = 20;
while (--count >= 0)
{
/* We exclude our own dir from candidates for deletion (3rd param): */
char *worst_basename = NULL;
double cur_size = get_dirsize_find_largest_dir(dirname, &worst_basename, excluded_basename);
if (cur_size <= cap_size || !worst_basename)
{
log_info("cur_size:%.0f cap_size:%.0f, no (more) trimming", cur_size, cap_size);
free(worst_basename);
break;
}
log("%s is %.0f bytes (more than %.0fMiB), deleting '%s'",
dirname, cur_size, cap_size / (1024*1024), worst_basename);
char *d = concat_path_file(dirname, worst_basename);
free(worst_basename);
delete_dump_dir(d);
free(d);
}
}
/**
*
* @param[out] status See `man 2 wait` for status information.
* @return Malloc'ed string
*/
static char* exec_vp(char **args, int redirect_stderr, int exec_timeout_sec, int *status)
{
/* Nuke everything which may make setlocale() switch to non-POSIX locale:
* we need to avoid having gdb output in some obscure language.
*/
static const char *const env_vec[] = {
"LANG",
"LC_ALL",
"LC_COLLATE",
"LC_CTYPE",
"LC_MESSAGES",
"LC_MONETARY",
"LC_NUMERIC",
"LC_TIME",
/* Workaround for
* http://sourceware.org/bugzilla/show_bug.cgi?id=9622
* (gdb emitting ESC sequences even with -batch)
*/
"TERM",
NULL
};
int flags = EXECFLG_INPUT_NUL | EXECFLG_OUTPUT | EXECFLG_SETSID | EXECFLG_QUIET;
if (redirect_stderr)
flags |= EXECFLG_ERR2OUT;
VERB1 flags &= ~EXECFLG_QUIET;
int pipeout[2];
pid_t child = fork_execv_on_steroids(flags, args, pipeout, (char**)env_vec, /*dir:*/ NULL, /*uid(unused):*/ 0);
/* We use this function to run gdb and unstrip. Bugs in gdb or corrupted
* coredumps were observed to cause gdb to enter an infinite loop.
* Therefore we have a (largish) timeout, after which we kill the child.
*/
ndelay_on(pipeout[0]);
int t = time(NULL); /* int is enough, no need to use time_t */
int endtime = t + exec_timeout_sec;
struct strbuf *buf_out = strbuf_new();
while (1)
{
int timeout = endtime - t;
if (timeout < 0)
{
kill(child, SIGKILL);
strbuf_append_strf(buf_out, "\n"
"Timeout exceeded: %u seconds, killing %s.\n"
"Looks like gdb hung while generating backtrace.\n"
"This may be a bug in gdb. Consider submitting a bug report to gdb developers.\n"
"Please attach coredump from this crash to the bug report if you do.\n",
exec_timeout_sec, args[0]
);
break;
}
/* We don't check poll result - checking read result is enough */
struct pollfd pfd;
pfd.fd = pipeout[0];
pfd.events = POLLIN;
poll(&pfd, 1, timeout * 1000);
char buff[1024];
int r = read(pipeout[0], buff, sizeof(buff) - 1);
if (r <= 0)
{
/* I did see EAGAIN happening here */
if (r < 0 && errno == EAGAIN)
goto next;
break;
}
buff[r] = '\0';
strbuf_append_str(buf_out, buff);
next:
t = time(NULL);
}
close(pipeout[0]);
/* Prevent having a zombie child process, and maybe collect status
* (note that status == NULL is ok too) */
safe_waitpid(child, status, 0);
return strbuf_free_nobuf(buf_out);
}
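/* Run "eu-unstrip --core=<dump_dir>/coredump -n" with a timeout and
return its stdout as a malloc'ed string, or NULL if it timed out or
exited with a nonzero status. */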
char *run_unstrip_n(const char *dump_dir_name, unsigned timeout_sec)
{
int flags = EXECFLG_INPUT_NUL | EXECFLG_OUTPUT | EXECFLG_SETSID | EXECFLG_QUIET;
VERB1 flags &= ~EXECFLG_QUIET;
int pipeout[2];
char* args[4];
args[0] = (char*)"eu-unstrip";
args[1] = xasprintf("--core=%s/"FILENAME_COREDUMP, dump_dir_name);
args[2] = (char*)"-n";
args[3] = NULL;
pid_t child = fork_execv_on_steroids(flags, args, pipeout, /*env_vec:*/ NULL, /*dir:*/ NULL, /*uid(unused):*/ 0);
free(args[1]);
/* Bugs in unstrip or corrupted coredumps can cause it to enter an infinite loop.
* Therefore we have a (largish) timeout, after which we kill the child.
*/
ndelay_on(pipeout[0]);
int t = time(NULL); /* int is enough, no need to use time_t */
int endtime = t + timeout_sec;
struct strbuf *buf_out = strbuf_new();
while (1)
{
int timeout = endtime - t;
if (timeout < 0)
{
kill(child, SIGKILL);
strbuf_free(buf_out);
buf_out = NULL;
break;
}
/* We don't check poll result - checking read result is enough */
struct pollfd pfd;
pfd.fd = pipeout[0];
pfd.events = POLLIN;
poll(&pfd, 1, timeout * 1000);
char buff[1024];
int r = read(pipeout[0], buff, sizeof(buff) - 1);
if (r <= 0)
{
/* I did see EAGAIN happening here */
if (r < 0 && errno == EAGAIN)
goto next;
break;
}
buff[r] = '\0';
strbuf_append_str(buf_out, buff);
next:
t = time(NULL);
}
close(pipeout[0]);
/* Prevent having a zombie child process */
int status;
safe_waitpid(child, &status, 0);
if (status != 0 || buf_out == NULL)
{
/* unstrip didn't exit with exit code 0, or we timed out */
strbuf_free(buf_out);
return NULL;
}
return strbuf_free_nobuf(buf_out);
}
char *get_backtrace(const char *dump_dir_name, unsigned timeout_sec, const char *debuginfo_dirs)
{
INITIALIZE_LIBABRT();
struct dump_dir *dd = dd_opendir(dump_dir_name, /*flags:*/ 0);
if (!dd)
return NULL;
char *executable = dd_load_text(dd, FILENAME_EXECUTABLE);
dd_close(dd);
/* Let user know what's going on */
log(_("Generating backtrace"));
unsigned i = 0;
char *args[25];
args[i++] = (char*)"gdb";
args[i++] = (char*)"-batch";
struct strbuf *set_debug_file_directory = strbuf_new();
unsigned auto_load_base_index = 0;
if(debuginfo_dirs == NULL)
{
// set a non-existent debug file directory to prevent resolving
// function names - we need offsets for the core backtrace.
strbuf_append_str(set_debug_file_directory, "set debug-file-directory /");
}
else
{
strbuf_append_str(set_debug_file_directory, "set debug-file-directory /usr/lib/debug");
struct strbuf *debug_directories = strbuf_new();
const char *p = debuginfo_dirs;
while (1)
{
while (*p == ':')
p++;
if (*p == '\0')
break;
const char *colon_or_nul = strchrnul(p, ':');
strbuf_append_strf(debug_directories, "%s%.*s/usr/lib/debug", (debug_directories->len == 0 ? "" : ":"),
(int)(colon_or_nul - p), p);
p = colon_or_nul;
}
strbuf_append_strf(set_debug_file_directory, ":%s", debug_directories->buf);
args[i++] = (char*)"-iex";
auto_load_base_index = i;
args[i++] = xasprintf("add-auto-load-safe-path %s", debug_directories->buf);
args[i++] = (char*)"-iex";
args[i++] = xasprintf("add-auto-load-scripts-directory %s", debug_directories->buf);
strbuf_free(debug_directories);
}
args[i++] = (char*)"-ex";
const unsigned debug_dir_cmd_index = i++;
args[debug_dir_cmd_index] = strbuf_free_nobuf(set_debug_file_directory);
/* "file BINARY_FILE" is needed, without it gdb cannot properly
* unwind the stack. Currently the unwind information is located
* in .eh_frame, which is stored only in the binary, not in the
* coredump or debuginfo.
*
* Fedora GDB does not strictly need it, it will find the binary
* by its build-id. But for binaries either without a build-id
* (= built on non-Fedora GCC) or which do not have
* their debuginfo rpm installed, gdb would not find BINARY_FILE,
* so it still makes sense to supply "file BINARY_FILE".
*
* Unfortunately, "file BINARY_FILE" doesn't work well if BINARY_FILE
* was deleted (as often happens during system updates):
* gdb uses the specified BINARY_FILE
* even if it is completely unrelated to the coredump.
* See https://bugzilla.redhat.com/show_bug.cgi?id=525721
*
* TODO: check mtimes on COREFILE and BINARY_FILE and not supply
* BINARY_FILE if it is newer (to at least avoid gdb complaining).
*/
args[i++] = (char*)"-ex";
const unsigned file_cmd_index = i++;
args[file_cmd_index] = xasprintf("file %s", executable);
free(executable);
args[i++] = (char*)"-ex";
const unsigned core_cmd_index = i++;
args[core_cmd_index] = xasprintf("core-file %s/"FILENAME_COREDUMP, dump_dir_name);
args[i++] = (char*)"-ex";
const unsigned bt_cmd_index = i++;
/*args[9] = ... see below */
args[i++] = (char*)"-ex";
args[i++] = (char*)"info sharedlib";
/* glibc's abort() stores its message in the __abort_msg variable */
args[i++] = (char*)"-ex";
args[i++] = (char*)"print (char*)__abort_msg";
args[i++] = (char*)"-ex";
args[i++] = (char*)"print (char*)__glib_assert_msg";
args[i++] = (char*)"-ex";
args[i++] = (char*)"info all-registers";
args[i++] = (char*)"-ex";
const unsigned dis_cmd_index = i++;
args[dis_cmd_index] = (char*)"disassemble";
args[i++] = NULL;
/* Get the backtrace, but try to cap its size */
/* Limit bt depth. With no limit, gdb sometimes OOMs the machine */
unsigned bt_depth = 1024;
const char *thread_apply_all = "thread apply all";
const char *full = " full";
char *bt = NULL;
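/* Retry loop: halve bt_depth until gdb's output fits in ~256KiB (or the
depth drops to 32), progressively dropping "thread apply all" and the
"full" modifier once the depth gets small, since those are the usual
size offenders. */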
while (1)
{
args[bt_cmd_index] = xasprintf("%s backtrace %u%s", thread_apply_all, bt_depth, full);
bt = exec_vp(args, /*redirect_stderr:*/ 1, timeout_sec, NULL);
free(args[bt_cmd_index]);
if ((bt && strnlen(bt, 256*1024) < 256*1024) || bt_depth <= 32)
{
break;
}
bt_depth /= 2;
if (bt)
log("Backtrace is too big (%u bytes), reducing depth to %u",
(unsigned)strlen(bt), bt_depth);
else
/* (NB: in fact, current impl. of exec_vp() never returns NULL) */
log("Failed to generate backtrace, reducing depth to %u",
bt_depth);
free(bt);
/* Replace -ex disassemble (which disassembles the entire function $pc
* points to) with a version which analyzes only a limited, small patch
* of code around $pc.
* (Users reported a case where bare "disassemble" attempted to process
* the entire .bss.)
* TODO: what if "$pc-N" underflows? in my test, this happens:
* Dump of assembler code from 0xfffffffffffffff0 to 0x30:
* End of assembler dump.
* (IOW: "empty" dump)
*/
args[dis_cmd_index] = (char*)"disassemble $pc-20, $pc+64";
if (bt_depth <= 64 && thread_apply_all[0] != '\0')
{
/* This program likely has a gazillion threads, don't try to backtrace them all */
bt_depth = 128;
thread_apply_all = "";
}
if (bt_depth <= 64 && full[0] != '\0')
{
/* Looks like there are gigantic local structures or arrays, disable the "full" backtrace */
bt_depth = 128;
full = "";
}
}
if (auto_load_base_index > 0)
{
free(args[auto_load_base_index]);
free(args[auto_load_base_index + 2]);
}
free(args[debug_dir_cmd_index]);
free(args[file_cmd_index]);
free(args[core_cmd_index]);
return bt;
}
char* problem_data_save(problem_data_t *pd)
{
load_abrt_conf();
struct dump_dir *dd = create_dump_dir_from_problem_data(pd, g_settings_dump_location);
char *problem_id = NULL;
if (dd)
{
problem_id = xstrdup(dd->dd_dirname);
dd_close(dd);
}
log_info("problem id: '%s'", problem_id);
return problem_id;
}
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_1508_5 |
crossvul-cpp_data_good_1781_0 | /*
* Copyright (C) 2007 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/mount.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "volumes.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "backref.h"
#include "hash.h"
#include "props.h"
#include "qgroup.h"
struct btrfs_iget_args {
struct btrfs_key *location;
struct btrfs_root *root;
};
static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;
static struct kmem_cache *btrfs_inode_cachep;
static struct kmem_cache *btrfs_delalloc_work_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;
#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
[S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE,
[S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR,
[S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV,
[S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV,
[S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO,
[S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK,
[S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
};
static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
static noinline int cow_file_range(struct inode *inode,
struct page *locked_page,
u64 start, u64 end, int *page_started,
unsigned long *nr_written, int unlock);
static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
u64 len, u64 orig_start,
u64 block_start, u64 block_len,
u64 orig_block_len, u64 ram_bytes,
int type);
static int btrfs_dirty_inode(struct inode *inode);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_inode_set_ops(struct inode *inode)
{
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
}
#endif
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
struct inode *inode, struct inode *dir,
const struct qstr *qstr)
{
int err;
err = btrfs_init_acl(trans, inode, dir);
if (!err)
err = btrfs_xattr_security_init(trans, inode, dir, qstr);
return err;
}
/*
* this does all the hard work for inserting an inline extent into
* the btree. The caller should have done a btrfs_drop_extents so that
* no overlapping inline items exist in the btree
*/
static int insert_inline_extent(struct btrfs_trans_handle *trans,
struct btrfs_path *path, int extent_inserted,
struct btrfs_root *root, struct inode *inode,
u64 start, size_t size, size_t compressed_size,
int compress_type,
struct page **compressed_pages)
{
struct extent_buffer *leaf;
struct page *page = NULL;
char *kaddr;
unsigned long ptr;
struct btrfs_file_extent_item *ei;
int err = 0;
int ret;
size_t cur_size = size;
unsigned long offset;
if (compressed_size && compressed_pages)
cur_size = compressed_size;
inode_add_bytes(inode, size);
if (!extent_inserted) {
struct btrfs_key key;
size_t datasize;
key.objectid = btrfs_ino(inode);
key.offset = start;
key.type = BTRFS_EXTENT_DATA_KEY;
datasize = btrfs_file_extent_calc_inline_size(cur_size);
path->leave_spinning = 1;
ret = btrfs_insert_empty_item(trans, root, path, &key,
datasize);
if (ret) {
err = ret;
goto fail;
}
}
leaf = path->nodes[0];
ei = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
btrfs_set_file_extent_generation(leaf, ei, trans->transid);
btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
btrfs_set_file_extent_encryption(leaf, ei, 0);
btrfs_set_file_extent_other_encoding(leaf, ei, 0);
btrfs_set_file_extent_ram_bytes(leaf, ei, size);
ptr = btrfs_file_extent_inline_start(ei);
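/* Copy the payload into the leaf item: compressed data is streamed in
page by page from compressed_pages, uncompressed data is copied
straight out of the page cache. */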
if (compress_type != BTRFS_COMPRESS_NONE) {
struct page *cpage;
int i = 0;
while (compressed_size > 0) {
cpage = compressed_pages[i];
cur_size = min_t(unsigned long, compressed_size,
PAGE_CACHE_SIZE);
kaddr = kmap_atomic(cpage);
write_extent_buffer(leaf, kaddr, ptr, cur_size);
kunmap_atomic(kaddr);
i++;
ptr += cur_size;
compressed_size -= cur_size;
}
btrfs_set_file_extent_compression(leaf, ei,
compress_type);
} else {
page = find_get_page(inode->i_mapping,
start >> PAGE_CACHE_SHIFT);
btrfs_set_file_extent_compression(leaf, ei, 0);
kaddr = kmap_atomic(page);
offset = start & (PAGE_CACHE_SIZE - 1);
write_extent_buffer(leaf, kaddr + offset, ptr, size);
kunmap_atomic(kaddr);
page_cache_release(page);
}
btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(path);
/*
* we're an inline extent, so nobody can
* extend the file past i_size without locking
* a page we already have locked.
*
* We must do any isize and inode updates
* before we unlock the pages. Otherwise we
* could end up racing with unlink.
*/
BTRFS_I(inode)->disk_i_size = inode->i_size;
ret = btrfs_update_inode(trans, root, inode);
return ret;
fail:
return err;
}
/*
* conditionally insert an inline extent into the file. This
* does the checks required to make sure the data is small enough
* to fit as an inline extent.
*/
static noinline int cow_file_range_inline(struct btrfs_root *root,
struct inode *inode, u64 start,
u64 end, size_t compressed_size,
int compress_type,
struct page **compressed_pages)
{
struct btrfs_trans_handle *trans;
u64 isize = i_size_read(inode);
u64 actual_end = min(end + 1, isize);
u64 inline_len = actual_end - start;
u64 aligned_end = ALIGN(end, root->sectorsize);
u64 data_len = inline_len;
int ret;
struct btrfs_path *path;
int extent_inserted = 0;
u32 extent_item_size;
if (compressed_size)
data_len = compressed_size;
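/*
 * An extent can only be inlined when it starts at file offset 0, fits
 * in a single page, fits in BTRFS_MAX_INLINE_DATA_SIZE() and the
 * max_inline mount option, covers the tail of the file, and (when
 * uncompressed) does not end exactly on a sector boundary.
 */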
if (start > 0 ||
actual_end > PAGE_CACHE_SIZE ||
data_len > BTRFS_MAX_INLINE_DATA_SIZE(root) ||
(!compressed_size &&
(actual_end & (root->sectorsize - 1)) == 0) ||
end + 1 < isize ||
data_len > root->fs_info->max_inline) {
return 1;
}
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
btrfs_free_path(path);
return PTR_ERR(trans);
}
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
if (compressed_size && compressed_pages)
extent_item_size = btrfs_file_extent_calc_inline_size(
compressed_size);
else
extent_item_size = btrfs_file_extent_calc_inline_size(
inline_len);
ret = __btrfs_drop_extents(trans, root, inode, path,
start, aligned_end, NULL,
1, 1, extent_item_size, &extent_inserted);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto out;
}
if (isize > actual_end)
inline_len = min_t(u64, isize, actual_end);
ret = insert_inline_extent(trans, path, extent_inserted,
root, inode, start,
inline_len, compressed_size,
compress_type, compressed_pages);
if (ret && ret != -ENOSPC) {
btrfs_abort_transaction(trans, root, ret);
goto out;
} else if (ret == -ENOSPC) {
ret = 1;
goto out;
}
set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
btrfs_delalloc_release_metadata(inode, end + 1 - start);
btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
out:
btrfs_free_path(path);
btrfs_end_transaction(trans, root);
return ret;
}
struct async_extent {
u64 start;
u64 ram_size;
u64 compressed_size;
struct page **pages;
unsigned long nr_pages;
int compress_type;
struct list_head list;
};
struct async_cow {
struct inode *inode;
struct btrfs_root *root;
struct page *locked_page;
u64 start;
u64 end;
struct list_head extents;
struct btrfs_work work;
};
static noinline int add_async_extent(struct async_cow *cow,
u64 start, u64 ram_size,
u64 compressed_size,
struct page **pages,
unsigned long nr_pages,
int compress_type)
{
struct async_extent *async_extent;
async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
BUG_ON(!async_extent); /* -ENOMEM */
async_extent->start = start;
async_extent->ram_size = ram_size;
async_extent->compressed_size = compressed_size;
async_extent->pages = pages;
async_extent->nr_pages = nr_pages;
async_extent->compress_type = compress_type;
list_add_tail(&async_extent->list, &cow->extents);
return 0;
}
static inline int inode_need_compress(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
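/*
 * precedence: -o compress-force always wins, the per-inode
 * NOCOMPRESS flag then opts out, and otherwise -o compress, the
 * per-inode COMPRESS flag, or a force_compress request opts in.
 */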
/* force compress */
if (btrfs_test_opt(root, FORCE_COMPRESS))
return 1;
/* bad compression ratios */
if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
return 0;
if (btrfs_test_opt(root, COMPRESS) ||
BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS ||
BTRFS_I(inode)->force_compress)
return 1;
return 0;
}
/*
* we create compressed extents in two phases. The first
* phase compresses a range of pages that have already been
* locked (both pages and state bits are locked).
*
* This is done inside an ordered work queue, and the compression
* is spread across many cpus. The actual IO submission is step
* two, and the ordered work queue takes care of making sure that
* happens in the same order things were put onto the queue by
* writepages and friends.
*
* If this code finds it can't get good compression, it puts an
* entry onto the work queue to write the uncompressed bytes. This
* makes sure that both compressed inodes and uncompressed inodes
* are written in the same order that the flusher thread sent them
* down.
*/
static noinline void compress_file_range(struct inode *inode,
struct page *locked_page,
u64 start, u64 end,
struct async_cow *async_cow,
int *num_added)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 num_bytes;
u64 blocksize = root->sectorsize;
u64 actual_end;
u64 isize = i_size_read(inode);
int ret = 0;
struct page **pages = NULL;
unsigned long nr_pages;
unsigned long nr_pages_ret = 0;
unsigned long total_compressed = 0;
unsigned long total_in = 0;
unsigned long max_compressed = 128 * 1024;
unsigned long max_uncompressed = 128 * 1024;
int i;
int will_compress;
int compress_type = root->fs_info->compress_type;
int redirty = 0;
/* if this is a small write inside eof, kick off a defrag */
if ((end - start + 1) < 16 * 1024 &&
(start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
btrfs_add_inode_defrag(NULL, inode);
actual_end = min_t(u64, isize, end + 1);
again:
will_compress = 0;
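/* the 128k compression window caps this at 32 pages with 4k pages */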
nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
/*
* we don't want to send crud past the end of i_size through
* compression, that's just a waste of CPU time. So, if the
* end of the file is before the start of our current
* requested range of bytes, we bail out to the uncompressed
* cleanup code that can deal with all of this.
*
* It isn't really the fastest way to fix things, but this is a
* very uncommon corner.
*/
if (actual_end <= start)
goto cleanup_and_bail_uncompressed;
total_compressed = actual_end - start;
/*
* skip compression for a small file range (<= blocksize) that
* isn't an inline extent, since it doesn't save disk space at all.
*/
if (total_compressed <= blocksize &&
(start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
goto cleanup_and_bail_uncompressed;
/* we want to make sure that amount of ram required to uncompress
* an extent is reasonable, so we limit the total size in ram
* of a compressed extent to 128k. This is a crucial number
* because it also controls how easily we can spread reads across
* cpus for decompression.
*
* We also want to make sure the amount of IO required to do
* a random read is reasonably small, so we limit the size of
* a compressed extent to 128k.
*/
total_compressed = min(total_compressed, max_uncompressed);
num_bytes = ALIGN(end - start + 1, blocksize);
num_bytes = max(blocksize, num_bytes);
total_in = 0;
ret = 0;
/*
* we do compression for mount -o compress and when the
* inode has not been flagged as nocompress. This flag can
* change at any time if we discover bad compression ratios.
*/
if (inode_need_compress(inode)) {
WARN_ON(pages);
pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
if (!pages) {
/* just bail out to the uncompressed code */
goto cont;
}
if (BTRFS_I(inode)->force_compress)
compress_type = BTRFS_I(inode)->force_compress;
/*
* we need to call clear_page_dirty_for_io on each
* page in the range. Otherwise applications with the file
* mmap'd can wander in and change the page contents while
* we are compressing them.
*
* If the compression fails for any reason, we set the pages
* dirty again later on.
*/
extent_range_clear_dirty_for_io(inode, start, end);
redirty = 1;
ret = btrfs_compress_pages(compress_type,
inode->i_mapping, start,
total_compressed, pages,
nr_pages, &nr_pages_ret,
&total_in,
&total_compressed,
max_compressed);
if (!ret) {
unsigned long offset = total_compressed &
(PAGE_CACHE_SIZE - 1);
struct page *page = pages[nr_pages_ret - 1];
char *kaddr;
/* zero the tail end of the last page, we might be
* sending it down to disk
*/
if (offset) {
kaddr = kmap_atomic(page);
memset(kaddr + offset, 0,
PAGE_CACHE_SIZE - offset);
kunmap_atomic(kaddr);
}
will_compress = 1;
}
}
cont:
if (start == 0) {
/* lets try to make an inline extent */
if (ret || total_in < (actual_end - start)) {
/* we didn't compress the entire range, try
* to make an uncompressed inline extent.
*/
ret = cow_file_range_inline(root, inode, start, end,
0, 0, NULL);
} else {
/* try making a compressed inline extent */
ret = cow_file_range_inline(root, inode, start, end,
total_compressed,
compress_type, pages);
}
if (ret <= 0) {
unsigned long clear_flags = EXTENT_DELALLOC |
EXTENT_DEFRAG;
unsigned long page_error_op;
clear_flags |= (ret < 0) ? EXTENT_DO_ACCOUNTING : 0;
page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;
/*
* inline extent creation worked or returned error,
* we don't need to create any more async work items.
* Unlock and free up our temp pages.
*/
extent_clear_unlock_delalloc(inode, start, end, NULL,
clear_flags, PAGE_UNLOCK |
PAGE_CLEAR_DIRTY |
PAGE_SET_WRITEBACK |
page_error_op |
PAGE_END_WRITEBACK);
goto free_pages_out;
}
}
if (will_compress) {
/*
* we aren't doing an inline extent; round the compressed size
* up to a block size boundary so the allocator does sane
* things
*/
total_compressed = ALIGN(total_compressed, blocksize);
/*
* one last check to make sure the compression is really a
* win, compare the page count read with the blocks on disk
*/
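/*
 * Worked example, assuming 4k pages and a 4k blocksize: if 12288
 * bytes of input (total_in rounds to 3 pages) compress to 8192
 * bytes, 8192 < 12288 and we keep the compressed copy. If they
 * compress to 12000 bytes, that rounds up to 12288 on disk, nothing
 * is saved, and will_compress is cleared below.
 */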
total_in = ALIGN(total_in, PAGE_CACHE_SIZE);
if (total_compressed >= total_in) {
will_compress = 0;
} else {
num_bytes = total_in;
}
}
if (!will_compress && pages) {
/*
* the compression code ran but failed to make things smaller,
* free any pages it allocated and our page pointer array
*/
for (i = 0; i < nr_pages_ret; i++) {
WARN_ON(pages[i]->mapping);
page_cache_release(pages[i]);
}
kfree(pages);
pages = NULL;
total_compressed = 0;
nr_pages_ret = 0;
/* flag the file so we don't compress in the future */
if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
!(BTRFS_I(inode)->force_compress)) {
BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
}
}
if (will_compress) {
*num_added += 1;
/* the async work queues will take care of doing actual
* allocation on disk for these compressed pages,
* and will submit them to the elevator.
*/
add_async_extent(async_cow, start, num_bytes,
total_compressed, pages, nr_pages_ret,
compress_type);
if (start + num_bytes < end) {
start += num_bytes;
pages = NULL;
cond_resched();
goto again;
}
} else {
cleanup_and_bail_uncompressed:
/*
* No compression, but we still need to write the pages in
* the file we've been given so far. redirty the locked
* page if it corresponds to our extent and set things up
* for the async work queue to run cow_file_range to do
* the normal delalloc dance
*/
if (page_offset(locked_page) >= start &&
page_offset(locked_page) <= end) {
__set_page_dirty_nobuffers(locked_page);
/* unlocked later on in the async handlers */
}
if (redirty)
extent_range_redirty_for_io(inode, start, end);
add_async_extent(async_cow, start, end - start + 1,
0, NULL, 0, BTRFS_COMPRESS_NONE);
*num_added += 1;
}
return;
free_pages_out:
for (i = 0; i < nr_pages_ret; i++) {
WARN_ON(pages[i]->mapping);
page_cache_release(pages[i]);
}
kfree(pages);
}
static void free_async_extent_pages(struct async_extent *async_extent)
{
int i;
if (!async_extent->pages)
return;
for (i = 0; i < async_extent->nr_pages; i++) {
WARN_ON(async_extent->pages[i]->mapping);
page_cache_release(async_extent->pages[i]);
}
kfree(async_extent->pages);
async_extent->nr_pages = 0;
async_extent->pages = NULL;
}
/*
* phase two of compressed writeback. This is the ordered portion
* of the code, which only gets called in the order the work was
* queued. We walk all the async extents created by compress_file_range
* and send them down to the disk.
*/
static noinline void submit_compressed_extents(struct inode *inode,
struct async_cow *async_cow)
{
struct async_extent *async_extent;
u64 alloc_hint = 0;
struct btrfs_key ins;
struct extent_map *em;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct extent_io_tree *io_tree;
int ret = 0;
again:
while (!list_empty(&async_cow->extents)) {
async_extent = list_entry(async_cow->extents.next,
struct async_extent, list);
list_del(&async_extent->list);
io_tree = &BTRFS_I(inode)->io_tree;
retry:
/* did the compression code fall back to uncompressed IO? */
if (!async_extent->pages) {
int page_started = 0;
unsigned long nr_written = 0;
lock_extent(io_tree, async_extent->start,
async_extent->start +
async_extent->ram_size - 1);
/* allocate blocks */
ret = cow_file_range(inode, async_cow->locked_page,
async_extent->start,
async_extent->start +
async_extent->ram_size - 1,
&page_started, &nr_written, 0);
/* JDM XXX */
/*
* if page_started, cow_file_range inserted an
* inline extent and took care of all the unlocking
* and IO for us. Otherwise, we need to submit
* all those pages down to the drive.
*/
if (!page_started && !ret)
extent_write_locked_range(io_tree,
inode, async_extent->start,
async_extent->start +
async_extent->ram_size - 1,
btrfs_get_extent,
WB_SYNC_ALL);
else if (ret)
unlock_page(async_cow->locked_page);
kfree(async_extent);
cond_resched();
continue;
}
lock_extent(io_tree, async_extent->start,
async_extent->start + async_extent->ram_size - 1);
ret = btrfs_reserve_extent(root,
async_extent->compressed_size,
async_extent->compressed_size,
0, alloc_hint, &ins, 1, 1);
if (ret) {
free_async_extent_pages(async_extent);
if (ret == -ENOSPC) {
unlock_extent(io_tree, async_extent->start,
async_extent->start +
async_extent->ram_size - 1);
/*
* we need to redirty the pages if we decide to
* fallback to uncompressed IO, otherwise we
* will not submit these pages down to lower
* layers.
*/
extent_range_redirty_for_io(inode,
async_extent->start,
async_extent->start +
async_extent->ram_size - 1);
goto retry;
}
goto out_free;
}
/*
* here we're doing allocation and writeback of the
* compressed pages
*/
btrfs_drop_extent_cache(inode, async_extent->start,
async_extent->start +
async_extent->ram_size - 1, 0);
em = alloc_extent_map();
if (!em) {
ret = -ENOMEM;
goto out_free_reserve;
}
em->start = async_extent->start;
em->len = async_extent->ram_size;
em->orig_start = em->start;
em->mod_start = em->start;
em->mod_len = em->len;
em->block_start = ins.objectid;
em->block_len = ins.offset;
em->orig_block_len = ins.offset;
em->ram_bytes = async_extent->ram_size;
em->bdev = root->fs_info->fs_devices->latest_bdev;
em->compress_type = async_extent->compress_type;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
em->generation = -1;
while (1) {
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em, 1);
write_unlock(&em_tree->lock);
if (ret != -EEXIST) {
free_extent_map(em);
break;
}
btrfs_drop_extent_cache(inode, async_extent->start,
async_extent->start +
async_extent->ram_size - 1, 0);
}
if (ret)
goto out_free_reserve;
ret = btrfs_add_ordered_extent_compress(inode,
async_extent->start,
ins.objectid,
async_extent->ram_size,
ins.offset,
BTRFS_ORDERED_COMPRESSED,
async_extent->compress_type);
if (ret) {
btrfs_drop_extent_cache(inode, async_extent->start,
async_extent->start +
async_extent->ram_size - 1, 0);
goto out_free_reserve;
}
/*
* clear dirty, set writeback and unlock the pages.
*/
extent_clear_unlock_delalloc(inode, async_extent->start,
async_extent->start +
async_extent->ram_size - 1,
NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
PAGE_SET_WRITEBACK);
ret = btrfs_submit_compressed_write(inode,
async_extent->start,
async_extent->ram_size,
ins.objectid,
ins.offset, async_extent->pages,
async_extent->nr_pages);
if (ret) {
struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
struct page *p = async_extent->pages[0];
const u64 start = async_extent->start;
const u64 end = start + async_extent->ram_size - 1;
p->mapping = inode->i_mapping;
tree->ops->writepage_end_io_hook(p, start, end,
NULL, 0);
p->mapping = NULL;
extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
PAGE_END_WRITEBACK |
PAGE_SET_ERROR);
free_async_extent_pages(async_extent);
}
alloc_hint = ins.objectid + ins.offset;
kfree(async_extent);
cond_resched();
}
return;
out_free_reserve:
btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
out_free:
extent_clear_unlock_delalloc(inode, async_extent->start,
async_extent->start +
async_extent->ram_size - 1,
NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
PAGE_SET_ERROR);
free_async_extent_pages(async_extent);
kfree(async_extent);
goto again;
}
static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
u64 num_bytes)
{
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct extent_map *em;
u64 alloc_hint = 0;
read_lock(&em_tree->lock);
em = search_extent_mapping(em_tree, start, num_bytes);
if (em) {
/*
* if block start isn't an actual block number then find the
* first block in this inode and use that as a hint. If that
* block is also bogus then just don't worry about it.
*/
if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
free_extent_map(em);
em = search_extent_mapping(em_tree, 0, 0);
if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
alloc_hint = em->block_start;
if (em)
free_extent_map(em);
} else {
alloc_hint = em->block_start;
free_extent_map(em);
}
}
read_unlock(&em_tree->lock);
return alloc_hint;
}
/*
* when extent_io.c finds a delayed allocation range in the file,
* the callbacks end up in this code. The basic idea is to
* allocate extents on disk for the range, and create ordered data structs
* in ram to track those extents.
*
* locked_page is the page that writepage had locked already. We use
* it to make sure we don't do extra locks or unlocks.
*
* *page_started is set to one if we unlock locked_page and do everything
* required to start IO on it. It may be clean and already done with
* IO when we return.
*/
static noinline int cow_file_range(struct inode *inode,
struct page *locked_page,
u64 start, u64 end, int *page_started,
unsigned long *nr_written,
int unlock)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 alloc_hint = 0;
u64 num_bytes;
unsigned long ram_size;
u64 disk_num_bytes;
u64 cur_alloc_size;
u64 blocksize = root->sectorsize;
struct btrfs_key ins;
struct extent_map *em;
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
int ret = 0;
if (btrfs_is_free_space_inode(inode)) {
WARN_ON_ONCE(1);
ret = -EINVAL;
goto out_unlock;
}
num_bytes = ALIGN(end - start + 1, blocksize);
num_bytes = max(blocksize, num_bytes);
disk_num_bytes = num_bytes;
/* if this is a small write inside eof, kick off defrag */
if (num_bytes < 64 * 1024 &&
(start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
btrfs_add_inode_defrag(NULL, inode);
if (start == 0) {
/* lets try to make an inline extent */
ret = cow_file_range_inline(root, inode, start, end, 0, 0,
NULL);
if (ret == 0) {
extent_clear_unlock_delalloc(inode, start, end, NULL,
EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_DEFRAG, PAGE_UNLOCK |
PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
PAGE_END_WRITEBACK);
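/* count the whole range, rounded up to full pages, as written */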
*nr_written = *nr_written +
(end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
*page_started = 1;
goto out;
} else if (ret < 0) {
goto out_unlock;
}
}
BUG_ON(disk_num_bytes >
btrfs_super_total_bytes(root->fs_info->super_copy));
alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
while (disk_num_bytes > 0) {
unsigned long op;
cur_alloc_size = disk_num_bytes;
ret = btrfs_reserve_extent(root, cur_alloc_size,
root->sectorsize, 0, alloc_hint,
&ins, 1, 1);
if (ret < 0)
goto out_unlock;
em = alloc_extent_map();
if (!em) {
ret = -ENOMEM;
goto out_reserve;
}
em->start = start;
em->orig_start = em->start;
ram_size = ins.offset;
em->len = ins.offset;
em->mod_start = em->start;
em->mod_len = em->len;
em->block_start = ins.objectid;
em->block_len = ins.offset;
em->orig_block_len = ins.offset;
em->ram_bytes = ram_size;
em->bdev = root->fs_info->fs_devices->latest_bdev;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
em->generation = -1;
while (1) {
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em, 1);
write_unlock(&em_tree->lock);
if (ret != -EEXIST) {
free_extent_map(em);
break;
}
btrfs_drop_extent_cache(inode, start,
start + ram_size - 1, 0);
}
if (ret)
goto out_reserve;
cur_alloc_size = ins.offset;
ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
ram_size, cur_alloc_size, 0);
if (ret)
goto out_drop_extent_cache;
if (root->root_key.objectid ==
BTRFS_DATA_RELOC_TREE_OBJECTID) {
ret = btrfs_reloc_clone_csums(inode, start,
cur_alloc_size);
if (ret)
goto out_drop_extent_cache;
}
if (disk_num_bytes < cur_alloc_size)
break;
/* we're not doing compressed IO, don't unlock the first
* page (which the caller expects to stay locked), don't
* clear any dirty bits and don't set any writeback bits
*
* Do set the Private2 bit so we know this page was properly
* setup for writepage
*/
op = unlock ? PAGE_UNLOCK : 0;
op |= PAGE_SET_PRIVATE2;
extent_clear_unlock_delalloc(inode, start,
start + ram_size - 1, locked_page,
EXTENT_LOCKED | EXTENT_DELALLOC,
op);
disk_num_bytes -= cur_alloc_size;
num_bytes -= cur_alloc_size;
alloc_hint = ins.objectid + ins.offset;
start += cur_alloc_size;
}
out:
return ret;
out_drop_extent_cache:
btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0);
out_reserve:
btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
out_unlock:
extent_clear_unlock_delalloc(inode, start, end, locked_page,
EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
EXTENT_DELALLOC | EXTENT_DEFRAG,
PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK);
goto out;
}
/*
* work queue callback to start compression on a file and pages
*/
static noinline void async_cow_start(struct btrfs_work *work)
{
struct async_cow *async_cow;
int num_added = 0;
async_cow = container_of(work, struct async_cow, work);
compress_file_range(async_cow->inode, async_cow->locked_page,
async_cow->start, async_cow->end, async_cow,
&num_added);
if (num_added == 0) {
btrfs_add_delayed_iput(async_cow->inode);
async_cow->inode = NULL;
}
}
/*
* work queue callback to submit previously compressed pages
*/
static noinline void async_cow_submit(struct btrfs_work *work)
{
struct async_cow *async_cow;
struct btrfs_root *root;
unsigned long nr_pages;
async_cow = container_of(work, struct async_cow, work);
root = async_cow->root;
nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
PAGE_CACHE_SHIFT;
/*
* atomic_sub_return implies a barrier for waitqueue_active
*/
if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
5 * 1024 * 1024 &&
waitqueue_active(&root->fs_info->async_submit_wait))
wake_up(&root->fs_info->async_submit_wait);
if (async_cow->inode)
submit_compressed_extents(async_cow->inode, async_cow);
}
static noinline void async_cow_free(struct btrfs_work *work)
{
struct async_cow *async_cow;
async_cow = container_of(work, struct async_cow, work);
if (async_cow->inode)
btrfs_add_delayed_iput(async_cow->inode);
kfree(async_cow);
}
static int cow_file_range_async(struct inode *inode, struct page *locked_page,
u64 start, u64 end, int *page_started,
unsigned long *nr_written)
{
struct async_cow *async_cow;
struct btrfs_root *root = BTRFS_I(inode)->root;
unsigned long nr_pages;
u64 cur_end;
int limit = 10 * 1024 * 1024;
clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
1, 0, NULL, GFP_NOFS);
while (start < end) {
async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
BUG_ON(!async_cow); /* -ENOMEM */
async_cow->inode = igrab(inode);
async_cow->root = root;
async_cow->locked_page = locked_page;
async_cow->start = start;
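/*
 * carve the range into 512k chunks so compression work spreads
 * across the worker threads; if this inode opted out of compression
 * (and -o compress-force is not set) one chunk covers everything.
 */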
if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
!btrfs_test_opt(root, FORCE_COMPRESS))
cur_end = end;
else
cur_end = min(end, start + 512 * 1024 - 1);
async_cow->end = cur_end;
INIT_LIST_HEAD(&async_cow->extents);
btrfs_init_work(&async_cow->work,
btrfs_delalloc_helper,
async_cow_start, async_cow_submit,
async_cow_free);
nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
PAGE_CACHE_SHIFT;
atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
btrfs_queue_work(root->fs_info->delalloc_workers,
&async_cow->work);
if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
wait_event(root->fs_info->async_submit_wait,
(atomic_read(&root->fs_info->async_delalloc_pages) <
limit));
}
while (atomic_read(&root->fs_info->async_submit_draining) &&
atomic_read(&root->fs_info->async_delalloc_pages)) {
wait_event(root->fs_info->async_submit_wait,
(atomic_read(&root->fs_info->async_delalloc_pages) ==
0));
}
*nr_written += nr_pages;
start = cur_end + 1;
}
*page_started = 1;
return 0;
}
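/*
 * returns 0 when no csums exist for the byte range, 1 (after freeing
 * any entries found) when they do; run_delalloc_nocow uses this to
 * avoid nocow-writing over checksummed extents.
 */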
static noinline int csum_exist_in_range(struct btrfs_root *root,
u64 bytenr, u64 num_bytes)
{
int ret;
struct btrfs_ordered_sum *sums;
LIST_HEAD(list);
ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
bytenr + num_bytes - 1, &list, 0);
if (ret == 0 && list_empty(&list))
return 0;
while (!list_empty(&list)) {
sums = list_entry(list.next, struct btrfs_ordered_sum, list);
list_del(&sums->list);
kfree(sums);
}
return 1;
}
/*
* the nocow writeback callback. This checks for snapshots or COW copies
* of the extents that exist in the file, and COWs the file as required.
*
* If no cow copies or snapshots exist, we write directly to the existing
* blocks on disk
*/
static noinline int run_delalloc_nocow(struct inode *inode,
struct page *locked_page,
u64 start, u64 end, int *page_started, int force,
unsigned long *nr_written)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
struct extent_buffer *leaf;
struct btrfs_path *path;
struct btrfs_file_extent_item *fi;
struct btrfs_key found_key;
u64 cow_start;
u64 cur_offset;
u64 extent_end;
u64 extent_offset;
u64 disk_bytenr;
u64 num_bytes;
u64 disk_num_bytes;
u64 ram_bytes;
int extent_type;
int ret, err;
int type;
int nocow;
int check_prev = 1;
bool nolock;
u64 ino = btrfs_ino(inode);
path = btrfs_alloc_path();
if (!path) {
extent_clear_unlock_delalloc(inode, start, end, locked_page,
EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING |
EXTENT_DEFRAG, PAGE_UNLOCK |
PAGE_CLEAR_DIRTY |
PAGE_SET_WRITEBACK |
PAGE_END_WRITEBACK);
return -ENOMEM;
}
nolock = btrfs_is_free_space_inode(inode);
if (nolock)
trans = btrfs_join_transaction_nolock(root);
else
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
extent_clear_unlock_delalloc(inode, start, end, locked_page,
EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING |
EXTENT_DEFRAG, PAGE_UNLOCK |
PAGE_CLEAR_DIRTY |
PAGE_SET_WRITEBACK |
PAGE_END_WRITEBACK);
btrfs_free_path(path);
return PTR_ERR(trans);
}
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
cow_start = (u64)-1;
cur_offset = start;
while (1) {
ret = btrfs_lookup_file_extent(trans, root, path, ino,
cur_offset, 0);
if (ret < 0)
goto error;
if (ret > 0 && path->slots[0] > 0 && check_prev) {
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key,
path->slots[0] - 1);
if (found_key.objectid == ino &&
found_key.type == BTRFS_EXTENT_DATA_KEY)
path->slots[0]--;
}
check_prev = 0;
next_slot:
leaf = path->nodes[0];
if (path->slots[0] >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
if (ret < 0)
goto error;
if (ret > 0)
break;
leaf = path->nodes[0];
}
nocow = 0;
disk_bytenr = 0;
num_bytes = 0;
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
if (found_key.objectid > ino ||
found_key.type > BTRFS_EXTENT_DATA_KEY ||
found_key.offset > end)
break;
if (found_key.offset > cur_offset) {
extent_end = found_key.offset;
extent_type = 0;
goto out_check;
}
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
extent_type = btrfs_file_extent_type(leaf, fi);
ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
if (extent_type == BTRFS_FILE_EXTENT_REG ||
extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
extent_offset = btrfs_file_extent_offset(leaf, fi);
extent_end = found_key.offset +
btrfs_file_extent_num_bytes(leaf, fi);
disk_num_bytes =
btrfs_file_extent_disk_num_bytes(leaf, fi);
if (extent_end <= start) {
path->slots[0]++;
goto next_slot;
}
if (disk_bytenr == 0)
goto out_check;
if (btrfs_file_extent_compression(leaf, fi) ||
btrfs_file_extent_encryption(leaf, fi) ||
btrfs_file_extent_other_encoding(leaf, fi))
goto out_check;
if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
goto out_check;
if (btrfs_extent_readonly(root, disk_bytenr))
goto out_check;
if (btrfs_cross_ref_exist(trans, root, ino,
found_key.offset -
extent_offset, disk_bytenr))
goto out_check;
disk_bytenr += extent_offset;
disk_bytenr += cur_offset - found_key.offset;
num_bytes = min(end + 1, extent_end) - cur_offset;
/*
* if there are pending snapshots for this root,
* we fall back to the common COW path.
*/
if (!nolock) {
err = btrfs_start_write_no_snapshoting(root);
if (!err)
goto out_check;
}
/*
* force cow if csums exist in the range.
* this ensures that the csums for a given extent are
* either valid or do not exist.
*/
if (csum_exist_in_range(root, disk_bytenr, num_bytes))
goto out_check;
nocow = 1;
} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
extent_end = found_key.offset +
btrfs_file_extent_inline_len(leaf,
path->slots[0], fi);
extent_end = ALIGN(extent_end, root->sectorsize);
} else {
BUG_ON(1);
}
out_check:
if (extent_end <= start) {
path->slots[0]++;
if (!nolock && nocow)
btrfs_end_write_no_snapshoting(root);
goto next_slot;
}
if (!nocow) {
if (cow_start == (u64)-1)
cow_start = cur_offset;
cur_offset = extent_end;
if (cur_offset > end)
break;
path->slots[0]++;
goto next_slot;
}
btrfs_release_path(path);
if (cow_start != (u64)-1) {
ret = cow_file_range(inode, locked_page,
cow_start, found_key.offset - 1,
page_started, nr_written, 1);
if (ret) {
if (!nolock && nocow)
btrfs_end_write_no_snapshoting(root);
goto error;
}
cow_start = (u64)-1;
}
if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
struct extent_map *em;
struct extent_map_tree *em_tree;
em_tree = &BTRFS_I(inode)->extent_tree;
em = alloc_extent_map();
BUG_ON(!em); /* -ENOMEM */
em->start = cur_offset;
em->orig_start = found_key.offset - extent_offset;
em->len = num_bytes;
em->block_len = num_bytes;
em->block_start = disk_bytenr;
em->orig_block_len = disk_num_bytes;
em->ram_bytes = ram_bytes;
em->bdev = root->fs_info->fs_devices->latest_bdev;
em->mod_start = em->start;
em->mod_len = em->len;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
set_bit(EXTENT_FLAG_FILLING, &em->flags);
em->generation = -1;
while (1) {
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em, 1);
write_unlock(&em_tree->lock);
if (ret != -EEXIST) {
free_extent_map(em);
break;
}
btrfs_drop_extent_cache(inode, em->start,
em->start + em->len - 1, 0);
}
type = BTRFS_ORDERED_PREALLOC;
} else {
type = BTRFS_ORDERED_NOCOW;
}
ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
num_bytes, num_bytes, type);
BUG_ON(ret); /* -ENOMEM */
if (root->root_key.objectid ==
BTRFS_DATA_RELOC_TREE_OBJECTID) {
ret = btrfs_reloc_clone_csums(inode, cur_offset,
num_bytes);
if (ret) {
if (!nolock && nocow)
btrfs_end_write_no_snapshoting(root);
goto error;
}
}
extent_clear_unlock_delalloc(inode, cur_offset,
cur_offset + num_bytes - 1,
locked_page, EXTENT_LOCKED |
EXTENT_DELALLOC, PAGE_UNLOCK |
PAGE_SET_PRIVATE2);
if (!nolock && nocow)
btrfs_end_write_no_snapshoting(root);
cur_offset = extent_end;
if (cur_offset > end)
break;
}
btrfs_release_path(path);
if (cur_offset <= end && cow_start == (u64)-1) {
cow_start = cur_offset;
cur_offset = end;
}
if (cow_start != (u64)-1) {
ret = cow_file_range(inode, locked_page, cow_start, end,
page_started, nr_written, 1);
if (ret)
goto error;
}
error:
err = btrfs_end_transaction(trans, root);
if (!ret)
ret = err;
if (ret && cur_offset < end)
extent_clear_unlock_delalloc(inode, cur_offset, end,
locked_page, EXTENT_LOCKED |
EXTENT_DELALLOC | EXTENT_DEFRAG |
EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
PAGE_CLEAR_DIRTY |
PAGE_SET_WRITEBACK |
PAGE_END_WRITEBACK);
btrfs_free_path(path);
return ret;
}
static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
{
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
!(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC))
return 0;
/*
* @defrag_bytes is a hint value; no spinlock is held here.
* If it is non-zero, the file is being defragged.
* Force cow if the given extent needs to be defragged.
*/
if (BTRFS_I(inode)->defrag_bytes &&
test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
EXTENT_DEFRAG, 0, NULL))
return 1;
return 0;
}
/*
* extent_io.c callback to do delayed allocation processing
*/
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
u64 start, u64 end, int *page_started,
unsigned long *nr_written)
{
int ret;
int force_cow = need_force_cow(inode, start, end);
if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) {
ret = run_delalloc_nocow(inode, locked_page, start, end,
page_started, 1, nr_written);
} else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
ret = run_delalloc_nocow(inode, locked_page, start, end,
page_started, 0, nr_written);
} else if (!inode_need_compress(inode)) {
ret = cow_file_range(inode, locked_page, start, end,
page_started, nr_written, 1);
} else {
set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
&BTRFS_I(inode)->runtime_flags);
ret = cow_file_range_async(inode, locked_page, start, end,
page_started, nr_written);
}
return ret;
}
static void btrfs_split_extent_hook(struct inode *inode,
struct extent_state *orig, u64 split)
{
u64 size;
/* not delalloc, ignore it */
if (!(orig->state & EXTENT_DELALLOC))
return;
size = orig->end - orig->start + 1;
if (size > BTRFS_MAX_EXTENT_SIZE) {
u64 num_extents;
u64 new_size;
/*
* See the explanation in btrfs_merge_extent_hook, the same
* applies here, just in reverse.
*/
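/*
 * For example, splitting a (BTRFS_MAX_EXTENT_SIZE + 8k) range, which
 * accounted for 2 outstanding extents, at 4k in leaves a 4k piece
 * (1 extent) and a (BTRFS_MAX_EXTENT_SIZE + 4k) piece (2 extents),
 * 3 in total, so one more outstanding extent is counted below.
 */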
new_size = orig->end - split + 1;
num_extents = div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
BTRFS_MAX_EXTENT_SIZE);
new_size = split - orig->start;
num_extents += div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
BTRFS_MAX_EXTENT_SIZE);
if (div64_u64(size + BTRFS_MAX_EXTENT_SIZE - 1,
BTRFS_MAX_EXTENT_SIZE) >= num_extents)
return;
}
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents++;
spin_unlock(&BTRFS_I(inode)->lock);
}
/*
* extent_io.c merge_extent_hook, used to track merged delayed allocation
* extents. It lets us track new extents that have just been merged onto
* old ones (as happens during sequential writes) so we can properly
* account for the metadata space we'll need.
*/
static void btrfs_merge_extent_hook(struct inode *inode,
struct extent_state *new,
struct extent_state *other)
{
u64 new_size, old_size;
u64 num_extents;
/* not delalloc, ignore it */
if (!(other->state & EXTENT_DELALLOC))
return;
if (new->start > other->start)
new_size = new->end - other->start + 1;
else
new_size = other->end - new->start + 1;
/* we're not bigger than the max, unreserve the space and go */
if (new_size <= BTRFS_MAX_EXTENT_SIZE) {
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents--;
spin_unlock(&BTRFS_I(inode)->lock);
return;
}
/*
* We have to add up either side to figure out how many extents were
* accounted for before we merged into one big extent. If the number of
* extents we accounted for is <= the amount we need for the new range
* then we can return, otherwise drop. Think of it like this
*
* [ 4k][MAX_SIZE]
*
* So we've grown the extent by a MAX_SIZE extent, this would mean we
* need 2 outstanding extents, on one side we have 1 and the other side
* we have 1 so they are == and we can return. But in this case
*
* [MAX_SIZE+4k][MAX_SIZE+4k]
*
* Each range on their own accounts for 2 extents, but merged together
* they are only 3 extents worth of accounting, so we need to drop in
* this case.
*/
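/*
 * Numerically (assuming BTRFS_MAX_EXTENT_SIZE is 128M): two
 * (128M + 4k) neighbours account for 2 extents each, 4 in total,
 * while the merged (256M + 8k) range only needs 3, so one
 * outstanding extent is dropped below.
 */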
old_size = other->end - other->start + 1;
num_extents = div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1,
BTRFS_MAX_EXTENT_SIZE);
old_size = new->end - new->start + 1;
num_extents += div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1,
BTRFS_MAX_EXTENT_SIZE);
if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
BTRFS_MAX_EXTENT_SIZE) >= num_extents)
return;
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents--;
spin_unlock(&BTRFS_I(inode)->lock);
}
static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
struct inode *inode)
{
spin_lock(&root->delalloc_lock);
if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
&root->delalloc_inodes);
set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
&BTRFS_I(inode)->runtime_flags);
root->nr_delalloc_inodes++;
if (root->nr_delalloc_inodes == 1) {
spin_lock(&root->fs_info->delalloc_root_lock);
BUG_ON(!list_empty(&root->delalloc_root));
list_add_tail(&root->delalloc_root,
&root->fs_info->delalloc_roots);
spin_unlock(&root->fs_info->delalloc_root_lock);
}
}
spin_unlock(&root->delalloc_lock);
}
static void btrfs_del_delalloc_inode(struct btrfs_root *root,
struct inode *inode)
{
spin_lock(&root->delalloc_lock);
if (!list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
list_del_init(&BTRFS_I(inode)->delalloc_inodes);
clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
&BTRFS_I(inode)->runtime_flags);
root->nr_delalloc_inodes--;
if (!root->nr_delalloc_inodes) {
spin_lock(&root->fs_info->delalloc_root_lock);
BUG_ON(list_empty(&root->delalloc_root));
list_del_init(&root->delalloc_root);
spin_unlock(&root->fs_info->delalloc_root_lock);
}
}
spin_unlock(&root->delalloc_lock);
}
/*
* extent_io.c set_bit_hook, used to track delayed allocation
* bytes in this file, and to maintain the list of inodes that
* have pending delalloc work to be done.
*/
static void btrfs_set_bit_hook(struct inode *inode,
struct extent_state *state, unsigned *bits)
{
if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
WARN_ON(1);
/*
* set_bit and clear bit hooks normally require _irqsave/restore
* but in this case, we are only testing for the DELALLOC
* bit, which is only set or cleared with irqs on
*/
if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 len = state->end + 1 - state->start;
bool do_list = !btrfs_is_free_space_inode(inode);
if (*bits & EXTENT_FIRST_DELALLOC) {
*bits &= ~EXTENT_FIRST_DELALLOC;
} else {
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents++;
spin_unlock(&BTRFS_I(inode)->lock);
}
/* For sanity tests */
if (btrfs_test_is_dummy_root(root))
return;
__percpu_counter_add(&root->fs_info->delalloc_bytes, len,
root->fs_info->delalloc_batch);
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->delalloc_bytes += len;
if (*bits & EXTENT_DEFRAG)
BTRFS_I(inode)->defrag_bytes += len;
if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
&BTRFS_I(inode)->runtime_flags))
btrfs_add_delalloc_inodes(root, inode);
spin_unlock(&BTRFS_I(inode)->lock);
}
}
/*
* extent_io.c clear_bit_hook, see set_bit_hook for why
*/
static void btrfs_clear_bit_hook(struct inode *inode,
struct extent_state *state,
unsigned *bits)
{
u64 len = state->end + 1 - state->start;
u64 num_extents = div64_u64(len + BTRFS_MAX_EXTENT_SIZE -1,
BTRFS_MAX_EXTENT_SIZE);
spin_lock(&BTRFS_I(inode)->lock);
if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG))
BTRFS_I(inode)->defrag_bytes -= len;
spin_unlock(&BTRFS_I(inode)->lock);
/*
* set_bit and clear bit hooks normally require _irqsave/restore
* but in this case, we are only testing for the DELALLOC
* bit, which is only set or cleared with irqs on
*/
if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
struct btrfs_root *root = BTRFS_I(inode)->root;
bool do_list = !btrfs_is_free_space_inode(inode);
if (*bits & EXTENT_FIRST_DELALLOC) {
*bits &= ~EXTENT_FIRST_DELALLOC;
} else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents -= num_extents;
spin_unlock(&BTRFS_I(inode)->lock);
}
/*
* We don't reserve metadata space for space cache inodes so we
* don't need to call btrfs_delalloc_release_metadata if there is an
* error.
*/
if (*bits & EXTENT_DO_ACCOUNTING &&
root != root->fs_info->tree_root)
btrfs_delalloc_release_metadata(inode, len);
/* For sanity tests. */
if (btrfs_test_is_dummy_root(root))
return;
if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
&& do_list && !(state->state & EXTENT_NORESERVE))
btrfs_free_reserved_data_space(inode, len);
__percpu_counter_add(&root->fs_info->delalloc_bytes, -len,
root->fs_info->delalloc_batch);
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->delalloc_bytes -= len;
if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
&BTRFS_I(inode)->runtime_flags))
btrfs_del_delalloc_inode(root, inode);
spin_unlock(&BTRFS_I(inode)->lock);
}
}
/*
* extent_io.c merge_bio_hook, this must check the chunk tree to make sure
* we don't create bios that span stripes or chunks
*/
int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
size_t size, struct bio *bio,
unsigned long bio_flags)
{
struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
u64 logical = (u64)bio->bi_iter.bi_sector << 9;
u64 length = 0;
u64 map_length;
int ret;
if (bio_flags & EXTENT_BIO_COMPRESSED)
return 0;
length = bio->bi_iter.bi_size;
map_length = length;
ret = btrfs_map_block(root->fs_info, rw, logical,
&map_length, NULL, 0);
/* Will always return 0 with map_multi == NULL */
BUG_ON(ret < 0);
if (map_length < length + size)
return 1;
return 0;
}
/*
* in order to insert checksums into the metadata in large chunks,
* we wait until bio submission time. All the pages in the bio are
* checksummed and sums are attached onto the ordered extent record.
*
* At IO completion time the csums attached on the ordered extent record
* are inserted into the btree
*/
static int __btrfs_submit_bio_start(struct inode *inode, int rw,
struct bio *bio, int mirror_num,
unsigned long bio_flags,
u64 bio_offset)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret = 0;
ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
BUG_ON(ret); /* -ENOMEM */
return 0;
}
/*
* in order to insert checksums into the metadata in large chunks,
* we wait until bio submission time. All the pages in the bio are
* checksummed and sums are attached onto the ordered extent record.
*
* At IO completion time the csums attached on the ordered extent record
* are inserted into the btree
*/
static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
int mirror_num, unsigned long bio_flags,
u64 bio_offset)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
ret = btrfs_map_bio(root, rw, bio, mirror_num, 1);
if (ret) {
bio->bi_error = ret;
bio_endio(bio);
}
return ret;
}
/*
* extent_io.c submission hook. This does the right thing for csum calculation
* on write, or reading the csums from the tree before a read
*/
static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
int mirror_num, unsigned long bio_flags,
u64 bio_offset)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret = 0;
int skip_sum;
int metadata = 0;
int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
if (btrfs_is_free_space_inode(inode))
metadata = 2;
if (!(rw & REQ_WRITE)) {
ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
if (ret)
goto out;
if (bio_flags & EXTENT_BIO_COMPRESSED) {
ret = btrfs_submit_compressed_read(inode, bio,
mirror_num,
bio_flags);
goto out;
} else if (!skip_sum) {
ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
if (ret)
goto out;
}
goto mapit;
} else if (async && !skip_sum) {
/* csum items have already been cloned */
if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
goto mapit;
/* we're doing a write, do the async checksumming */
ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
inode, rw, bio, mirror_num,
bio_flags, bio_offset,
__btrfs_submit_bio_start,
__btrfs_submit_bio_done);
goto out;
} else if (!skip_sum) {
ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
if (ret)
goto out;
}
mapit:
ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
out:
if (ret < 0) {
bio->bi_error = ret;
bio_endio(bio);
}
return ret;
}
/*
* given a list of ordered sums record them in the inode. This happens
* at IO completion time based on sums calculated at bio submission time.
*/
static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
struct inode *inode, u64 file_offset,
struct list_head *list)
{
struct btrfs_ordered_sum *sum;
list_for_each_entry(sum, list, list) {
trans->adding_csums = 1;
btrfs_csum_file_blocks(trans,
BTRFS_I(inode)->root->fs_info->csum_root, sum);
trans->adding_csums = 0;
}
return 0;
}
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
struct extent_state **cached_state)
{
WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0);
return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
cached_state, GFP_NOFS);
}
/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
struct page *page;
struct btrfs_work work;
};
static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
struct btrfs_writepage_fixup *fixup;
struct btrfs_ordered_extent *ordered;
struct extent_state *cached_state = NULL;
struct page *page;
struct inode *inode;
u64 page_start;
u64 page_end;
int ret;
fixup = container_of(work, struct btrfs_writepage_fixup, work);
page = fixup->page;
again:
lock_page(page);
if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
ClearPageChecked(page);
goto out_page;
}
inode = page->mapping->host;
page_start = page_offset(page);
page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
&cached_state);
/* already ordered? We're done */
if (PagePrivate2(page))
goto out;
ordered = btrfs_lookup_ordered_extent(inode, page_start);
if (ordered) {
unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
page_end, &cached_state, GFP_NOFS);
unlock_page(page);
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
goto again;
}
ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
if (ret) {
mapping_set_error(page->mapping, ret);
end_extent_writepage(page, ret, page_start, page_end);
ClearPageChecked(page);
goto out;
}
btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
ClearPageChecked(page);
set_page_dirty(page);
out:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
&cached_state, GFP_NOFS);
out_page:
unlock_page(page);
page_cache_release(page);
kfree(fixup);
}
/*
* There are a few paths in the higher layers of the kernel that directly
* set the page dirty bit without asking the filesystem if it is a
* good idea. This causes problems because we want to make sure COW
* properly happens and the data=ordered rules are followed.
*
* In our case any range that doesn't have the ORDERED bit set
* hasn't been properly set up for IO. We kick off an async process
* to fix it up. The async helper will wait for ordered extents, set
* the delalloc bit and make it safe to write the page.
*/
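/*
 * return values: 0 if the page was already properly ordered, -EAGAIN
 * if a fixup is already pending (or the fixup could not be
 * allocated), -EBUSY once a fixup worker has been queued.
 */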
static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
{
struct inode *inode = page->mapping->host;
struct btrfs_writepage_fixup *fixup;
struct btrfs_root *root = BTRFS_I(inode)->root;
/* this page is properly in the ordered list */
if (TestClearPagePrivate2(page))
return 0;
if (PageChecked(page))
return -EAGAIN;
fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
if (!fixup)
return -EAGAIN;
SetPageChecked(page);
page_cache_get(page);
btrfs_init_work(&fixup->work, btrfs_fixup_helper,
btrfs_writepage_fixup_worker, NULL, NULL);
fixup->page = page;
btrfs_queue_work(root->fs_info->fixup_workers, &fixup->work);
return -EBUSY;
}
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
struct inode *inode, u64 file_pos,
u64 disk_bytenr, u64 disk_num_bytes,
u64 num_bytes, u64 ram_bytes,
u8 compression, u8 encryption,
u16 other_encoding, int extent_type)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_file_extent_item *fi;
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_key ins;
int extent_inserted = 0;
int ret;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
/*
* we may be replacing one extent in the tree with another.
* The new extent is pinned in the extent map, and we don't want
* to drop it from the cache until it is completely in the btree.
*
* So, tell btrfs_drop_extents to leave this extent in the cache.
* The caller is expected to unpin it and allow it to be merged
* with the others.
*/
ret = __btrfs_drop_extents(trans, root, inode, path, file_pos,
file_pos + num_bytes, NULL, 0,
1, sizeof(*fi), &extent_inserted);
if (ret)
goto out;
if (!extent_inserted) {
ins.objectid = btrfs_ino(inode);
ins.offset = file_pos;
ins.type = BTRFS_EXTENT_DATA_KEY;
path->leave_spinning = 1;
ret = btrfs_insert_empty_item(trans, root, path, &ins,
sizeof(*fi));
if (ret)
goto out;
}
leaf = path->nodes[0];
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
btrfs_set_file_extent_generation(leaf, fi, trans->transid);
btrfs_set_file_extent_type(leaf, fi, extent_type);
btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
btrfs_set_file_extent_offset(leaf, fi, 0);
btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
btrfs_set_file_extent_compression(leaf, fi, compression);
btrfs_set_file_extent_encryption(leaf, fi, encryption);
btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(path);
inode_add_bytes(inode, num_bytes);
ins.objectid = disk_bytenr;
ins.offset = disk_num_bytes;
ins.type = BTRFS_EXTENT_ITEM_KEY;
ret = btrfs_alloc_reserved_file_extent(trans, root,
root->root_key.objectid,
btrfs_ino(inode), file_pos, &ins);
out:
btrfs_free_path(path);
return ret;
}
/* snapshot-aware defrag */
struct sa_defrag_extent_backref {
struct rb_node node;
struct old_sa_defrag_extent *old;
u64 root_id;
u64 inum;
u64 file_pos;
u64 extent_offset;
u64 num_bytes;
u64 generation;
};
struct old_sa_defrag_extent {
struct list_head list;
struct new_sa_defrag_extent *new;
u64 extent_offset;
u64 bytenr;
u64 offset;
u64 len;
int count;
};
struct new_sa_defrag_extent {
struct rb_root root;
struct list_head head;
struct btrfs_path *path;
struct inode *inode;
u64 file_pos;
u64 len;
u64 bytenr;
u64 disk_len;
u8 compress_type;
};
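/*
 * Rough shape of the relink bookkeeping: a new_sa_defrag_extent
 * describes the extent defrag just wrote, each old_sa_defrag_extent
 * on its head list is one of the extents it replaced, and every
 * sa_defrag_extent_backref in the rb_root is a reference elsewhere
 * (other root/inode/offset) that still points at an old extent and
 * needs relinking to the new one.
 */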
static int backref_comp(struct sa_defrag_extent_backref *b1,
struct sa_defrag_extent_backref *b2)
{
if (b1->root_id < b2->root_id)
return -1;
else if (b1->root_id > b2->root_id)
return 1;
if (b1->inum < b2->inum)
return -1;
else if (b1->inum > b2->inum)
return 1;
if (b1->file_pos < b2->file_pos)
return -1;
else if (b1->file_pos > b2->file_pos)
return 1;
/*
* [------------------------------] ===> (a range of space)
* |<--->| |<---->| =============> (fs/file tree A)
* |<---------------------------->| ===> (fs/file tree B)
*
* A range of space can refer to two file extents in one tree while
* referring to only one file extent in another tree.
*
* So we may process a disk offset more than once (two extents in A)
* that lands in the same extent (one extent in B), and then insert
* two identical backrefs (both referring to the extent in B).
*/
return 0;
}
static void backref_insert(struct rb_root *root,
struct sa_defrag_extent_backref *backref)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct sa_defrag_extent_backref *entry;
int ret;
while (*p) {
parent = *p;
entry = rb_entry(parent, struct sa_defrag_extent_backref, node);
ret = backref_comp(backref, entry);
if (ret < 0)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
rb_link_node(&backref->node, parent, p);
rb_insert_color(&backref->node, root);
}
/*
* Note the backref might have changed, in which case we just return 0.
*/
static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
void *ctx)
{
struct btrfs_file_extent_item *extent;
struct btrfs_fs_info *fs_info;
struct old_sa_defrag_extent *old = ctx;
struct new_sa_defrag_extent *new = old->new;
struct btrfs_path *path = new->path;
struct btrfs_key key;
struct btrfs_root *root;
struct sa_defrag_extent_backref *backref;
struct extent_buffer *leaf;
struct inode *inode = new->inode;
int slot;
int ret;
u64 extent_offset;
u64 num_bytes;
if (BTRFS_I(inode)->root->root_key.objectid == root_id &&
inum == btrfs_ino(inode))
return 0;
key.objectid = root_id;
key.type = BTRFS_ROOT_ITEM_KEY;
key.offset = (u64)-1;
fs_info = BTRFS_I(inode)->root->fs_info;
root = btrfs_read_fs_root_no_name(fs_info, &key);
if (IS_ERR(root)) {
if (PTR_ERR(root) == -ENOENT)
return 0;
WARN_ON(1);
pr_debug("inum=%llu, offset=%llu, root_id=%llu\n",
inum, offset, root_id);
return PTR_ERR(root);
}
key.objectid = inum;
key.type = BTRFS_EXTENT_DATA_KEY;
if (offset > (u64)-1 << 32)
key.offset = 0;
else
key.offset = offset;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (WARN_ON(ret < 0))
return ret;
ret = 0;
while (1) {
cond_resched();
leaf = path->nodes[0];
slot = path->slots[0];
if (slot >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
if (ret < 0) {
goto out;
} else if (ret > 0) {
ret = 0;
goto out;
}
continue;
}
path->slots[0]++;
btrfs_item_key_to_cpu(leaf, &key, slot);
if (key.objectid > inum)
goto out;
if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY)
continue;
extent = btrfs_item_ptr(leaf, slot,
struct btrfs_file_extent_item);
if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
continue;
/*
* 'offset' refers to the exact key.offset,
* NOT the 'offset' field in btrfs_extent_data_ref, ie.
* (key.offset - extent_offset).
*/
if (key.offset != offset)
continue;
extent_offset = btrfs_file_extent_offset(leaf, extent);
num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
if (extent_offset >= old->extent_offset + old->offset +
old->len || extent_offset + num_bytes <=
old->extent_offset + old->offset)
continue;
break;
}
backref = kmalloc(sizeof(*backref), GFP_NOFS);
if (!backref) {
ret = -ENOENT;
goto out;
}
backref->root_id = root_id;
backref->inum = inum;
backref->file_pos = offset;
backref->num_bytes = num_bytes;
backref->extent_offset = extent_offset;
backref->generation = btrfs_file_extent_generation(leaf, extent);
backref->old = old;
backref_insert(&new->root, backref);
old->count++;
out:
btrfs_release_path(path);
WARN_ON(ret);
return ret;
}
static noinline bool record_extent_backrefs(struct btrfs_path *path,
struct new_sa_defrag_extent *new)
{
struct btrfs_fs_info *fs_info = BTRFS_I(new->inode)->root->fs_info;
struct old_sa_defrag_extent *old, *tmp;
int ret;
new->path = path;
list_for_each_entry_safe(old, tmp, &new->head, list) {
ret = iterate_inodes_from_logical(old->bytenr +
old->extent_offset, fs_info,
path, record_one_backref,
old);
if (ret < 0 && ret != -ENOENT)
return false;
/* no backref to be processed for this extent */
if (!old->count) {
list_del(&old->list);
kfree(old);
}
}
if (list_empty(&new->head))
return false;
return true;
}
static int relink_is_mergable(struct extent_buffer *leaf,
struct btrfs_file_extent_item *fi,
struct new_sa_defrag_extent *new)
{
if (btrfs_file_extent_disk_bytenr(leaf, fi) != new->bytenr)
return 0;
if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
return 0;
if (btrfs_file_extent_compression(leaf, fi) != new->compress_type)
return 0;
if (btrfs_file_extent_encryption(leaf, fi) ||
btrfs_file_extent_other_encoding(leaf, fi))
return 0;
return 1;
}
/*
* Note the backref might have changed, in which case we just return 0.
*/
static noinline int relink_extent_backref(struct btrfs_path *path,
struct sa_defrag_extent_backref *prev,
struct sa_defrag_extent_backref *backref)
{
struct btrfs_file_extent_item *extent;
struct btrfs_file_extent_item *item;
struct btrfs_ordered_extent *ordered;
struct btrfs_trans_handle *trans;
struct btrfs_fs_info *fs_info;
struct btrfs_root *root;
struct btrfs_key key;
struct extent_buffer *leaf;
struct old_sa_defrag_extent *old = backref->old;
struct new_sa_defrag_extent *new = old->new;
struct inode *src_inode = new->inode;
struct inode *inode;
struct extent_state *cached = NULL;
int ret = 0;
u64 start;
u64 len;
u64 lock_start;
u64 lock_end;
bool merge = false;
int index;
if (prev && prev->root_id == backref->root_id &&
prev->inum == backref->inum &&
prev->file_pos + prev->num_bytes == backref->file_pos)
merge = true;
/* step 1: get root */
key.objectid = backref->root_id;
key.type = BTRFS_ROOT_ITEM_KEY;
key.offset = (u64)-1;
fs_info = BTRFS_I(src_inode)->root->fs_info;
index = srcu_read_lock(&fs_info->subvol_srcu);
root = btrfs_read_fs_root_no_name(fs_info, &key);
if (IS_ERR(root)) {
srcu_read_unlock(&fs_info->subvol_srcu, index);
if (PTR_ERR(root) == -ENOENT)
return 0;
return PTR_ERR(root);
}
if (btrfs_root_readonly(root)) {
srcu_read_unlock(&fs_info->subvol_srcu, index);
return 0;
}
/* step 2: get inode */
key.objectid = backref->inum;
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
inode = btrfs_iget(fs_info->sb, &key, root, NULL);
if (IS_ERR(inode)) {
srcu_read_unlock(&fs_info->subvol_srcu, index);
return 0;
}
srcu_read_unlock(&fs_info->subvol_srcu, index);
/* step 3: relink backref */
lock_start = backref->file_pos;
lock_end = backref->file_pos + backref->num_bytes - 1;
lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
0, &cached);
ordered = btrfs_lookup_first_ordered_extent(inode, lock_end);
if (ordered) {
btrfs_put_ordered_extent(ordered);
goto out_unlock;
}
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out_unlock;
}
key.objectid = backref->inum;
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = backref->file_pos;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0) {
goto out_free_path;
} else if (ret > 0) {
ret = 0;
goto out_free_path;
}
extent = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_file_extent_item);
if (btrfs_file_extent_generation(path->nodes[0], extent) !=
backref->generation)
goto out_free_path;
btrfs_release_path(path);
start = backref->file_pos;
if (backref->extent_offset < old->extent_offset + old->offset)
start += old->extent_offset + old->offset -
backref->extent_offset;
len = min(backref->extent_offset + backref->num_bytes,
old->extent_offset + old->offset + old->len);
len -= max(backref->extent_offset, old->extent_offset + old->offset);
ret = btrfs_drop_extents(trans, root, inode, start,
start + len, 1);
if (ret)
goto out_free_path;
again:
key.objectid = btrfs_ino(inode);
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = start;
path->leave_spinning = 1;
if (merge) {
struct btrfs_file_extent_item *fi;
u64 extent_len;
struct btrfs_key found_key;
ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
if (ret < 0)
goto out_free_path;
path->slots[0]--;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
extent_len = btrfs_file_extent_num_bytes(leaf, fi);
if (extent_len + found_key.offset == start &&
relink_is_mergable(leaf, fi, new)) {
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_len + len);
btrfs_mark_buffer_dirty(leaf);
inode_add_bytes(inode, len);
ret = 1;
goto out_free_path;
} else {
merge = false;
btrfs_release_path(path);
goto again;
}
}
ret = btrfs_insert_empty_item(trans, root, path, &key,
sizeof(*extent));
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto out_free_path;
}
leaf = path->nodes[0];
item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr);
btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len);
btrfs_set_file_extent_offset(leaf, item, start - new->file_pos);
btrfs_set_file_extent_num_bytes(leaf, item, len);
btrfs_set_file_extent_ram_bytes(leaf, item, new->len);
btrfs_set_file_extent_generation(leaf, item, trans->transid);
btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
btrfs_set_file_extent_compression(leaf, item, new->compress_type);
btrfs_set_file_extent_encryption(leaf, item, 0);
btrfs_set_file_extent_other_encoding(leaf, item, 0);
btrfs_mark_buffer_dirty(leaf);
inode_add_bytes(inode, len);
btrfs_release_path(path);
ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
new->disk_len, 0,
backref->root_id, backref->inum,
new->file_pos, 0); /* start - extent_offset */
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto out_free_path;
}
ret = 1;
out_free_path:
btrfs_release_path(path);
path->leave_spinning = 0;
btrfs_end_transaction(trans, root);
out_unlock:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
&cached, GFP_NOFS);
iput(inode);
return ret;
}
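/* free a new_sa_defrag_extent along with any old extents still listed on it */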
static void free_sa_defrag_extent(struct new_sa_defrag_extent *new)
{
struct old_sa_defrag_extent *old, *tmp;
if (!new)
return;
list_for_each_entry_safe(old, tmp, &new->head, list) {
list_del(&old->list);
kfree(old);
}
kfree(new);
}
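/*
 * Relink every recorded backref to the new (defragged) extent location,
 * merging with the previous backref when the file ranges are contiguous,
 * then drop our defrag_running hold and wake up any waiters.
 */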
static void relink_file_extents(struct new_sa_defrag_extent *new)
{
struct btrfs_path *path;
struct sa_defrag_extent_backref *backref;
struct sa_defrag_extent_backref *prev = NULL;
struct inode *inode;
struct btrfs_root *root;
struct rb_node *node;
int ret;
inode = new->inode;
root = BTRFS_I(inode)->root;
path = btrfs_alloc_path();
if (!path)
return;
if (!record_extent_backrefs(path, new)) {
btrfs_free_path(path);
goto out;
}
btrfs_release_path(path);
while (1) {
node = rb_first(&new->root);
if (!node)
break;
rb_erase(node, &new->root);
backref = rb_entry(node, struct sa_defrag_extent_backref, node);
ret = relink_extent_backref(path, prev, backref);
WARN_ON(ret < 0);
kfree(prev);
if (ret == 1)
prev = backref;
else
prev = NULL;
cond_resched();
}
kfree(prev);
btrfs_free_path(path);
out:
free_sa_defrag_extent(new);
atomic_dec(&root->fs_info->defrag_running);
wake_up(&root->fs_info->transaction_wait);
}
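/*
 * Record all the old file extents covering the range of @ordered so that
 * they can be relinked to the new extent once the ordered IO completes.
 */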
static struct new_sa_defrag_extent *
record_old_file_extents(struct inode *inode,
struct btrfs_ordered_extent *ordered)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_path *path;
struct btrfs_key key;
struct old_sa_defrag_extent *old;
struct new_sa_defrag_extent *new;
int ret;
new = kmalloc(sizeof(*new), GFP_NOFS);
if (!new)
return NULL;
new->inode = inode;
new->file_pos = ordered->file_offset;
new->len = ordered->len;
new->bytenr = ordered->start;
new->disk_len = ordered->disk_len;
new->compress_type = ordered->compress_type;
new->root = RB_ROOT;
INIT_LIST_HEAD(&new->head);
path = btrfs_alloc_path();
if (!path)
goto out_kfree;
key.objectid = btrfs_ino(inode);
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = new->file_pos;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto out_free_path;
if (ret > 0 && path->slots[0] > 0)
path->slots[0]--;
/* find out all the old extents for the file range */
while (1) {
struct btrfs_file_extent_item *extent;
struct extent_buffer *l;
int slot;
u64 num_bytes;
u64 offset;
u64 end;
u64 disk_bytenr;
u64 extent_offset;
l = path->nodes[0];
slot = path->slots[0];
if (slot >= btrfs_header_nritems(l)) {
ret = btrfs_next_leaf(root, path);
if (ret < 0)
goto out_free_path;
else if (ret > 0)
break;
continue;
}
btrfs_item_key_to_cpu(l, &key, slot);
if (key.objectid != btrfs_ino(inode))
break;
if (key.type != BTRFS_EXTENT_DATA_KEY)
break;
if (key.offset >= new->file_pos + new->len)
break;
extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item);
num_bytes = btrfs_file_extent_num_bytes(l, extent);
if (key.offset + num_bytes < new->file_pos)
goto next;
disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent);
if (!disk_bytenr)
goto next;
extent_offset = btrfs_file_extent_offset(l, extent);
old = kmalloc(sizeof(*old), GFP_NOFS);
if (!old)
goto out_free_path;
offset = max(new->file_pos, key.offset);
end = min(new->file_pos + new->len, key.offset + num_bytes);
old->bytenr = disk_bytenr;
old->extent_offset = extent_offset;
old->offset = offset - key.offset;
old->len = end - offset;
old->new = new;
old->count = 0;
list_add_tail(&old->list, &new->head);
next:
path->slots[0]++;
cond_resched();
}
btrfs_free_path(path);
atomic_inc(&root->fs_info->defrag_running);
return new;
out_free_path:
btrfs_free_path(path);
out_kfree:
free_sa_defrag_extent(new);
return NULL;
}
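/* drop the per-block-group count of outstanding delalloc bytes for a range */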
static void btrfs_release_delalloc_bytes(struct btrfs_root *root,
u64 start, u64 len)
{
struct btrfs_block_group_cache *cache;
cache = btrfs_lookup_block_group(root->fs_info, start);
ASSERT(cache);
spin_lock(&cache->lock);
cache->delalloc_bytes -= len;
spin_unlock(&cache->lock);
btrfs_put_block_group(cache);
}
/* as ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers is
 * fully written.
 */
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
{
struct inode *inode = ordered_extent->inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans = NULL;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct extent_state *cached_state = NULL;
struct new_sa_defrag_extent *new = NULL;
int compress_type = 0;
int ret = 0;
u64 logical_len = ordered_extent->len;
bool nolock;
bool truncated = false;
nolock = btrfs_is_free_space_inode(inode);
if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
ret = -EIO;
goto out;
}
btrfs_free_io_failure_record(inode, ordered_extent->file_offset,
ordered_extent->file_offset +
ordered_extent->len - 1);
if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
truncated = true;
logical_len = ordered_extent->truncated_len;
/* Truncated the entire extent, don't bother adding */
if (!logical_len)
goto out;
}
if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
btrfs_ordered_update_i_size(inode, 0, ordered_extent);
if (nolock)
trans = btrfs_join_transaction_nolock(root);
else
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
trans = NULL;
goto out;
}
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
ret = btrfs_update_inode_fallback(trans, root, inode);
if (ret) /* -ENOMEM or corruption */
btrfs_abort_transaction(trans, root, ret);
goto out;
}
lock_extent_bits(io_tree, ordered_extent->file_offset,
ordered_extent->file_offset + ordered_extent->len - 1,
0, &cached_state);
ret = test_range_bit(io_tree, ordered_extent->file_offset,
ordered_extent->file_offset + ordered_extent->len - 1,
EXTENT_DEFRAG, 1, cached_state);
if (ret) {
u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
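		/*
		 * Snapshot-aware defrag is deliberately short-circuited by the
		 * "0 &&" below, presumably because of known problems in the
		 * relink path; the defrag bit is still cleared either way.
		 */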
if (0 && last_snapshot >= BTRFS_I(inode)->generation)
/* the inode is shared */
new = record_old_file_extents(inode, ordered_extent);
clear_extent_bit(io_tree, ordered_extent->file_offset,
ordered_extent->file_offset + ordered_extent->len - 1,
EXTENT_DEFRAG, 0, 0, &cached_state, GFP_NOFS);
}
if (nolock)
trans = btrfs_join_transaction_nolock(root);
else
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
trans = NULL;
goto out_unlock;
}
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
compress_type = ordered_extent->compress_type;
if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
BUG_ON(compress_type);
ret = btrfs_mark_extent_written(trans, inode,
ordered_extent->file_offset,
ordered_extent->file_offset +
logical_len);
} else {
BUG_ON(root == root->fs_info->tree_root);
ret = insert_reserved_file_extent(trans, inode,
ordered_extent->file_offset,
ordered_extent->start,
ordered_extent->disk_len,
logical_len, logical_len,
compress_type, 0, 0,
BTRFS_FILE_EXTENT_REG);
if (!ret)
btrfs_release_delalloc_bytes(root,
ordered_extent->start,
ordered_extent->disk_len);
}
unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
ordered_extent->file_offset, ordered_extent->len,
trans->transid);
if (ret < 0) {
btrfs_abort_transaction(trans, root, ret);
goto out_unlock;
}
add_pending_csums(trans, inode, ordered_extent->file_offset,
&ordered_extent->list);
btrfs_ordered_update_i_size(inode, 0, ordered_extent);
ret = btrfs_update_inode_fallback(trans, root, inode);
if (ret) { /* -ENOMEM or corruption */
btrfs_abort_transaction(trans, root, ret);
goto out_unlock;
}
ret = 0;
out_unlock:
unlock_extent_cached(io_tree, ordered_extent->file_offset,
ordered_extent->file_offset +
ordered_extent->len - 1, &cached_state, GFP_NOFS);
out:
if (root != root->fs_info->tree_root)
btrfs_delalloc_release_metadata(inode, ordered_extent->len);
if (trans)
btrfs_end_transaction(trans, root);
if (ret || truncated) {
u64 start, end;
if (truncated)
start = ordered_extent->file_offset + logical_len;
else
start = ordered_extent->file_offset;
end = ordered_extent->file_offset + ordered_extent->len - 1;
clear_extent_uptodate(io_tree, start, end, NULL, GFP_NOFS);
/* Drop the cache for the part of the extent we didn't write. */
btrfs_drop_extent_cache(inode, start, end, 0);
/*
* If the ordered extent had an IOERR or something else went
* wrong we need to return the space for this ordered extent
* back to the allocator. We only free the extent in the
* truncated case if we didn't write out the extent at all.
*/
if ((ret || !logical_len) &&
!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
!test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
btrfs_free_reserved_extent(root, ordered_extent->start,
ordered_extent->disk_len, 1);
}
/*
* This needs to be done to make sure anybody waiting knows we are done
* updating everything for this ordered extent.
*/
btrfs_remove_ordered_extent(inode, ordered_extent);
/* for snapshot-aware defrag */
if (new) {
if (ret) {
free_sa_defrag_extent(new);
atomic_dec(&root->fs_info->defrag_running);
} else {
relink_file_extents(new);
}
}
/* once for us */
btrfs_put_ordered_extent(ordered_extent);
/* once for the tree */
btrfs_put_ordered_extent(ordered_extent);
return ret;
}
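/* work item wrapper so ordered extent completion can run from a workqueue */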
static void finish_ordered_fn(struct btrfs_work *work)
{
struct btrfs_ordered_extent *ordered_extent;
ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
btrfs_finish_ordered_io(ordered_extent);
}
static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
struct extent_state *state, int uptodate)
{
struct inode *inode = page->mapping->host;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_ordered_extent *ordered_extent = NULL;
struct btrfs_workqueue *wq;
btrfs_work_func_t func;
trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
ClearPagePrivate2(page);
if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
end - start + 1, uptodate))
return 0;
if (btrfs_is_free_space_inode(inode)) {
wq = root->fs_info->endio_freespace_worker;
func = btrfs_freespace_write_helper;
} else {
wq = root->fs_info->endio_write_workers;
func = btrfs_endio_write_helper;
}
btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL,
NULL);
btrfs_queue_work(wq, &ordered_extent->work);
return 0;
}
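/*
 * verify the checksum of one block against the csums carried in the io_bio;
 * on a mismatch the data is poisoned with 0x01 bytes and -EIO is returned,
 * unless the expected csum is 0, i.e. no checksum was recorded.
 */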
static int __readpage_endio_check(struct inode *inode,
struct btrfs_io_bio *io_bio,
int icsum, struct page *page,
int pgoff, u64 start, size_t len)
{
char *kaddr;
u32 csum_expected;
u32 csum = ~(u32)0;
csum_expected = *(((u32 *)io_bio->csum) + icsum);
kaddr = kmap_atomic(page);
csum = btrfs_csum_data(kaddr + pgoff, csum, len);
btrfs_csum_final(csum, (char *)&csum);
if (csum != csum_expected)
goto zeroit;
kunmap_atomic(kaddr);
return 0;
zeroit:
btrfs_warn_rl(BTRFS_I(inode)->root->fs_info,
"csum failed ino %llu off %llu csum %u expected csum %u",
btrfs_ino(inode), start, csum, csum_expected);
memset(kaddr + pgoff, 1, len);
flush_dcache_page(page);
kunmap_atomic(kaddr);
if (csum_expected == 0)
return 0;
return -EIO;
}
/*
 * when reads are done, we need to check csums to verify the data is correct.
 * If there's a match, we allow the bio to finish. If not, the code in
* extent_io.c will try to find good copies for us.
*/
static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
u64 phy_offset, struct page *page,
u64 start, u64 end, int mirror)
{
size_t offset = start - page_offset(page);
struct inode *inode = page->mapping->host;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct btrfs_root *root = BTRFS_I(inode)->root;
if (PageChecked(page)) {
ClearPageChecked(page);
return 0;
}
if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
return 0;
if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
GFP_NOFS);
return 0;
}
phy_offset >>= inode->i_sb->s_blocksize_bits;
return __readpage_endio_check(inode, io_bio, phy_offset, page, offset,
start, (size_t)(end - start + 1));
}
struct delayed_iput {
struct list_head list;
struct inode *inode;
};
/* JDM: If this is fs-wide, why can't we add a pointer to
* btrfs_inode instead and avoid the allocation? */
void btrfs_add_delayed_iput(struct inode *inode)
{
struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
struct delayed_iput *delayed;
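	/* if this isn't the last reference, a plain decrement is enough */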
if (atomic_add_unless(&inode->i_count, -1, 1))
return;
delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
delayed->inode = inode;
spin_lock(&fs_info->delayed_iput_lock);
list_add_tail(&delayed->list, &fs_info->delayed_iputs);
spin_unlock(&fs_info->delayed_iput_lock);
}
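/* run all the iputs that were deferred via btrfs_add_delayed_iput */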
void btrfs_run_delayed_iputs(struct btrfs_root *root)
{
LIST_HEAD(list);
struct btrfs_fs_info *fs_info = root->fs_info;
struct delayed_iput *delayed;
int empty;
spin_lock(&fs_info->delayed_iput_lock);
empty = list_empty(&fs_info->delayed_iputs);
spin_unlock(&fs_info->delayed_iput_lock);
if (empty)
return;
down_read(&fs_info->delayed_iput_sem);
spin_lock(&fs_info->delayed_iput_lock);
list_splice_init(&fs_info->delayed_iputs, &list);
spin_unlock(&fs_info->delayed_iput_lock);
while (!list_empty(&list)) {
delayed = list_entry(list.next, struct delayed_iput, list);
list_del(&delayed->list);
iput(delayed->inode);
kfree(delayed);
}
	up_read(&fs_info->delayed_iput_sem);
}
/*
* This is called in transaction commit time. If there are no orphan
* files in the subvolume, it removes orphan item and frees block_rsv
* structure.
*/
void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
struct btrfs_block_rsv *block_rsv;
int ret;
if (atomic_read(&root->orphan_inodes) ||
root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
return;
spin_lock(&root->orphan_lock);
if (atomic_read(&root->orphan_inodes)) {
spin_unlock(&root->orphan_lock);
return;
}
if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) {
spin_unlock(&root->orphan_lock);
return;
}
block_rsv = root->orphan_block_rsv;
root->orphan_block_rsv = NULL;
spin_unlock(&root->orphan_lock);
if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state) &&
btrfs_root_refs(&root->root_item) > 0) {
ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
root->root_key.objectid);
if (ret)
btrfs_abort_transaction(trans, root, ret);
else
clear_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
&root->state);
}
if (block_rsv) {
WARN_ON(block_rsv->size > 0);
btrfs_free_block_rsv(root, block_rsv);
}
}
/*
* This creates an orphan entry for the given inode in case something goes
* wrong in the middle of an unlink/truncate.
*
* NOTE: caller of this function should reserve 5 units of metadata for
* this function.
*/
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_block_rsv *block_rsv = NULL;
int reserve = 0;
int insert = 0;
int ret;
if (!root->orphan_block_rsv) {
block_rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
if (!block_rsv)
return -ENOMEM;
}
spin_lock(&root->orphan_lock);
if (!root->orphan_block_rsv) {
root->orphan_block_rsv = block_rsv;
} else if (block_rsv) {
btrfs_free_block_rsv(root, block_rsv);
block_rsv = NULL;
}
if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
&BTRFS_I(inode)->runtime_flags)) {
#if 0
/*
* For proper ENOSPC handling, we should do orphan
		 * cleanup when mounting. But this introduces a backward
		 * compatibility issue.
*/
if (!xchg(&root->orphan_item_inserted, 1))
insert = 2;
else
insert = 1;
#endif
insert = 1;
atomic_inc(&root->orphan_inodes);
}
if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
&BTRFS_I(inode)->runtime_flags))
reserve = 1;
spin_unlock(&root->orphan_lock);
/* grab metadata reservation from transaction handle */
if (reserve) {
ret = btrfs_orphan_reserve_metadata(trans, inode);
BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
}
/* insert an orphan item to track this unlinked/truncated file */
if (insert >= 1) {
ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
if (ret) {
atomic_dec(&root->orphan_inodes);
if (reserve) {
clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
&BTRFS_I(inode)->runtime_flags);
btrfs_orphan_release_metadata(inode);
}
if (ret != -EEXIST) {
clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
&BTRFS_I(inode)->runtime_flags);
btrfs_abort_transaction(trans, root, ret);
return ret;
}
}
ret = 0;
}
/* insert an orphan item to track subvolume contains orphan files */
if (insert >= 2) {
ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
root->root_key.objectid);
if (ret && ret != -EEXIST) {
btrfs_abort_transaction(trans, root, ret);
return ret;
}
}
return 0;
}
/*
* We have done the truncate/delete so we can go ahead and remove the orphan
* item for this particular inode.
*/
static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
int delete_item = 0;
int release_rsv = 0;
int ret = 0;
spin_lock(&root->orphan_lock);
if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
&BTRFS_I(inode)->runtime_flags))
delete_item = 1;
if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
&BTRFS_I(inode)->runtime_flags))
release_rsv = 1;
spin_unlock(&root->orphan_lock);
if (delete_item) {
atomic_dec(&root->orphan_inodes);
if (trans)
ret = btrfs_del_orphan_item(trans, root,
btrfs_ino(inode));
}
if (release_rsv)
btrfs_orphan_release_metadata(inode);
return ret;
}
/*
* this cleans up any orphans that may be left on the list from the last use
* of this root.
*/
int btrfs_orphan_cleanup(struct btrfs_root *root)
{
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_key key, found_key;
struct btrfs_trans_handle *trans;
struct inode *inode;
u64 last_objectid = 0;
int ret = 0, nr_unlink = 0, nr_truncate = 0;
if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
return 0;
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
goto out;
}
path->reada = -1;
key.objectid = BTRFS_ORPHAN_OBJECTID;
key.type = BTRFS_ORPHAN_ITEM_KEY;
key.offset = (u64)-1;
while (1) {
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto out;
/*
		 * ret == 0 means we found what we were searching for, which
* is weird, but possible, so only screw with path if we didn't
* find the key and see if we have stuff that matches
*/
if (ret > 0) {
ret = 0;
if (path->slots[0] == 0)
break;
path->slots[0]--;
}
/* pull out the item */
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
/* make sure the item matches what we want */
if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
break;
if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
break;
/* release the path since we're done with it */
btrfs_release_path(path);
/*
* this is where we are basically btrfs_lookup, without the
* crossing root thing. we store the inode number in the
* offset of the orphan item.
*/
if (found_key.offset == last_objectid) {
btrfs_err(root->fs_info,
"Error removing orphan entry, stopping orphan cleanup");
ret = -EINVAL;
goto out;
}
last_objectid = found_key.offset;
found_key.objectid = found_key.offset;
found_key.type = BTRFS_INODE_ITEM_KEY;
found_key.offset = 0;
inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
ret = PTR_ERR_OR_ZERO(inode);
if (ret && ret != -ESTALE)
goto out;
if (ret == -ESTALE && root == root->fs_info->tree_root) {
struct btrfs_root *dead_root;
struct btrfs_fs_info *fs_info = root->fs_info;
int is_dead_root = 0;
/*
* this is an orphan in the tree root. Currently these
* could come from 2 sources:
* a) a snapshot deletion in progress
			 * b) a free space cache inode.
			 * We need to distinguish those two, as the snapshot
* orphan must not get deleted.
* find_dead_roots already ran before us, so if this
* is a snapshot deletion, we should find the root
* in the dead_roots list
*/
spin_lock(&fs_info->trans_lock);
list_for_each_entry(dead_root, &fs_info->dead_roots,
root_list) {
if (dead_root->root_key.objectid ==
found_key.objectid) {
is_dead_root = 1;
break;
}
}
spin_unlock(&fs_info->trans_lock);
if (is_dead_root) {
/* prevent this orphan from being found again */
key.offset = found_key.objectid - 1;
continue;
}
}
/*
* Inode is already gone but the orphan item is still there,
* kill the orphan item.
*/
if (ret == -ESTALE) {
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out;
}
btrfs_debug(root->fs_info, "auto deleting %Lu",
found_key.objectid);
ret = btrfs_del_orphan_item(trans, root,
found_key.objectid);
btrfs_end_transaction(trans, root);
if (ret)
goto out;
continue;
}
/*
* add this inode to the orphan list so btrfs_orphan_del does
* the proper thing when we hit it
*/
set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
&BTRFS_I(inode)->runtime_flags);
atomic_inc(&root->orphan_inodes);
		/* if we have links, this was a truncate, let's do that */
if (inode->i_nlink) {
if (WARN_ON(!S_ISREG(inode->i_mode))) {
iput(inode);
continue;
}
nr_truncate++;
/* 1 for the orphan item deletion. */
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
iput(inode);
ret = PTR_ERR(trans);
goto out;
}
ret = btrfs_orphan_add(trans, inode);
btrfs_end_transaction(trans, root);
if (ret) {
iput(inode);
goto out;
}
ret = btrfs_truncate(inode);
if (ret)
btrfs_orphan_del(NULL, inode);
} else {
nr_unlink++;
}
/* this will do delete_inode and everything for us */
iput(inode);
if (ret)
goto out;
}
/* release the path since we're done with it */
btrfs_release_path(path);
root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
if (root->orphan_block_rsv)
btrfs_block_rsv_release(root, root->orphan_block_rsv,
(u64)-1);
if (root->orphan_block_rsv ||
test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
trans = btrfs_join_transaction(root);
if (!IS_ERR(trans))
btrfs_end_transaction(trans, root);
}
if (nr_unlink)
btrfs_debug(root->fs_info, "unlinked %d orphans", nr_unlink);
if (nr_truncate)
btrfs_debug(root->fs_info, "truncated %d orphans", nr_truncate);
out:
if (ret)
btrfs_err(root->fs_info,
"could not do orphan cleanup %d", ret);
btrfs_free_path(path);
return ret;
}
/*
* very simple check to peek ahead in the leaf looking for xattrs. If we
* don't find any xattrs, we know there can't be any acls.
*
* slot is the slot the inode is in, objectid is the objectid of the inode
*/
static noinline int acls_after_inode_item(struct extent_buffer *leaf,
int slot, u64 objectid,
int *first_xattr_slot)
{
u32 nritems = btrfs_header_nritems(leaf);
struct btrfs_key found_key;
static u64 xattr_access = 0;
static u64 xattr_default = 0;
int scanned = 0;
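	/*
	 * lazily cache the name hashes of the two POSIX ACL xattrs; the init
	 * is racy but idempotent, so a duplicate computation is harmless.
	 */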
if (!xattr_access) {
xattr_access = btrfs_name_hash(POSIX_ACL_XATTR_ACCESS,
strlen(POSIX_ACL_XATTR_ACCESS));
xattr_default = btrfs_name_hash(POSIX_ACL_XATTR_DEFAULT,
strlen(POSIX_ACL_XATTR_DEFAULT));
}
slot++;
*first_xattr_slot = -1;
while (slot < nritems) {
btrfs_item_key_to_cpu(leaf, &found_key, slot);
/* we found a different objectid, there must not be acls */
if (found_key.objectid != objectid)
return 0;
/* we found an xattr, assume we've got an acl */
if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
if (*first_xattr_slot == -1)
*first_xattr_slot = slot;
if (found_key.offset == xattr_access ||
found_key.offset == xattr_default)
return 1;
}
/*
* we found a key greater than an xattr key, there can't
* be any acls later on
*/
if (found_key.type > BTRFS_XATTR_ITEM_KEY)
return 0;
slot++;
scanned++;
/*
* it goes inode, inode backrefs, xattrs, extents,
* so if there are a ton of hard links to an inode there can
* be a lot of backrefs. Don't waste time searching too hard,
* this is just an optimization
*/
if (scanned >= 8)
break;
}
/* we hit the end of the leaf before we found an xattr or
* something larger than an xattr. We have to assume the inode
* has acls
*/
if (*first_xattr_slot == -1)
*first_xattr_slot = slot;
return 1;
}
/*
* read an inode from the btree into the in-memory inode
*/
static void btrfs_read_locked_inode(struct inode *inode)
{
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_inode_item *inode_item;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_key location;
unsigned long ptr;
int maybe_acls;
u32 rdev;
int ret;
bool filled = false;
int first_xattr_slot;
ret = btrfs_fill_inode(inode, &rdev);
if (!ret)
filled = true;
path = btrfs_alloc_path();
if (!path)
goto make_bad;
memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
if (ret)
goto make_bad;
leaf = path->nodes[0];
if (filled)
goto cache_index;
inode_item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_inode_item);
inode->i_mode = btrfs_inode_mode(leaf, inode_item);
set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);
inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);
inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime);
inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime);
BTRFS_I(inode)->i_otime.tv_sec =
btrfs_timespec_sec(leaf, &inode_item->otime);
BTRFS_I(inode)->i_otime.tv_nsec =
btrfs_timespec_nsec(leaf, &inode_item->otime);
inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
inode->i_version = btrfs_inode_sequence(leaf, inode_item);
inode->i_generation = BTRFS_I(inode)->generation;
inode->i_rdev = 0;
rdev = btrfs_inode_rdev(leaf, inode_item);
BTRFS_I(inode)->index_cnt = (u64)-1;
BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
cache_index:
/*
* If we were modified in the current generation and evicted from memory
* and then re-read we need to do a full sync since we don't have any
* idea about which extents were modified before we were evicted from
* cache.
*
* This is required for both inode re-read from disk and delayed inode
* in delayed_nodes_tree.
*/
if (BTRFS_I(inode)->last_trans == root->fs_info->generation)
set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
&BTRFS_I(inode)->runtime_flags);
/*
* We don't persist the id of the transaction where an unlink operation
* against the inode was last made. So here we assume the inode might
* have been evicted, and therefore the exact value of last_unlink_trans
* lost, and set it to last_trans to avoid metadata inconsistencies
* between the inode and its parent if the inode is fsync'ed and the log
* replayed. For example, in the scenario:
*
* touch mydir/foo
* ln mydir/foo mydir/bar
* sync
* unlink mydir/bar
* echo 2 > /proc/sys/vm/drop_caches # evicts inode
* xfs_io -c fsync mydir/foo
* <power failure>
* mount fs, triggers fsync log replay
*
* We must make sure that when we fsync our inode foo we also log its
* parent inode, otherwise after log replay the parent still has the
* dentry with the "bar" name but our inode foo has a link count of 1
* and doesn't have an inode ref with the name "bar" anymore.
*
* Setting last_unlink_trans to last_trans is a pessimistic approach,
	 * but it guarantees correctness at the expense of occasional full
* transaction commits on fsync if our inode is a directory, or if our
* inode is not a directory, logging its parent unnecessarily.
*/
BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
path->slots[0]++;
if (inode->i_nlink != 1 ||
path->slots[0] >= btrfs_header_nritems(leaf))
goto cache_acl;
btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
if (location.objectid != btrfs_ino(inode))
goto cache_acl;
ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
if (location.type == BTRFS_INODE_REF_KEY) {
struct btrfs_inode_ref *ref;
ref = (struct btrfs_inode_ref *)ptr;
BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
} else if (location.type == BTRFS_INODE_EXTREF_KEY) {
struct btrfs_inode_extref *extref;
extref = (struct btrfs_inode_extref *)ptr;
BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
extref);
}
cache_acl:
/*
* try to precache a NULL acl entry for files that don't have
* any xattrs or acls
*/
maybe_acls = acls_after_inode_item(leaf, path->slots[0],
btrfs_ino(inode), &first_xattr_slot);
if (first_xattr_slot != -1) {
path->slots[0] = first_xattr_slot;
ret = btrfs_load_inode_props(inode, path);
if (ret)
btrfs_err(root->fs_info,
"error loading props for ino %llu (root %llu): %d",
btrfs_ino(inode),
root->root_key.objectid, ret);
}
btrfs_free_path(path);
if (!maybe_acls)
cache_no_acl(inode);
switch (inode->i_mode & S_IFMT) {
case S_IFREG:
inode->i_mapping->a_ops = &btrfs_aops;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
inode->i_fop = &btrfs_file_operations;
inode->i_op = &btrfs_file_inode_operations;
break;
case S_IFDIR:
inode->i_fop = &btrfs_dir_file_operations;
if (root == root->fs_info->tree_root)
inode->i_op = &btrfs_dir_ro_inode_operations;
else
inode->i_op = &btrfs_dir_inode_operations;
break;
case S_IFLNK:
inode->i_op = &btrfs_symlink_inode_operations;
inode->i_mapping->a_ops = &btrfs_symlink_aops;
break;
default:
inode->i_op = &btrfs_special_inode_operations;
init_special_inode(inode, inode->i_mode, rdev);
break;
}
btrfs_update_iflags(inode);
return;
make_bad:
btrfs_free_path(path);
make_bad_inode(inode);
}
/*
* given a leaf and an inode, copy the inode fields into the leaf
*/
static void fill_inode_item(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf,
struct btrfs_inode_item *item,
struct inode *inode)
{
struct btrfs_map_token token;
btrfs_init_map_token(&token);
btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size,
&token);
btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
btrfs_set_token_timespec_sec(leaf, &item->atime,
inode->i_atime.tv_sec, &token);
btrfs_set_token_timespec_nsec(leaf, &item->atime,
inode->i_atime.tv_nsec, &token);
btrfs_set_token_timespec_sec(leaf, &item->mtime,
inode->i_mtime.tv_sec, &token);
btrfs_set_token_timespec_nsec(leaf, &item->mtime,
inode->i_mtime.tv_nsec, &token);
btrfs_set_token_timespec_sec(leaf, &item->ctime,
inode->i_ctime.tv_sec, &token);
btrfs_set_token_timespec_nsec(leaf, &item->ctime,
inode->i_ctime.tv_nsec, &token);
btrfs_set_token_timespec_sec(leaf, &item->otime,
BTRFS_I(inode)->i_otime.tv_sec, &token);
btrfs_set_token_timespec_nsec(leaf, &item->otime,
BTRFS_I(inode)->i_otime.tv_nsec, &token);
btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
&token);
btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
&token);
btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
btrfs_set_token_inode_block_group(leaf, item, 0, &token);
}
/*
* copy everything in the in-memory inode into the btree.
*/
static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode)
{
struct btrfs_inode_item *inode_item;
struct btrfs_path *path;
struct extent_buffer *leaf;
int ret;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
path->leave_spinning = 1;
ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
1);
if (ret) {
if (ret > 0)
ret = -ENOENT;
goto failed;
}
leaf = path->nodes[0];
inode_item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_inode_item);
fill_inode_item(trans, leaf, inode_item, inode);
btrfs_mark_buffer_dirty(leaf);
btrfs_set_inode_last_trans(trans, inode);
ret = 0;
failed:
btrfs_free_path(path);
return ret;
}
/*
* copy everything in the in-memory inode into the btree.
*/
noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode)
{
int ret;
/*
* If the inode is a free space inode, we can deadlock during commit
* if we put it into the delayed code.
*
* The data relocation inode should also be directly updated
* without delay
*/
	if (!btrfs_is_free_space_inode(inode) &&
	    root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID &&
	    !root->fs_info->log_root_recovering) {
btrfs_update_root_times(trans, root);
ret = btrfs_delayed_update_inode(trans, root, inode);
if (!ret)
btrfs_set_inode_last_trans(trans, inode);
return ret;
}
return btrfs_update_inode_item(trans, root, inode);
}
noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *inode)
{
int ret;
ret = btrfs_update_inode(trans, root, inode);
if (ret == -ENOSPC)
return btrfs_update_inode_item(trans, root, inode);
return ret;
}
/*
* unlink helper that gets used here in inode.c and in the tree logging
* recovery code. It remove a link in a directory with a given name, and
* also drops the back refs in the inode to the directory
*/
static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *dir, struct inode *inode,
const char *name, int name_len)
{
struct btrfs_path *path;
int ret = 0;
struct extent_buffer *leaf;
struct btrfs_dir_item *di;
struct btrfs_key key;
u64 index;
u64 ino = btrfs_ino(inode);
u64 dir_ino = btrfs_ino(dir);
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
goto out;
}
path->leave_spinning = 1;
di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
name, name_len, -1);
if (IS_ERR(di)) {
ret = PTR_ERR(di);
goto err;
}
if (!di) {
ret = -ENOENT;
goto err;
}
leaf = path->nodes[0];
btrfs_dir_item_key_to_cpu(leaf, di, &key);
ret = btrfs_delete_one_dir_name(trans, root, path, di);
if (ret)
goto err;
btrfs_release_path(path);
	/*
	 * If we don't have a dir index, we have to get it by looking up
	 * the inode ref; and since we fetch the inode ref anyway, we remove
	 * it directly, as there is no point in a delayed deletion.
	 *
	 * But if we do have a dir index, we needn't search the inode ref
	 * to get it. Since the inode ref is close to the inode item, it is
	 * better to delay its deletion and do it when we update the inode
	 * item.
	 */
if (BTRFS_I(inode)->dir_index) {
ret = btrfs_delayed_delete_inode_ref(inode);
if (!ret) {
index = BTRFS_I(inode)->dir_index;
goto skip_backref;
}
}
ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
dir_ino, &index);
if (ret) {
btrfs_info(root->fs_info,
"failed to delete reference to %.*s, inode %llu parent %llu",
name_len, name, ino, dir_ino);
btrfs_abort_transaction(trans, root, ret);
goto err;
}
skip_backref:
ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto err;
}
ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
inode, dir_ino);
if (ret != 0 && ret != -ENOENT) {
btrfs_abort_transaction(trans, root, ret);
goto err;
}
ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
dir, index);
if (ret == -ENOENT)
ret = 0;
else if (ret)
btrfs_abort_transaction(trans, root, ret);
err:
btrfs_free_path(path);
if (ret)
goto out;
btrfs_i_size_write(dir, dir->i_size - name_len * 2);
inode_inc_iversion(inode);
inode_inc_iversion(dir);
inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
ret = btrfs_update_inode(trans, root, dir);
out:
return ret;
}
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *dir, struct inode *inode,
const char *name, int name_len)
{
int ret;
ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
if (!ret) {
drop_nlink(inode);
ret = btrfs_update_inode(trans, root, inode);
}
return ret;
}
/*
* helper to start transaction for unlink and rmdir.
*
* unlink and rmdir are special in btrfs, they do not always free space, so
 * if we cannot make our reservations the normal way, try and see if there is
 * enough slack room in the global reserve to migrate; otherwise we cannot
* allow the unlink to occur.
*/
static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
int ret;
/*
* 1 for the possible orphan item
* 1 for the dir item
* 1 for the dir index
* 1 for the inode ref
* 1 for the inode
*/
trans = btrfs_start_transaction(root, 5);
if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
return trans;
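	/*
	 * -ENOSPC: retry with no reservation and try to migrate the bytes we
	 * need from the global reserve into the transaction's block rsv.
	 */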
if (PTR_ERR(trans) == -ENOSPC) {
u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans))
return trans;
ret = btrfs_cond_migrate_bytes(root->fs_info,
&root->fs_info->trans_block_rsv,
num_bytes, 5);
if (ret) {
btrfs_end_transaction(trans, root);
return ERR_PTR(ret);
}
trans->block_rsv = &root->fs_info->trans_block_rsv;
trans->bytes_reserved = num_bytes;
}
return trans;
}
static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
{
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_trans_handle *trans;
struct inode *inode = d_inode(dentry);
int ret;
trans = __unlink_start_trans(dir);
if (IS_ERR(trans))
return PTR_ERR(trans);
btrfs_record_unlink_dir(trans, dir, d_inode(dentry), 0);
ret = btrfs_unlink_inode(trans, root, dir, d_inode(dentry),
dentry->d_name.name, dentry->d_name.len);
if (ret)
goto out;
if (inode->i_nlink == 0) {
ret = btrfs_orphan_add(trans, inode);
if (ret)
goto out;
}
out:
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root);
return ret;
}
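/*
 * remove the directory entry that points at a subvolume root: delete the
 * dir item, the root ref in the tree of tree roots and the dir index,
 * then update the parent directory's size and times.
 */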
int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *dir, u64 objectid,
const char *name, int name_len)
{
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_dir_item *di;
struct btrfs_key key;
u64 index;
int ret;
u64 dir_ino = btrfs_ino(dir);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
name, name_len, -1);
if (IS_ERR_OR_NULL(di)) {
if (!di)
ret = -ENOENT;
else
ret = PTR_ERR(di);
goto out;
}
leaf = path->nodes[0];
btrfs_dir_item_key_to_cpu(leaf, di, &key);
WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
ret = btrfs_delete_one_dir_name(trans, root, path, di);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto out;
}
btrfs_release_path(path);
ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
objectid, root->root_key.objectid,
dir_ino, &index, name, name_len);
if (ret < 0) {
if (ret != -ENOENT) {
btrfs_abort_transaction(trans, root, ret);
goto out;
}
di = btrfs_search_dir_index_item(root, path, dir_ino,
name, name_len);
if (IS_ERR_OR_NULL(di)) {
if (!di)
ret = -ENOENT;
else
ret = PTR_ERR(di);
btrfs_abort_transaction(trans, root, ret);
goto out;
}
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
btrfs_release_path(path);
index = key.offset;
}
btrfs_release_path(path);
ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto out;
}
btrfs_i_size_write(dir, dir->i_size - name_len * 2);
inode_inc_iversion(dir);
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
ret = btrfs_update_inode_fallback(trans, root, dir);
if (ret)
btrfs_abort_transaction(trans, root, ret);
out:
btrfs_free_path(path);
return ret;
}
static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = d_inode(dentry);
int err = 0;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_trans_handle *trans;
if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
return -ENOTEMPTY;
if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
return -EPERM;
trans = __unlink_start_trans(dir);
if (IS_ERR(trans))
return PTR_ERR(trans);
if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
err = btrfs_unlink_subvol(trans, root, dir,
BTRFS_I(inode)->location.objectid,
dentry->d_name.name,
dentry->d_name.len);
goto out;
}
err = btrfs_orphan_add(trans, inode);
if (err)
goto out;
/* now the directory is empty */
err = btrfs_unlink_inode(trans, root, dir, d_inode(dentry),
dentry->d_name.name, dentry->d_name.len);
if (!err)
btrfs_i_size_write(inode, 0);
out:
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root);
return err;
}
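/*
 * try to refill the transaction's space reservation with enough metadata
 * room for the csum deletions implied by @bytes_deleted; failure tells the
 * truncate loop to end this transaction and restart.
 */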
static int truncate_space_check(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytes_deleted)
{
int ret;
bytes_deleted = btrfs_csum_bytes_to_leaves(root, bytes_deleted);
ret = btrfs_block_rsv_add(root, &root->fs_info->trans_block_rsv,
bytes_deleted, BTRFS_RESERVE_NO_FLUSH);
if (!ret)
trans->bytes_reserved += bytes_deleted;
return ret;
}
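/*
 * truncate an inline extent down to @new_size; compressed inline extents
 * are handled by zeroing the tail of their page instead of resizing the
 * item (see the comment inside for why).
 */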
static int truncate_inline_extent(struct inode *inode,
struct btrfs_path *path,
struct btrfs_key *found_key,
const u64 item_end,
const u64 new_size)
{
struct extent_buffer *leaf = path->nodes[0];
int slot = path->slots[0];
struct btrfs_file_extent_item *fi;
u32 size = (u32)(new_size - found_key->offset);
struct btrfs_root *root = BTRFS_I(inode)->root;
fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) {
loff_t offset = new_size;
loff_t page_end = ALIGN(offset, PAGE_CACHE_SIZE);
/*
		 * Zero out the remainder of the last page of our inline extent,
* instead of directly truncating our inline extent here - that
* would be much more complex (decompressing all the data, then
* compressing the truncated data, which might be bigger than
* the size of the inline extent, resize the extent, etc).
* We release the path because to get the page we might need to
* read the extent item from disk (data not in the page cache).
*/
btrfs_release_path(path);
return btrfs_truncate_page(inode, offset, page_end - offset, 0);
}
btrfs_set_file_extent_ram_bytes(leaf, fi, size);
size = btrfs_file_extent_calc_inline_size(size);
btrfs_truncate_item(root, path, size, 1);
if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
inode_sub_bytes(inode, item_end + 1 - new_size);
return 0;
}
/*
* this can truncate away extent items, csum items and directory items.
* It starts at a high offset and removes keys until it can't find
* any higher than new_size
*
* csum items that cross the new i_size are truncated to the new size
* as well.
*
* min_type is the minimum key type to truncate down to. If set to 0, this
* will kill all the items on this inode, including the INODE_ITEM_KEY.
*/
int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *inode,
u64 new_size, u32 min_type)
{
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_file_extent_item *fi;
struct btrfs_key key;
struct btrfs_key found_key;
u64 extent_start = 0;
u64 extent_num_bytes = 0;
u64 extent_offset = 0;
u64 item_end = 0;
u64 last_size = new_size;
u32 found_type = (u8)-1;
int found_extent;
int del_item;
int pending_del_nr = 0;
int pending_del_slot = 0;
int extent_type = -1;
int ret;
int err = 0;
u64 ino = btrfs_ino(inode);
u64 bytes_deleted = 0;
bool be_nice = 0;
bool should_throttle = 0;
bool should_end = 0;
BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
/*
* for non-free space inodes and ref cows, we want to back off from
* time to time
*/
if (!btrfs_is_free_space_inode(inode) &&
test_bit(BTRFS_ROOT_REF_COWS, &root->state))
be_nice = 1;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
path->reada = -1;
/*
* We want to drop from the next block forward in case this new size is
* not block aligned since we will be keeping the last block of the
* extent just the way it is.
*/
if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
root == root->fs_info->tree_root)
btrfs_drop_extent_cache(inode, ALIGN(new_size,
root->sectorsize), (u64)-1, 0);
/*
* This function is also used to drop the items in the log tree before
* we relog the inode, so if root != BTRFS_I(inode)->root, it means
	 * it is used to drop the logged items. So we shouldn't kill the delayed
* items.
*/
if (min_type == 0 && root == BTRFS_I(inode)->root)
btrfs_kill_delayed_inode_items(inode);
key.objectid = ino;
key.offset = (u64)-1;
key.type = (u8)-1;
search_again:
/*
* with a 16K leaf size and 128MB extents, you can actually queue
* up a huge file in a single leaf. Most of the time that
* bytes_deleted is > 0, it will be huge by the time we get here
*/
if (be_nice && bytes_deleted > 32 * 1024 * 1024) {
if (btrfs_should_end_transaction(trans, root)) {
err = -EAGAIN;
goto error;
}
}
path->leave_spinning = 1;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret < 0) {
err = ret;
goto out;
}
if (ret > 0) {
/* there are no items in the tree for us to truncate, we're
* done
*/
if (path->slots[0] == 0)
goto out;
path->slots[0]--;
}
while (1) {
fi = NULL;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
found_type = found_key.type;
if (found_key.objectid != ino)
break;
if (found_type < min_type)
break;
item_end = found_key.offset;
if (found_type == BTRFS_EXTENT_DATA_KEY) {
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
extent_type = btrfs_file_extent_type(leaf, fi);
if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
item_end +=
btrfs_file_extent_num_bytes(leaf, fi);
			} else { /* BTRFS_FILE_EXTENT_INLINE */
item_end += btrfs_file_extent_inline_len(leaf,
path->slots[0], fi);
}
item_end--;
}
if (found_type > min_type) {
del_item = 1;
} else {
if (item_end < new_size)
break;
if (found_key.offset >= new_size)
del_item = 1;
else
del_item = 0;
}
found_extent = 0;
/* FIXME, shrink the extent if the ref count is only 1 */
if (found_type != BTRFS_EXTENT_DATA_KEY)
goto delete;
if (del_item)
last_size = found_key.offset;
else
last_size = new_size;
if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
u64 num_dec;
extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
if (!del_item) {
u64 orig_num_bytes =
btrfs_file_extent_num_bytes(leaf, fi);
extent_num_bytes = ALIGN(new_size -
found_key.offset,
root->sectorsize);
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_num_bytes);
num_dec = (orig_num_bytes -
extent_num_bytes);
if (test_bit(BTRFS_ROOT_REF_COWS,
&root->state) &&
extent_start != 0)
inode_sub_bytes(inode, num_dec);
btrfs_mark_buffer_dirty(leaf);
} else {
extent_num_bytes =
btrfs_file_extent_disk_num_bytes(leaf,
fi);
extent_offset = found_key.offset -
btrfs_file_extent_offset(leaf, fi);
/* FIXME blocksize != 4096 */
num_dec = btrfs_file_extent_num_bytes(leaf, fi);
if (extent_start != 0) {
found_extent = 1;
if (test_bit(BTRFS_ROOT_REF_COWS,
&root->state))
inode_sub_bytes(inode, num_dec);
}
}
		} else { /* BTRFS_FILE_EXTENT_INLINE */
/*
* we can't truncate inline items that have had
* special encodings
*/
if (!del_item &&
btrfs_file_extent_encryption(leaf, fi) == 0 &&
btrfs_file_extent_other_encoding(leaf, fi) == 0) {
/*
* Need to release path in order to truncate a
* compressed extent. So delete any accumulated
* extent items so far.
*/
if (btrfs_file_extent_compression(leaf, fi) !=
BTRFS_COMPRESS_NONE && pending_del_nr) {
err = btrfs_del_items(trans, root, path,
pending_del_slot,
pending_del_nr);
if (err) {
btrfs_abort_transaction(trans,
root,
err);
goto error;
}
pending_del_nr = 0;
}
err = truncate_inline_extent(inode, path,
&found_key,
item_end,
new_size);
if (err) {
btrfs_abort_transaction(trans,
root, err);
goto error;
}
} else if (test_bit(BTRFS_ROOT_REF_COWS,
&root->state)) {
inode_sub_bytes(inode, item_end + 1 - new_size);
}
}
delete:
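		/*
		 * deletions are batched: the pending run grows while we walk
		 * backwards through adjacent slots and is flushed below before
		 * the slots can ever become non-adjacent.
		 */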
if (del_item) {
if (!pending_del_nr) {
/* no pending yet, add ourselves */
pending_del_slot = path->slots[0];
pending_del_nr = 1;
} else if (pending_del_nr &&
path->slots[0] + 1 == pending_del_slot) {
/* hop on the pending chunk */
pending_del_nr++;
pending_del_slot = path->slots[0];
} else {
BUG();
}
} else {
break;
}
should_throttle = 0;
if (found_extent &&
(test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
root == root->fs_info->tree_root)) {
btrfs_set_path_blocking(path);
bytes_deleted += extent_num_bytes;
ret = btrfs_free_extent(trans, root, extent_start,
extent_num_bytes, 0,
btrfs_header_owner(leaf),
ino, extent_offset, 0);
BUG_ON(ret);
if (btrfs_should_throttle_delayed_refs(trans, root))
btrfs_async_run_delayed_refs(root,
trans->delayed_ref_updates * 2, 0);
if (be_nice) {
if (truncate_space_check(trans, root,
extent_num_bytes)) {
should_end = 1;
}
if (btrfs_should_throttle_delayed_refs(trans,
root)) {
should_throttle = 1;
}
}
}
if (found_type == BTRFS_INODE_ITEM_KEY)
break;
if (path->slots[0] == 0 ||
path->slots[0] != pending_del_slot ||
should_throttle || should_end) {
if (pending_del_nr) {
ret = btrfs_del_items(trans, root, path,
pending_del_slot,
pending_del_nr);
if (ret) {
btrfs_abort_transaction(trans,
root, ret);
goto error;
}
pending_del_nr = 0;
}
btrfs_release_path(path);
if (should_throttle) {
unsigned long updates = trans->delayed_ref_updates;
if (updates) {
trans->delayed_ref_updates = 0;
ret = btrfs_run_delayed_refs(trans, root, updates * 2);
if (ret && !err)
err = ret;
}
}
/*
* if we failed to refill our space rsv, bail out
* and let the transaction restart
*/
if (should_end) {
err = -EAGAIN;
goto error;
}
goto search_again;
} else {
path->slots[0]--;
}
}
out:
if (pending_del_nr) {
ret = btrfs_del_items(trans, root, path, pending_del_slot,
pending_del_nr);
if (ret)
btrfs_abort_transaction(trans, root, ret);
}
error:
if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
btrfs_ordered_update_i_size(inode, last_size, NULL);
btrfs_free_path(path);
if (be_nice && bytes_deleted > 32 * 1024 * 1024) {
unsigned long updates = trans->delayed_ref_updates;
if (updates) {
trans->delayed_ref_updates = 0;
ret = btrfs_run_delayed_refs(trans, root, updates * 2);
if (ret && !err)
err = ret;
}
}
return err;
}
/*
* btrfs_truncate_page - read, zero a chunk and write a page
* @inode - inode that we're zeroing
* @from - the offset to start zeroing
 * @len - the length to zero, 0 to zero from the offset to the end of the
 *	page
* @front - zero up to the offset instead of from the offset on
*
 * This will find the page for the "from" offset, cow it and zero the part
 * we want to zero. This is used with truncate and hole punching.
*/
int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
int front)
{
struct address_space *mapping = inode->i_mapping;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct btrfs_ordered_extent *ordered;
struct extent_state *cached_state = NULL;
char *kaddr;
u32 blocksize = root->sectorsize;
pgoff_t index = from >> PAGE_CACHE_SHIFT;
unsigned offset = from & (PAGE_CACHE_SIZE-1);
struct page *page;
gfp_t mask = btrfs_alloc_write_mask(mapping);
int ret = 0;
u64 page_start;
u64 page_end;
if ((offset & (blocksize - 1)) == 0 &&
(!len || ((len & (blocksize - 1)) == 0)))
goto out;
ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
if (ret)
goto out;
again:
page = find_or_create_page(mapping, index, mask);
if (!page) {
btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
ret = -ENOMEM;
goto out;
}
page_start = page_offset(page);
page_end = page_start + PAGE_CACHE_SIZE - 1;
if (!PageUptodate(page)) {
ret = btrfs_readpage(NULL, page);
lock_page(page);
if (page->mapping != mapping) {
unlock_page(page);
page_cache_release(page);
goto again;
}
if (!PageUptodate(page)) {
ret = -EIO;
goto out_unlock;
}
}
wait_on_page_writeback(page);
lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
set_page_extent_mapped(page);
ordered = btrfs_lookup_ordered_extent(inode, page_start);
if (ordered) {
unlock_extent_cached(io_tree, page_start, page_end,
&cached_state, GFP_NOFS);
unlock_page(page);
page_cache_release(page);
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
goto again;
}
clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
0, 0, &cached_state, GFP_NOFS);
ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
&cached_state);
if (ret) {
unlock_extent_cached(io_tree, page_start, page_end,
&cached_state, GFP_NOFS);
goto out_unlock;
}
if (offset != PAGE_CACHE_SIZE) {
if (!len)
len = PAGE_CACHE_SIZE - offset;
kaddr = kmap(page);
if (front)
memset(kaddr, 0, offset);
else
memset(kaddr + offset, 0, len);
flush_dcache_page(page);
kunmap(page);
}
ClearPageChecked(page);
set_page_dirty(page);
unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
GFP_NOFS);
out_unlock:
if (ret)
btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
unlock_page(page);
page_cache_release(page);
out:
return ret;
}
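/*
 * Insert a dummy hole extent for [offset, offset + len) in its own
 * transaction.  If the filesystem has the NO_HOLES feature we only bump the
 * in-memory transaction markers so that an fsync still logs the hole.
 */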
static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
u64 offset, u64 len)
{
struct btrfs_trans_handle *trans;
int ret;
/*
* Still need to make sure the inode looks like it's been updated so
* that any holes get logged if we fsync.
*/
if (btrfs_fs_incompat(root->fs_info, NO_HOLES)) {
BTRFS_I(inode)->last_trans = root->fs_info->generation;
BTRFS_I(inode)->last_sub_trans = root->log_transid;
BTRFS_I(inode)->last_log_commit = root->last_log_commit;
return 0;
}
/*
* 1 - for the one we're dropping
* 1 - for the one we're adding
* 1 - for updating the inode.
*/
trans = btrfs_start_transaction(root, 3);
if (IS_ERR(trans))
return PTR_ERR(trans);
ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
btrfs_end_transaction(trans, root);
return ret;
}
ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
0, 0, len, 0, len, 0, 0, 0);
if (ret)
btrfs_abort_transaction(trans, root, ret);
else
btrfs_update_inode(trans, root, inode);
btrfs_end_transaction(trans, root);
return ret;
}
/*
* This function puts in dummy file extents for the area we're creating a hole
* for. So if we are truncating this file to a larger size we need to insert
 * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE for
* the range between oldsize and size
*/
int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct extent_map *em = NULL;
struct extent_state *cached_state = NULL;
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
u64 hole_start = ALIGN(oldsize, root->sectorsize);
u64 block_end = ALIGN(size, root->sectorsize);
u64 last_byte;
u64 cur_offset;
u64 hole_size;
int err = 0;
/*
* If our size started in the middle of a page we need to zero out the
* rest of the page before we expand the i_size, otherwise we could
* expose stale data.
*/
err = btrfs_truncate_page(inode, oldsize, 0, 0);
if (err)
return err;
if (size <= hole_start)
return 0;
while (1) {
struct btrfs_ordered_extent *ordered;
lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
&cached_state);
ordered = btrfs_lookup_ordered_range(inode, hole_start,
block_end - hole_start);
if (!ordered)
break;
unlock_extent_cached(io_tree, hole_start, block_end - 1,
&cached_state, GFP_NOFS);
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
}
cur_offset = hole_start;
while (1) {
em = btrfs_get_extent(inode, NULL, 0, cur_offset,
block_end - cur_offset, 0);
if (IS_ERR(em)) {
err = PTR_ERR(em);
em = NULL;
break;
}
last_byte = min(extent_map_end(em), block_end);
		last_byte = ALIGN(last_byte, root->sectorsize);
if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
struct extent_map *hole_em;
hole_size = last_byte - cur_offset;
err = maybe_insert_hole(root, inode, cur_offset,
hole_size);
if (err)
break;
btrfs_drop_extent_cache(inode, cur_offset,
cur_offset + hole_size - 1, 0);
hole_em = alloc_extent_map();
if (!hole_em) {
set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
&BTRFS_I(inode)->runtime_flags);
goto next;
}
hole_em->start = cur_offset;
hole_em->len = hole_size;
hole_em->orig_start = cur_offset;
hole_em->block_start = EXTENT_MAP_HOLE;
hole_em->block_len = 0;
hole_em->orig_block_len = 0;
hole_em->ram_bytes = hole_size;
hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
hole_em->compress_type = BTRFS_COMPRESS_NONE;
hole_em->generation = root->fs_info->generation;
while (1) {
write_lock(&em_tree->lock);
err = add_extent_mapping(em_tree, hole_em, 1);
write_unlock(&em_tree->lock);
if (err != -EEXIST)
break;
btrfs_drop_extent_cache(inode, cur_offset,
cur_offset +
hole_size - 1, 0);
}
free_extent_map(hole_em);
}
next:
free_extent_map(em);
em = NULL;
cur_offset = last_byte;
if (cur_offset >= block_end)
break;
}
free_extent_map(em);
unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
GFP_NOFS);
return err;
}
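/*
 * Helpers to wait until no snapshot creation is in flight, used below to
 * keep expanding truncates out of a snapshot that is being created.
 */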
static int wait_snapshoting_atomic_t(atomic_t *a)
{
schedule();
return 0;
}
static void wait_for_snapshot_creation(struct btrfs_root *root)
{
while (true) {
int ret;
ret = btrfs_start_write_no_snapshoting(root);
if (ret)
break;
wait_on_atomic_t(&root->will_be_snapshoted,
wait_snapshoting_atomic_t,
TASK_UNINTERRUPTIBLE);
}
}
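/*
 * Handle the ATTR_SIZE part of a setattr.  Expanding truncates go through
 * btrfs_cont_expand() to fill the new range with hole extents; shrinking
 * truncates add an orphan item first, so a crash in the middle of the
 * truncate can be cleaned up on the next mount.
 */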
static int btrfs_setsize(struct inode *inode, struct iattr *attr)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
loff_t oldsize = i_size_read(inode);
loff_t newsize = attr->ia_size;
int mask = attr->ia_valid;
int ret;
/*
* The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
* special case where we need to update the times despite not having
* these flags set. For all other operations the VFS set these flags
* explicitly if it wants a timestamp update.
*/
if (newsize != oldsize) {
inode_inc_iversion(inode);
if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
inode->i_ctime = inode->i_mtime =
current_fs_time(inode->i_sb);
}
if (newsize > oldsize) {
truncate_pagecache(inode, newsize);
/*
		 * Don't do an expanding truncate while snapshotting is ongoing.
* This is to ensure the snapshot captures a fully consistent
* state of this file - if the snapshot captures this expanding
* truncation, it must capture all writes that happened before
* this truncation.
*/
wait_for_snapshot_creation(root);
ret = btrfs_cont_expand(inode, oldsize, newsize);
if (ret) {
btrfs_end_write_no_snapshoting(root);
return ret;
}
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
btrfs_end_write_no_snapshoting(root);
return PTR_ERR(trans);
}
i_size_write(inode, newsize);
btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
ret = btrfs_update_inode(trans, root, inode);
btrfs_end_write_no_snapshoting(root);
btrfs_end_transaction(trans, root);
} else {
/*
* We're truncating a file that used to have good data down to
* zero. Make sure it gets into the ordered flush list so that
* any new writes get down to disk quickly.
*/
if (newsize == 0)
set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
&BTRFS_I(inode)->runtime_flags);
/*
* 1 for the orphan item we're going to add
* 1 for the orphan item deletion.
*/
trans = btrfs_start_transaction(root, 2);
if (IS_ERR(trans))
return PTR_ERR(trans);
/*
* We need to do this in case we fail at _any_ point during the
* actual truncate. Once we do the truncate_setsize we could
* invalidate pages which forces any outstanding ordered io to
* be instantly completed which will give us extents that need
* to be truncated. If we fail to get an orphan inode down we
* could have left over extents that were never meant to live,
		 * so we need to guarantee from this point on that everything
* will be consistent.
*/
ret = btrfs_orphan_add(trans, inode);
btrfs_end_transaction(trans, root);
if (ret)
return ret;
/* we don't support swapfiles, so vmtruncate shouldn't fail */
truncate_setsize(inode, newsize);
		/* Disable nonlocked read DIO to avoid the endless truncate */
btrfs_inode_block_unlocked_dio(inode);
inode_dio_wait(inode);
btrfs_inode_resume_unlocked_dio(inode);
ret = btrfs_truncate(inode);
if (ret && inode->i_nlink) {
int err;
/*
			 * failed to truncate, disk_i_size is only adjusted down
			 * as we remove extents, so it should represent the true
			 * size of the inode; reset the in-memory size and
			 * delete our orphan entry.
*/
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
btrfs_orphan_del(NULL, inode);
return ret;
}
i_size_write(inode, BTRFS_I(inode)->disk_i_size);
err = btrfs_orphan_del(trans, inode);
if (err)
btrfs_abort_transaction(trans, root, err);
btrfs_end_transaction(trans, root);
}
}
return ret;
}
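/*
 * ->setattr callback: handle size changes via btrfs_setsize(), then copy
 * the remaining attributes and dirty the inode (including an ACL update
 * for mode changes).
 */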
static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
struct btrfs_root *root = BTRFS_I(inode)->root;
int err;
if (btrfs_root_readonly(root))
return -EROFS;
err = inode_change_ok(inode, attr);
if (err)
return err;
if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
err = btrfs_setsize(inode, attr);
if (err)
return err;
}
if (attr->ia_valid) {
setattr_copy(inode, attr);
inode_inc_iversion(inode);
err = btrfs_dirty_inode(inode);
if (!err && attr->ia_valid & ATTR_MODE)
err = posix_acl_chmod(inode, inode->i_mode);
}
return err;
}
/*
* While truncating the inode pages during eviction, we get the VFS calling
* btrfs_invalidatepage() against each page of the inode. This is slow because
* the calls to btrfs_invalidatepage() result in a huge amount of calls to
* lock_extent_bits() and clear_extent_bit(), which keep merging and splitting
* extent_state structures over and over, wasting lots of time.
*
* Therefore if the inode is being evicted, let btrfs_invalidatepage() skip all
* those expensive operations on a per page basis and do only the ordered io
* finishing, while we release here the extent_map and extent_state structures,
* without the excessive merging and splitting.
*/
static void evict_inode_truncate_pages(struct inode *inode)
{
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree;
struct rb_node *node;
ASSERT(inode->i_state & I_FREEING);
truncate_inode_pages_final(&inode->i_data);
write_lock(&map_tree->lock);
while (!RB_EMPTY_ROOT(&map_tree->map)) {
struct extent_map *em;
node = rb_first(&map_tree->map);
em = rb_entry(node, struct extent_map, rb_node);
clear_bit(EXTENT_FLAG_PINNED, &em->flags);
clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
remove_extent_mapping(map_tree, em);
free_extent_map(em);
if (need_resched()) {
write_unlock(&map_tree->lock);
cond_resched();
write_lock(&map_tree->lock);
}
}
write_unlock(&map_tree->lock);
/*
* Keep looping until we have no more ranges in the io tree.
* We can have ongoing bios started by readpages (called from readahead)
* that have their endio callback (extent_io.c:end_bio_extent_readpage)
	 * still in progress (unlocked the pages in the bio but did not yet
	 * unlock the ranges in the io tree). Therefore this means some
* ranges can still be locked and eviction started because before
* submitting those bios, which are executed by a separate task (work
* queue kthread), inode references (inode->i_count) were not taken
* (which would be dropped in the end io callback of each bio).
* Therefore here we effectively end up waiting for those bios and
* anyone else holding locked ranges without having bumped the inode's
* reference count - if we don't do it, when they access the inode's
	 * io_tree to unlock a range it may be too late, leading to a
	 * use-after-free issue.
*/
spin_lock(&io_tree->lock);
while (!RB_EMPTY_ROOT(&io_tree->state)) {
struct extent_state *state;
struct extent_state *cached_state = NULL;
u64 start;
u64 end;
node = rb_first(&io_tree->state);
state = rb_entry(node, struct extent_state, rb_node);
start = state->start;
end = state->end;
spin_unlock(&io_tree->lock);
lock_extent_bits(io_tree, start, end, 0, &cached_state);
clear_extent_bit(io_tree, start, end,
EXTENT_LOCKED | EXTENT_DIRTY |
EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
EXTENT_DEFRAG, 1, 1,
&cached_state, GFP_NOFS);
cond_resched();
spin_lock(&io_tree->lock);
}
spin_unlock(&io_tree->lock);
}
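/*
 * Final iput path: truncate away all items belonging to the inode, using a
 * temporary block reserve (refilled from, or stolen from, the global
 * reserve) so the truncate can make forward progress, and finally drop the
 * orphan item.
 */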
void btrfs_evict_inode(struct inode *inode)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_block_rsv *rsv, *global_rsv;
int steal_from_global = 0;
u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
int ret;
trace_btrfs_inode_evict(inode);
evict_inode_truncate_pages(inode);
if (inode->i_nlink &&
((btrfs_root_refs(&root->root_item) != 0 &&
root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
btrfs_is_free_space_inode(inode)))
goto no_delete;
if (is_bad_inode(inode)) {
btrfs_orphan_del(NULL, inode);
goto no_delete;
}
/* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
if (!special_file(inode->i_mode))
btrfs_wait_ordered_range(inode, 0, (u64)-1);
btrfs_free_io_failure_record(inode, 0, (u64)-1);
if (root->fs_info->log_root_recovering) {
BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
&BTRFS_I(inode)->runtime_flags));
goto no_delete;
}
if (inode->i_nlink > 0) {
BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
goto no_delete;
}
ret = btrfs_commit_inode_delayed_inode(inode);
if (ret) {
btrfs_orphan_del(NULL, inode);
goto no_delete;
}
rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
if (!rsv) {
btrfs_orphan_del(NULL, inode);
goto no_delete;
}
rsv->size = min_size;
rsv->failfast = 1;
global_rsv = &root->fs_info->global_block_rsv;
btrfs_i_size_write(inode, 0);
/*
* This is a bit simpler than btrfs_truncate since we've already
* reserved our space for our orphan item in the unlink, so we just
* need to reserve some slack space in case we add bytes and update
* inode item when doing the truncate.
*/
while (1) {
ret = btrfs_block_rsv_refill(root, rsv, min_size,
BTRFS_RESERVE_FLUSH_LIMIT);
/*
* Try and steal from the global reserve since we will
		 * likely not use this space anyway; we want to try as
* hard as possible to get this to work.
*/
if (ret)
steal_from_global++;
else
steal_from_global = 0;
ret = 0;
/*
* steal_from_global == 0: we reserved stuff, hooray!
* steal_from_global == 1: we didn't reserve stuff, boo!
* steal_from_global == 2: we've committed, still not a lot of
* room but maybe we'll have room in the global reserve this
* time.
* steal_from_global == 3: abandon all hope!
*/
if (steal_from_global > 2) {
btrfs_warn(root->fs_info,
"Could not get space for a delete, will truncate on mount %d",
ret);
btrfs_orphan_del(NULL, inode);
btrfs_free_block_rsv(root, rsv);
goto no_delete;
}
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
btrfs_orphan_del(NULL, inode);
btrfs_free_block_rsv(root, rsv);
goto no_delete;
}
/*
		 * We can't just steal from the global reserve, we need to make
* sure there is room to do it, if not we need to commit and try
* again.
*/
if (steal_from_global) {
if (!btrfs_check_space_for_delayed_refs(trans, root))
ret = btrfs_block_rsv_migrate(global_rsv, rsv,
min_size);
else
ret = -ENOSPC;
}
/*
* Couldn't steal from the global reserve, we have too much
* pending stuff built up, commit the transaction and try it
* again.
*/
if (ret) {
ret = btrfs_commit_transaction(trans, root);
if (ret) {
btrfs_orphan_del(NULL, inode);
btrfs_free_block_rsv(root, rsv);
goto no_delete;
}
continue;
} else {
steal_from_global = 0;
}
trans->block_rsv = rsv;
ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
if (ret != -ENOSPC && ret != -EAGAIN)
break;
trans->block_rsv = &root->fs_info->trans_block_rsv;
btrfs_end_transaction(trans, root);
trans = NULL;
btrfs_btree_balance_dirty(root);
}
btrfs_free_block_rsv(root, rsv);
/*
* Errors here aren't a big deal, it just means we leave orphan items
* in the tree. They will be cleaned up on the next mount.
*/
if (ret == 0) {
trans->block_rsv = root->orphan_block_rsv;
btrfs_orphan_del(trans, inode);
} else {
btrfs_orphan_del(NULL, inode);
}
trans->block_rsv = &root->fs_info->trans_block_rsv;
if (!(root == root->fs_info->tree_root ||
root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
btrfs_return_ino(root, btrfs_ino(inode));
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root);
no_delete:
btrfs_remove_delayed_node(inode);
clear_inode(inode);
return;
}
/*
* this returns the key found in the dir entry in the location pointer.
* If no dir entries were found, location->objectid is 0.
*/
static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
struct btrfs_key *location)
{
const char *name = dentry->d_name.name;
int namelen = dentry->d_name.len;
struct btrfs_dir_item *di;
struct btrfs_path *path;
struct btrfs_root *root = BTRFS_I(dir)->root;
int ret = 0;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
namelen, 0);
if (IS_ERR(di))
ret = PTR_ERR(di);
if (IS_ERR_OR_NULL(di))
goto out_err;
btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
out:
btrfs_free_path(path);
return ret;
out_err:
location->objectid = 0;
goto out;
}
/*
* when we hit a tree root in a directory, the btrfs part of the inode
* needs to be changed to reflect the root directory of the tree root. This
* is kind of like crossing a mount point.
*/
static int fixup_tree_root_location(struct btrfs_root *root,
struct inode *dir,
struct dentry *dentry,
struct btrfs_key *location,
struct btrfs_root **sub_root)
{
struct btrfs_path *path;
struct btrfs_root *new_root;
struct btrfs_root_ref *ref;
struct extent_buffer *leaf;
struct btrfs_key key;
int ret;
int err = 0;
path = btrfs_alloc_path();
if (!path) {
err = -ENOMEM;
goto out;
}
err = -ENOENT;
key.objectid = BTRFS_I(dir)->root->root_key.objectid;
key.type = BTRFS_ROOT_REF_KEY;
key.offset = location->objectid;
ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, path,
0, 0);
if (ret) {
if (ret < 0)
err = ret;
goto out;
}
leaf = path->nodes[0];
ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
goto out;
ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
(unsigned long)(ref + 1),
dentry->d_name.len);
if (ret)
goto out;
btrfs_release_path(path);
new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
if (IS_ERR(new_root)) {
err = PTR_ERR(new_root);
goto out;
}
*sub_root = new_root;
location->objectid = btrfs_root_dirid(&new_root->root_item);
location->type = BTRFS_INODE_ITEM_KEY;
location->offset = 0;
err = 0;
out:
btrfs_free_path(path);
return err;
}
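/* Insert the inode into the root's red-black tree of in-memory inodes. */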
static void inode_tree_add(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_inode *entry;
struct rb_node **p;
struct rb_node *parent;
struct rb_node *new = &BTRFS_I(inode)->rb_node;
u64 ino = btrfs_ino(inode);
if (inode_unhashed(inode))
return;
parent = NULL;
spin_lock(&root->inode_lock);
p = &root->inode_tree.rb_node;
while (*p) {
parent = *p;
entry = rb_entry(parent, struct btrfs_inode, rb_node);
if (ino < btrfs_ino(&entry->vfs_inode))
p = &parent->rb_left;
else if (ino > btrfs_ino(&entry->vfs_inode))
p = &parent->rb_right;
else {
WARN_ON(!(entry->vfs_inode.i_state &
(I_WILL_FREE | I_FREEING)));
rb_replace_node(parent, new, &root->inode_tree);
RB_CLEAR_NODE(parent);
spin_unlock(&root->inode_lock);
return;
}
}
rb_link_node(new, parent, p);
rb_insert_color(new, &root->inode_tree);
spin_unlock(&root->inode_lock);
}
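/*
 * Remove the inode from the root's red-black tree.  If that leaves the
 * tree empty and the root has no references left, queue the root for
 * cleanup.
 */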
static void inode_tree_del(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
int empty = 0;
spin_lock(&root->inode_lock);
if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
empty = RB_EMPTY_ROOT(&root->inode_tree);
}
spin_unlock(&root->inode_lock);
if (empty && btrfs_root_refs(&root->root_item) == 0) {
synchronize_srcu(&root->fs_info->subvol_srcu);
spin_lock(&root->inode_lock);
empty = RB_EMPTY_ROOT(&root->inode_tree);
spin_unlock(&root->inode_lock);
if (empty)
btrfs_add_dead_root(root);
}
}
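/*
 * Walk the root's inode tree in objectid order and drop our references so
 * the inodes can be evicted, restarting the search whenever the lock is
 * dropped.
 */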
void btrfs_invalidate_inodes(struct btrfs_root *root)
{
struct rb_node *node;
struct rb_node *prev;
struct btrfs_inode *entry;
struct inode *inode;
u64 objectid = 0;
if (!test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
WARN_ON(btrfs_root_refs(&root->root_item) != 0);
spin_lock(&root->inode_lock);
again:
node = root->inode_tree.rb_node;
prev = NULL;
while (node) {
prev = node;
entry = rb_entry(node, struct btrfs_inode, rb_node);
if (objectid < btrfs_ino(&entry->vfs_inode))
node = node->rb_left;
else if (objectid > btrfs_ino(&entry->vfs_inode))
node = node->rb_right;
else
break;
}
if (!node) {
while (prev) {
entry = rb_entry(prev, struct btrfs_inode, rb_node);
if (objectid <= btrfs_ino(&entry->vfs_inode)) {
node = prev;
break;
}
prev = rb_next(prev);
}
}
while (node) {
entry = rb_entry(node, struct btrfs_inode, rb_node);
objectid = btrfs_ino(&entry->vfs_inode) + 1;
inode = igrab(&entry->vfs_inode);
if (inode) {
spin_unlock(&root->inode_lock);
if (atomic_read(&inode->i_count) > 1)
d_prune_aliases(inode);
/*
* btrfs_drop_inode will have it removed from
* the inode cache when its usage count
* hits zero.
*/
iput(inode);
cond_resched();
spin_lock(&root->inode_lock);
goto again;
}
if (cond_resched_lock(&root->inode_lock))
goto again;
node = rb_next(node);
}
spin_unlock(&root->inode_lock);
}
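/*
 * iget5_locked() callbacks: btrfs_init_locked_inode() initializes a freshly
 * allocated inode, btrfs_find_actor() matches an existing one by location
 * and root.
 */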
static int btrfs_init_locked_inode(struct inode *inode, void *p)
{
struct btrfs_iget_args *args = p;
inode->i_ino = args->location->objectid;
memcpy(&BTRFS_I(inode)->location, args->location,
sizeof(*args->location));
BTRFS_I(inode)->root = args->root;
return 0;
}
static int btrfs_find_actor(struct inode *inode, void *opaque)
{
struct btrfs_iget_args *args = opaque;
return args->location->objectid == BTRFS_I(inode)->location.objectid &&
args->root == BTRFS_I(inode)->root;
}
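/*
 * Look up (or allocate) the VFS inode for a location/root pair, keyed by
 * the btrfs inode hash.
 */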
static struct inode *btrfs_iget_locked(struct super_block *s,
struct btrfs_key *location,
struct btrfs_root *root)
{
struct inode *inode;
struct btrfs_iget_args args;
unsigned long hashval = btrfs_inode_hash(location->objectid, root);
args.location = location;
args.root = root;
inode = iget5_locked(s, hashval, btrfs_find_actor,
btrfs_init_locked_inode,
(void *)&args);
return inode;
}
/* Get an inode object given its location and corresponding root.
 * Returns in *new whether the inode was read from disk
*/
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
struct btrfs_root *root, int *new)
{
struct inode *inode;
inode = btrfs_iget_locked(s, location, root);
if (!inode)
return ERR_PTR(-ENOMEM);
if (inode->i_state & I_NEW) {
btrfs_read_locked_inode(inode);
if (!is_bad_inode(inode)) {
inode_tree_add(inode);
unlock_new_inode(inode);
if (new)
*new = 1;
} else {
unlock_new_inode(inode);
iput(inode);
inode = ERR_PTR(-ESTALE);
}
}
return inode;
}
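/*
 * Build an in-memory dummy directory inode for a subvolume reference whose
 * root can't be resolved; it behaves like an empty read-only directory.
 */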
static struct inode *new_simple_dir(struct super_block *s,
struct btrfs_key *key,
struct btrfs_root *root)
{
struct inode *inode = new_inode(s);
if (!inode)
return ERR_PTR(-ENOMEM);
BTRFS_I(inode)->root = root;
memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
inode->i_op = &btrfs_dir_ro_inode_operations;
inode->i_fop = &simple_dir_operations;
inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
inode->i_mtime = CURRENT_TIME;
inode->i_atime = inode->i_mtime;
inode->i_ctime = inode->i_mtime;
BTRFS_I(inode)->i_otime = inode->i_mtime;
return inode;
}
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
{
struct inode *inode;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_root *sub_root = root;
struct btrfs_key location;
int index;
int ret = 0;
if (dentry->d_name.len > BTRFS_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
ret = btrfs_inode_by_name(dir, dentry, &location);
if (ret < 0)
return ERR_PTR(ret);
if (location.objectid == 0)
return ERR_PTR(-ENOENT);
if (location.type == BTRFS_INODE_ITEM_KEY) {
inode = btrfs_iget(dir->i_sb, &location, root, NULL);
return inode;
}
BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
index = srcu_read_lock(&root->fs_info->subvol_srcu);
ret = fixup_tree_root_location(root, dir, dentry,
&location, &sub_root);
if (ret < 0) {
if (ret != -ENOENT)
inode = ERR_PTR(ret);
else
inode = new_simple_dir(dir->i_sb, &location, sub_root);
} else {
inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
}
srcu_read_unlock(&root->fs_info->subvol_srcu, index);
if (!IS_ERR(inode) && root != sub_root) {
down_read(&root->fs_info->cleanup_work_sem);
if (!(inode->i_sb->s_flags & MS_RDONLY))
ret = btrfs_orphan_cleanup(sub_root);
up_read(&root->fs_info->cleanup_work_sem);
if (ret) {
iput(inode);
inode = ERR_PTR(ret);
}
}
return inode;
}
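/*
 * Tell the dcache to delete dentries that belong to dead roots or to the
 * dummy empty-subvolume directory inode, so they get looked up again.
 */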
static int btrfs_dentry_delete(const struct dentry *dentry)
{
struct btrfs_root *root;
struct inode *inode = d_inode(dentry);
if (!inode && !IS_ROOT(dentry))
inode = d_inode(dentry->d_parent);
if (inode) {
root = BTRFS_I(inode)->root;
if (btrfs_root_refs(&root->root_item) == 0)
return 1;
if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
return 1;
}
return 0;
}
static void btrfs_dentry_release(struct dentry *dentry)
{
kfree(dentry->d_fsdata);
}
static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
struct inode *inode;
inode = btrfs_lookup_dentry(dir, dentry);
if (IS_ERR(inode)) {
if (PTR_ERR(inode) == -ENOENT)
inode = NULL;
else
return ERR_CAST(inode);
}
return d_splice_alias(inode, dentry);
}
unsigned char btrfs_filetype_table[] = {
DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
};
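/*
 * ->iterate callback: walk the DIR_INDEX (or DIR_ITEM for the tree root)
 * keys of this directory, merge in the not-yet-committed delayed items,
 * and emit each entry to the VFS.
 */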
static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
{
struct inode *inode = file_inode(file);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_item *item;
struct btrfs_dir_item *di;
struct btrfs_key key;
struct btrfs_key found_key;
struct btrfs_path *path;
struct list_head ins_list;
struct list_head del_list;
int ret;
struct extent_buffer *leaf;
int slot;
unsigned char d_type;
int over = 0;
u32 di_cur;
u32 di_total;
u32 di_len;
int key_type = BTRFS_DIR_INDEX_KEY;
char tmp_name[32];
char *name_ptr;
int name_len;
int is_curr = 0; /* ctx->pos points to the current index? */
/* FIXME, use a real flag for deciding about the key type */
if (root->fs_info->tree_root == root)
key_type = BTRFS_DIR_ITEM_KEY;
if (!dir_emit_dots(file, ctx))
return 0;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
path->reada = 1;
if (key_type == BTRFS_DIR_INDEX_KEY) {
INIT_LIST_HEAD(&ins_list);
INIT_LIST_HEAD(&del_list);
btrfs_get_delayed_items(inode, &ins_list, &del_list);
}
key.type = key_type;
key.offset = ctx->pos;
key.objectid = btrfs_ino(inode);
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto err;
while (1) {
leaf = path->nodes[0];
slot = path->slots[0];
if (slot >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
if (ret < 0)
goto err;
else if (ret > 0)
break;
continue;
}
item = btrfs_item_nr(slot);
btrfs_item_key_to_cpu(leaf, &found_key, slot);
if (found_key.objectid != key.objectid)
break;
if (found_key.type != key_type)
break;
if (found_key.offset < ctx->pos)
goto next;
if (key_type == BTRFS_DIR_INDEX_KEY &&
btrfs_should_delete_dir_index(&del_list,
found_key.offset))
goto next;
ctx->pos = found_key.offset;
is_curr = 1;
di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
di_cur = 0;
di_total = btrfs_item_size(leaf, item);
while (di_cur < di_total) {
struct btrfs_key location;
if (verify_dir_item(root, leaf, di))
break;
name_len = btrfs_dir_name_len(leaf, di);
if (name_len <= sizeof(tmp_name)) {
name_ptr = tmp_name;
} else {
name_ptr = kmalloc(name_len, GFP_NOFS);
if (!name_ptr) {
ret = -ENOMEM;
goto err;
}
}
read_extent_buffer(leaf, name_ptr,
(unsigned long)(di + 1), name_len);
d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
btrfs_dir_item_key_to_cpu(leaf, di, &location);
/* is this a reference to our own snapshot? If so
* skip it.
*
* In contrast to old kernels, we insert the snapshot's
* dir item and dir index after it has been created, so
* we won't find a reference to our own snapshot. We
* still keep the following code for backward
* compatibility.
*/
if (location.type == BTRFS_ROOT_ITEM_KEY &&
location.objectid == root->root_key.objectid) {
over = 0;
goto skip;
}
over = !dir_emit(ctx, name_ptr, name_len,
location.objectid, d_type);
skip:
if (name_ptr != tmp_name)
kfree(name_ptr);
if (over)
goto nopos;
di_len = btrfs_dir_name_len(leaf, di) +
btrfs_dir_data_len(leaf, di) + sizeof(*di);
di_cur += di_len;
di = (struct btrfs_dir_item *)((char *)di + di_len);
}
next:
path->slots[0]++;
}
if (key_type == BTRFS_DIR_INDEX_KEY) {
if (is_curr)
ctx->pos++;
ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
if (ret)
goto nopos;
}
/* Reached end of directory/root. Bump pos past the last item. */
ctx->pos++;
/*
* Stop new entries from being returned after we return the last
* entry.
*
* New directory entries are assigned a strictly increasing
* offset. This means that new entries created during readdir
* are *guaranteed* to be seen in the future by that readdir.
* This has broken buggy programs which operate on names as
* they're returned by readdir. Until we re-use freed offsets
* we have this hack to stop new entries from being returned
* under the assumption that they'll never reach this huge
* offset.
*
* This is being careful not to overflow 32bit loff_t unless the
* last entry requires it because doing so has broken 32bit apps
* in the past.
*/
if (key_type == BTRFS_DIR_INDEX_KEY) {
if (ctx->pos >= INT_MAX)
ctx->pos = LLONG_MAX;
else
ctx->pos = INT_MAX;
}
nopos:
ret = 0;
err:
if (key_type == BTRFS_DIR_INDEX_KEY)
btrfs_put_delayed_items(&ins_list, &del_list);
btrfs_free_path(path);
return ret;
}
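/*
 * ->write_inode: btrfs has no way to write back a single inode item by
 * itself, so for data-integrity (WB_SYNC_ALL) writeback we join and commit
 * the running transaction.
 */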
int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
int ret = 0;
bool nolock = false;
if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
return 0;
if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(inode))
nolock = true;
if (wbc->sync_mode == WB_SYNC_ALL) {
if (nolock)
trans = btrfs_join_transaction_nolock(root);
else
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return PTR_ERR(trans);
ret = btrfs_commit_transaction(trans, root);
}
return ret;
}
/*
* This is somewhat expensive, updating the tree every time the
* inode changes. But, it is most likely to find the inode in cache.
 * FIXME, needs more benchmarking... there are no reasons other than performance
* to keep or drop this code.
*/
static int btrfs_dirty_inode(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
int ret;
if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
return 0;
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return PTR_ERR(trans);
ret = btrfs_update_inode(trans, root, inode);
if (ret && ret == -ENOSPC) {
/* whoops, lets try again with the full transaction */
btrfs_end_transaction(trans, root);
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans))
return PTR_ERR(trans);
ret = btrfs_update_inode(trans, root, inode);
}
btrfs_end_transaction(trans, root);
if (BTRFS_I(inode)->delayed_node)
btrfs_balance_delayed_items(root);
return ret;
}
/*
 * This is a copy of file_update_time. We need this so we can return an error
 * on ENOSPC when updating the inode in the case of file write and mmap writes.
*/
static int btrfs_update_time(struct inode *inode, struct timespec *now,
int flags)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
if (btrfs_root_readonly(root))
return -EROFS;
if (flags & S_VERSION)
inode_inc_iversion(inode);
if (flags & S_CTIME)
inode->i_ctime = *now;
if (flags & S_MTIME)
inode->i_mtime = *now;
if (flags & S_ATIME)
inode->i_atime = *now;
return btrfs_dirty_inode(inode);
}
/*
* find the highest existing sequence number in a directory
* and then set the in-memory index_cnt variable to reflect
* free sequence numbers
*/
static int btrfs_set_inode_index_count(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_key key, found_key;
struct btrfs_path *path;
struct extent_buffer *leaf;
int ret;
key.objectid = btrfs_ino(inode);
key.type = BTRFS_DIR_INDEX_KEY;
key.offset = (u64)-1;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto out;
/* FIXME: we should be able to handle this */
if (ret == 0)
goto out;
ret = 0;
/*
* MAGIC NUMBER EXPLANATION:
	 * since we search a directory based on f_pos, and '.' and '..' have
	 * f_pos of 0 and 1 respectively, everybody else has to start at 2
*/
if (path->slots[0] == 0) {
BTRFS_I(inode)->index_cnt = 2;
goto out;
}
path->slots[0]--;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
if (found_key.objectid != btrfs_ino(inode) ||
found_key.type != BTRFS_DIR_INDEX_KEY) {
BTRFS_I(inode)->index_cnt = 2;
goto out;
}
BTRFS_I(inode)->index_cnt = found_key.offset + 1;
out:
btrfs_free_path(path);
return ret;
}
/*
 * helper to find a free sequence number in a given directory. The current
 * code is very simple; later versions will do smarter things in the btree
*/
int btrfs_set_inode_index(struct inode *dir, u64 *index)
{
int ret = 0;
if (BTRFS_I(dir)->index_cnt == (u64)-1) {
ret = btrfs_inode_delayed_dir_index_count(dir);
if (ret) {
ret = btrfs_set_inode_index_count(dir);
if (ret)
return ret;
}
}
*index = BTRFS_I(dir)->index_cnt;
BTRFS_I(dir)->index_cnt++;
return ret;
}
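/* Insert a newly allocated inode into the inode hash with I_NEW set. */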
static int btrfs_insert_inode_locked(struct inode *inode)
{
struct btrfs_iget_args args;
args.location = &BTRFS_I(inode)->location;
args.root = BTRFS_I(inode)->root;
return insert_inode_locked4(inode,
btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
btrfs_find_actor, &args);
}
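/*
 * Allocate a new inode: set up the in-memory inode, insert it into the
 * inode hash, and insert the inode item (plus the first inode ref when a
 * name is given, i.e. not O_TMPFILE) in one btree operation.
 */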
static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *dir,
const char *name, int name_len,
u64 ref_objectid, u64 objectid,
umode_t mode, u64 *index)
{
struct inode *inode;
struct btrfs_inode_item *inode_item;
struct btrfs_key *location;
struct btrfs_path *path;
struct btrfs_inode_ref *ref;
struct btrfs_key key[2];
u32 sizes[2];
int nitems = name ? 2 : 1;
unsigned long ptr;
int ret;
path = btrfs_alloc_path();
if (!path)
return ERR_PTR(-ENOMEM);
inode = new_inode(root->fs_info->sb);
if (!inode) {
btrfs_free_path(path);
return ERR_PTR(-ENOMEM);
}
/*
	 * For O_TMPFILE (no name), set the link count to 0 so that from this
	 * point on we fill in an inode item with the correct link count.
*/
if (!name)
set_nlink(inode, 0);
/*
* we have to initialize this early, so we can reclaim the inode
* number if we fail afterwards in this function.
*/
inode->i_ino = objectid;
if (dir && name) {
trace_btrfs_inode_request(dir);
ret = btrfs_set_inode_index(dir, index);
if (ret) {
btrfs_free_path(path);
iput(inode);
return ERR_PTR(ret);
}
} else if (dir) {
*index = 0;
}
/*
	 * index_cnt is ignored for everything but a dir,
	 * btrfs_set_inode_index_count has an explanation for the magic
	 * number
*/
BTRFS_I(inode)->index_cnt = 2;
BTRFS_I(inode)->dir_index = *index;
BTRFS_I(inode)->root = root;
BTRFS_I(inode)->generation = trans->transid;
inode->i_generation = BTRFS_I(inode)->generation;
/*
* We could have gotten an inode number from somebody who was fsynced
* and then removed in this same transaction, so let's just set full
* sync since it will be a full sync anyway and this will blow away the
* old info in the log.
*/
set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
key[0].objectid = objectid;
key[0].type = BTRFS_INODE_ITEM_KEY;
key[0].offset = 0;
sizes[0] = sizeof(struct btrfs_inode_item);
if (name) {
/*
* Start new inodes with an inode_ref. This is slightly more
* efficient for small numbers of hard links since they will
* be packed into one item. Extended refs will kick in if we
* add more hard links than can fit in the ref item.
*/
key[1].objectid = objectid;
key[1].type = BTRFS_INODE_REF_KEY;
key[1].offset = ref_objectid;
sizes[1] = name_len + sizeof(*ref);
}
location = &BTRFS_I(inode)->location;
location->objectid = objectid;
location->offset = 0;
location->type = BTRFS_INODE_ITEM_KEY;
ret = btrfs_insert_inode_locked(inode);
if (ret < 0)
goto fail;
path->leave_spinning = 1;
ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems);
if (ret != 0)
goto fail_unlock;
inode_init_owner(inode, dir, mode);
inode_set_bytes(inode, 0);
inode->i_mtime = CURRENT_TIME;
inode->i_atime = inode->i_mtime;
inode->i_ctime = inode->i_mtime;
BTRFS_I(inode)->i_otime = inode->i_mtime;
inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_inode_item);
memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item,
sizeof(*inode_item));
fill_inode_item(trans, path->nodes[0], inode_item, inode);
if (name) {
ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
struct btrfs_inode_ref);
btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
ptr = (unsigned long)(ref + 1);
write_extent_buffer(path->nodes[0], name, ptr, name_len);
}
btrfs_mark_buffer_dirty(path->nodes[0]);
btrfs_free_path(path);
btrfs_inherit_iflags(inode, dir);
if (S_ISREG(mode)) {
if (btrfs_test_opt(root, NODATASUM))
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
if (btrfs_test_opt(root, NODATACOW))
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
BTRFS_INODE_NODATASUM;
}
inode_tree_add(inode);
trace_btrfs_inode_new(inode);
btrfs_set_inode_last_trans(trans, inode);
btrfs_update_root_times(trans, root);
ret = btrfs_inode_inherit_props(trans, inode, dir);
if (ret)
btrfs_err(root->fs_info,
"error inheriting props for ino %llu (root %llu): %d",
btrfs_ino(inode), root->root_key.objectid, ret);
return inode;
fail_unlock:
unlock_new_inode(inode);
fail:
if (dir && name)
BTRFS_I(dir)->index_cnt--;
btrfs_free_path(path);
iput(inode);
return ERR_PTR(ret);
}
static inline u8 btrfs_inode_type(struct inode *inode)
{
return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
}
/*
* utility function to add 'inode' into 'parent_inode' with
 * a given name and a given sequence number.
* if 'add_backref' is true, also insert a backref from the
* inode to the parent directory.
*/
int btrfs_add_link(struct btrfs_trans_handle *trans,
struct inode *parent_inode, struct inode *inode,
const char *name, int name_len, int add_backref, u64 index)
{
int ret = 0;
struct btrfs_key key;
struct btrfs_root *root = BTRFS_I(parent_inode)->root;
u64 ino = btrfs_ino(inode);
u64 parent_ino = btrfs_ino(parent_inode);
if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
} else {
key.objectid = ino;
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
}
if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
key.objectid, root->root_key.objectid,
parent_ino, index, name, name_len);
} else if (add_backref) {
ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
parent_ino, index);
}
/* Nothing to clean up yet */
if (ret)
return ret;
ret = btrfs_insert_dir_item(trans, root, name, name_len,
parent_inode, &key,
btrfs_inode_type(inode), index);
if (ret == -EEXIST || ret == -EOVERFLOW)
goto fail_dir_item;
else if (ret) {
btrfs_abort_transaction(trans, root, ret);
return ret;
}
btrfs_i_size_write(parent_inode, parent_inode->i_size +
name_len * 2);
inode_inc_iversion(parent_inode);
parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
ret = btrfs_update_inode(trans, root, parent_inode);
if (ret)
btrfs_abort_transaction(trans, root, ret);
return ret;
fail_dir_item:
if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
u64 local_index;
int err;
err = btrfs_del_root_ref(trans, root->fs_info->tree_root,
key.objectid, root->root_key.objectid,
parent_ino, &local_index, name, name_len);
} else if (add_backref) {
u64 local_index;
int err;
err = btrfs_del_inode_ref(trans, root, name, name_len,
ino, parent_ino, &local_index);
}
return ret;
}
static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
struct inode *dir, struct dentry *dentry,
struct inode *inode, int backref, u64 index)
{
int err = btrfs_add_link(trans, dir, inode,
dentry->d_name.name, dentry->d_name.len,
backref, index);
if (err > 0)
err = -EEXIST;
return err;
}
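/*
 * The directory entry creation callbacks below (mknod, create, link,
 * mkdir) follow the same pattern: reserve a transaction with enough items
 * for the inode, its ref, the dir items and a possible selinux xattr, set
 * up the inode and link it into the directory.
 */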
static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
umode_t mode, dev_t rdev)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = NULL;
int err;
int drop_inode = 0;
u64 objectid;
u64 index = 0;
if (!new_valid_dev(rdev))
return -EINVAL;
/*
* 2 for inode item and ref
* 2 for dir items
* 1 for xattr if selinux is on
*/
trans = btrfs_start_transaction(root, 5);
if (IS_ERR(trans))
return PTR_ERR(trans);
err = btrfs_find_free_ino(root, &objectid);
if (err)
goto out_unlock;
inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
dentry->d_name.len, btrfs_ino(dir), objectid,
mode, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out_unlock;
}
/*
* If the active LSM wants to access the inode during
* d_instantiate it needs these. Smack checks to see
* if the filesystem supports xattrs by looking at the
* ops vector.
*/
inode->i_op = &btrfs_special_inode_operations;
init_special_inode(inode, inode->i_mode, rdev);
err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err)
goto out_unlock_inode;
err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err) {
goto out_unlock_inode;
} else {
btrfs_update_inode(trans, root, inode);
unlock_new_inode(inode);
d_instantiate(dentry, inode);
}
out_unlock:
btrfs_end_transaction(trans, root);
btrfs_balance_delayed_items(root);
btrfs_btree_balance_dirty(root);
if (drop_inode) {
inode_dec_link_count(inode);
iput(inode);
}
return err;
out_unlock_inode:
drop_inode = 1;
unlock_new_inode(inode);
goto out_unlock;
}
static int btrfs_create(struct inode *dir, struct dentry *dentry,
umode_t mode, bool excl)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = NULL;
int drop_inode_on_err = 0;
int err;
u64 objectid;
u64 index = 0;
/*
* 2 for inode item and ref
* 2 for dir items
* 1 for xattr if selinux is on
*/
trans = btrfs_start_transaction(root, 5);
if (IS_ERR(trans))
return PTR_ERR(trans);
err = btrfs_find_free_ino(root, &objectid);
if (err)
goto out_unlock;
inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
dentry->d_name.len, btrfs_ino(dir), objectid,
mode, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out_unlock;
}
drop_inode_on_err = 1;
/*
* If the active LSM wants to access the inode during
* d_instantiate it needs these. Smack checks to see
* if the filesystem supports xattrs by looking at the
* ops vector.
*/
inode->i_fop = &btrfs_file_operations;
inode->i_op = &btrfs_file_inode_operations;
inode->i_mapping->a_ops = &btrfs_aops;
err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err)
goto out_unlock_inode;
err = btrfs_update_inode(trans, root, inode);
if (err)
goto out_unlock_inode;
err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
goto out_unlock_inode;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
unlock_new_inode(inode);
d_instantiate(dentry, inode);
out_unlock:
btrfs_end_transaction(trans, root);
if (err && drop_inode_on_err) {
inode_dec_link_count(inode);
iput(inode);
}
btrfs_balance_delayed_items(root);
btrfs_btree_balance_dirty(root);
return err;
out_unlock_inode:
unlock_new_inode(inode);
goto out_unlock;
}
static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *dentry)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = d_inode(old_dentry);
u64 index;
int err;
int drop_inode = 0;
/* do not allow sys_link's with other subvols of the same device */
if (root->objectid != BTRFS_I(inode)->root->objectid)
return -EXDEV;
if (inode->i_nlink >= BTRFS_LINK_MAX)
return -EMLINK;
err = btrfs_set_inode_index(dir, &index);
if (err)
goto fail;
/*
* 2 items for inode and inode ref
* 2 items for dir items
* 1 item for parent inode
*/
trans = btrfs_start_transaction(root, 5);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
goto fail;
}
/* There are several dir indexes for this inode, clear the cache. */
BTRFS_I(inode)->dir_index = 0ULL;
inc_nlink(inode);
inode_inc_iversion(inode);
inode->i_ctime = CURRENT_TIME;
ihold(inode);
set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
if (err) {
drop_inode = 1;
} else {
struct dentry *parent = dentry->d_parent;
err = btrfs_update_inode(trans, root, inode);
if (err)
goto fail;
if (inode->i_nlink == 1) {
/*
			 * If the new hard link count is 1, it's a file created
			 * with the open(2) O_TMPFILE flag.
*/
err = btrfs_orphan_del(trans, inode);
if (err)
goto fail;
}
d_instantiate(dentry, inode);
btrfs_log_new_name(trans, inode, NULL, parent);
}
btrfs_end_transaction(trans, root);
btrfs_balance_delayed_items(root);
fail:
if (drop_inode) {
inode_dec_link_count(inode);
iput(inode);
}
btrfs_btree_balance_dirty(root);
return err;
}
static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct inode *inode = NULL;
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
int err = 0;
int drop_on_err = 0;
u64 objectid = 0;
u64 index = 0;
/*
* 2 items for inode and ref
* 2 items for dir items
* 1 for xattr if selinux is on
*/
trans = btrfs_start_transaction(root, 5);
if (IS_ERR(trans))
return PTR_ERR(trans);
err = btrfs_find_free_ino(root, &objectid);
if (err)
goto out_fail;
inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
dentry->d_name.len, btrfs_ino(dir), objectid,
S_IFDIR | mode, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out_fail;
}
drop_on_err = 1;
/* these must be set before we unlock the inode */
inode->i_op = &btrfs_dir_inode_operations;
inode->i_fop = &btrfs_dir_file_operations;
err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err)
goto out_fail_inode;
btrfs_i_size_write(inode, 0);
err = btrfs_update_inode(trans, root, inode);
if (err)
goto out_fail_inode;
err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
dentry->d_name.len, 0, index);
if (err)
goto out_fail_inode;
d_instantiate(dentry, inode);
/*
* mkdir is special. We're unlocking after we call d_instantiate
* to avoid a race with nfsd calling d_instantiate.
*/
unlock_new_inode(inode);
drop_on_err = 0;
out_fail:
btrfs_end_transaction(trans, root);
if (drop_on_err) {
inode_dec_link_count(inode);
iput(inode);
}
btrfs_balance_delayed_items(root);
btrfs_btree_balance_dirty(root);
return err;
out_fail_inode:
unlock_new_inode(inode);
goto out_fail;
}
/* Find next extent map of a given extent map, caller needs to ensure locks */
static struct extent_map *next_extent_map(struct extent_map *em)
{
struct rb_node *next;
next = rb_next(&em->rb_node);
if (!next)
return NULL;
return container_of(next, struct extent_map, rb_node);
}
static struct extent_map *prev_extent_map(struct extent_map *em)
{
struct rb_node *prev;
prev = rb_prev(&em->rb_node);
if (!prev)
return NULL;
return container_of(prev, struct extent_map, rb_node);
}
/* helper for btrfs_get_extent. Given an existing extent in the tree
 * (the nearest extent to map_start) and an extent that we want to
 * insert, deal with the overlap and insert the best-fitting new
 * extent into the tree.
 */
static int merge_extent_mapping(struct extent_map_tree *em_tree,
struct extent_map *existing,
struct extent_map *em,
u64 map_start)
{
struct extent_map *prev;
struct extent_map *next;
u64 start;
u64 end;
u64 start_diff;
BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
if (existing->start > map_start) {
next = existing;
prev = prev_extent_map(next);
} else {
prev = existing;
next = next_extent_map(prev);
}
start = prev ? extent_map_end(prev) : em->start;
start = max_t(u64, start, em->start);
end = next ? next->start : extent_map_end(em);
end = min_t(u64, end, extent_map_end(em));
start_diff = start - em->start;
em->start = start;
em->len = end - start;
if (em->block_start < EXTENT_MAP_LAST_BYTE &&
!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
em->block_start += start_diff;
em->block_len -= start_diff;
}
return add_extent_mapping(em_tree, em, 0);
}
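/*
 * Decompress an inline extent into the given page: copy the compressed
 * bytes out of the leaf into a temporary buffer, then run them through
 * btrfs_decompress() into the page.
 */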
static noinline int uncompress_inline(struct btrfs_path *path,
struct inode *inode, struct page *page,
size_t pg_offset, u64 extent_offset,
struct btrfs_file_extent_item *item)
{
int ret;
struct extent_buffer *leaf = path->nodes[0];
char *tmp;
size_t max_size;
unsigned long inline_size;
unsigned long ptr;
int compress_type;
WARN_ON(pg_offset != 0);
compress_type = btrfs_file_extent_compression(leaf, item);
max_size = btrfs_file_extent_ram_bytes(leaf, item);
inline_size = btrfs_file_extent_inline_item_len(leaf,
btrfs_item_nr(path->slots[0]));
tmp = kmalloc(inline_size, GFP_NOFS);
if (!tmp)
return -ENOMEM;
ptr = btrfs_file_extent_inline_start(item);
read_extent_buffer(leaf, tmp, ptr, inline_size);
max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
ret = btrfs_decompress(compress_type, tmp, page,
extent_offset, inline_size, max_size);
kfree(tmp);
return ret;
}
/*
* a bit scary, this does extent mapping from logical file offset to the disk.
* the ugly parts come from merging extents from the disk with the in-ram
* representation. This gets more complex because of the data=ordered code,
* where the in-ram extents might be locked pending data=ordered completion.
*
* This also copies inline extents directly into the page.
*/
struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
size_t pg_offset, u64 start, u64 len,
int create)
{
int ret;
int err = 0;
u64 extent_start = 0;
u64 extent_end = 0;
u64 objectid = btrfs_ino(inode);
u32 found_type;
struct btrfs_path *path = NULL;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_file_extent_item *item;
struct extent_buffer *leaf;
struct btrfs_key found_key;
struct extent_map *em = NULL;
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct btrfs_trans_handle *trans = NULL;
const bool new_inline = !page || create;
again:
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, start, len);
if (em)
em->bdev = root->fs_info->fs_devices->latest_bdev;
read_unlock(&em_tree->lock);
if (em) {
if (em->start > start || em->start + em->len <= start)
free_extent_map(em);
else if (em->block_start == EXTENT_MAP_INLINE && page)
free_extent_map(em);
else
goto out;
}
em = alloc_extent_map();
if (!em) {
err = -ENOMEM;
goto out;
}
em->bdev = root->fs_info->fs_devices->latest_bdev;
em->start = EXTENT_MAP_HOLE;
em->orig_start = EXTENT_MAP_HOLE;
em->len = (u64)-1;
em->block_len = (u64)-1;
if (!path) {
path = btrfs_alloc_path();
if (!path) {
err = -ENOMEM;
goto out;
}
/*
* Chances are we'll be called again, so go ahead and do
* readahead
*/
path->reada = 1;
}
ret = btrfs_lookup_file_extent(trans, root, path,
objectid, start, trans != NULL);
if (ret < 0) {
err = ret;
goto out;
}
if (ret != 0) {
if (path->slots[0] == 0)
goto not_found;
path->slots[0]--;
}
leaf = path->nodes[0];
item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
/* are we inside the extent that was found? */
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
found_type = found_key.type;
if (found_key.objectid != objectid ||
found_type != BTRFS_EXTENT_DATA_KEY) {
/*
		 * If we back up past the first extent we want to move forward
* and see if there is an extent in front of us, otherwise we'll
* say there is a hole for our whole search range which can
* cause problems.
*/
extent_end = start;
goto next;
}
found_type = btrfs_file_extent_type(leaf, item);
extent_start = found_key.offset;
if (found_type == BTRFS_FILE_EXTENT_REG ||
found_type == BTRFS_FILE_EXTENT_PREALLOC) {
extent_end = extent_start +
btrfs_file_extent_num_bytes(leaf, item);
} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
size_t size;
size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
extent_end = ALIGN(extent_start + size, root->sectorsize);
}
next:
if (start >= extent_end) {
path->slots[0]++;
if (path->slots[0] >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
if (ret < 0) {
err = ret;
goto out;
}
if (ret > 0)
goto not_found;
leaf = path->nodes[0];
}
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
if (found_key.objectid != objectid ||
found_key.type != BTRFS_EXTENT_DATA_KEY)
goto not_found;
if (start + len <= found_key.offset)
goto not_found;
if (start > found_key.offset)
goto next;
em->start = start;
em->orig_start = start;
em->len = found_key.offset - start;
goto not_found_em;
}
btrfs_extent_item_to_extent_map(inode, path, item, new_inline, em);
if (found_type == BTRFS_FILE_EXTENT_REG ||
found_type == BTRFS_FILE_EXTENT_PREALLOC) {
goto insert;
} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
unsigned long ptr;
char *map;
size_t size;
size_t extent_offset;
size_t copy_size;
if (new_inline)
goto out;
size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
extent_offset = page_offset(page) + pg_offset - extent_start;
copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
size - extent_offset);
em->start = extent_start + extent_offset;
em->len = ALIGN(copy_size, root->sectorsize);
em->orig_block_len = em->len;
em->orig_start = em->start;
ptr = btrfs_file_extent_inline_start(item) + extent_offset;
if (create == 0 && !PageUptodate(page)) {
if (btrfs_file_extent_compression(leaf, item) !=
BTRFS_COMPRESS_NONE) {
ret = uncompress_inline(path, inode, page,
pg_offset,
extent_offset, item);
if (ret) {
err = ret;
goto out;
}
} else {
map = kmap(page);
read_extent_buffer(leaf, map + pg_offset, ptr,
copy_size);
if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
memset(map + pg_offset + copy_size, 0,
PAGE_CACHE_SIZE - pg_offset -
copy_size);
}
kunmap(page);
}
flush_dcache_page(page);
} else if (create && PageUptodate(page)) {
BUG();
if (!trans) {
kunmap(page);
free_extent_map(em);
em = NULL;
btrfs_release_path(path);
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return ERR_CAST(trans);
goto again;
}
map = kmap(page);
write_extent_buffer(leaf, map + pg_offset, ptr,
copy_size);
kunmap(page);
btrfs_mark_buffer_dirty(leaf);
}
set_extent_uptodate(io_tree, em->start,
extent_map_end(em) - 1, NULL, GFP_NOFS);
goto insert;
}
not_found:
em->start = start;
em->orig_start = start;
em->len = len;
not_found_em:
em->block_start = EXTENT_MAP_HOLE;
set_bit(EXTENT_FLAG_VACANCY, &em->flags);
insert:
btrfs_release_path(path);
if (em->start > start || extent_map_end(em) <= start) {
btrfs_err(root->fs_info, "bad extent! em: [%llu %llu] passed [%llu %llu]",
em->start, em->len, start, len);
err = -EIO;
goto out;
}
err = 0;
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em, 0);
/* it is possible that someone inserted the extent into the tree
* while we had the lock dropped. It is also possible that
* an overlapping map exists in the tree
*/
if (ret == -EEXIST) {
struct extent_map *existing;
ret = 0;
existing = search_extent_mapping(em_tree, start, len);
/*
		 * existing will always be non-NULL, since there must be
		 * an extent causing the -EEXIST.
*/
if (start >= extent_map_end(existing) ||
start <= existing->start) {
/*
* The existing extent map is the one nearest to
* the [start, start + len) range which overlaps
*/
err = merge_extent_mapping(em_tree, existing,
em, start);
free_extent_map(existing);
if (err) {
free_extent_map(em);
em = NULL;
}
} else {
free_extent_map(em);
em = existing;
err = 0;
}
}
write_unlock(&em_tree->lock);
out:
trace_btrfs_get_extent(root, em);
btrfs_free_path(path);
if (trans) {
ret = btrfs_end_transaction(trans, root);
if (!err)
err = ret;
}
if (err) {
free_extent_map(em);
return ERR_PTR(err);
}
BUG_ON(!em); /* Error is always set */
return em;
}
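/*
 * Variant of btrfs_get_extent() used by fiemap: when the mapping comes
 * back as a hole or a prealloc extent, also check the io tree for delalloc
 * bytes hiding behind it and report those as an EXTENT_MAP_DELALLOC
 * mapping instead.
 */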
struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
size_t pg_offset, u64 start, u64 len,
int create)
{
struct extent_map *em;
struct extent_map *hole_em = NULL;
u64 range_start = start;
u64 end;
u64 found;
u64 found_end;
int err = 0;
em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
if (IS_ERR(em))
return em;
if (em) {
/*
* if our em maps to
* - a hole or
* - a pre-alloc extent,
* there might actually be delalloc bytes behind it.
*/
if (em->block_start != EXTENT_MAP_HOLE &&
!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
return em;
else
hole_em = em;
}
/* check to see if we've wrapped (len == -1 or similar) */
end = start + len;
if (end < start)
end = (u64)-1;
else
end -= 1;
em = NULL;
	/* ok, we didn't find anything, let's look for delalloc */
found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
end, len, EXTENT_DELALLOC, 1);
found_end = range_start + found;
if (found_end < range_start)
found_end = (u64)-1;
/*
* we didn't find anything useful, return
* the original results from get_extent()
*/
if (range_start > end || found_end <= start) {
em = hole_em;
hole_em = NULL;
goto out;
}
/* adjust the range_start to make sure it doesn't
* go backwards from the start they passed in
*/
range_start = max(start, range_start);
found = found_end - range_start;
if (found > 0) {
u64 hole_start = start;
u64 hole_len = len;
em = alloc_extent_map();
if (!em) {
err = -ENOMEM;
goto out;
}
/*
* when btrfs_get_extent can't find anything it
* returns one huge hole
*
* make sure what it found really fits our range, and
* adjust to make sure it is based on the start from
* the caller
*/
if (hole_em) {
u64 calc_end = extent_map_end(hole_em);
if (calc_end <= start || (hole_em->start > end)) {
free_extent_map(hole_em);
hole_em = NULL;
} else {
hole_start = max(hole_em->start, start);
hole_len = calc_end - hole_start;
}
}
em->bdev = NULL;
if (hole_em && range_start > hole_start) {
/* our hole starts before our delalloc, so we
* have to return just the parts of the hole
* that go until the delalloc starts
*/
em->len = min(hole_len,
range_start - hole_start);
em->start = hole_start;
em->orig_start = hole_start;
/*
* don't adjust block start at all,
* it is fixed at EXTENT_MAP_HOLE
*/
em->block_start = hole_em->block_start;
em->block_len = hole_len;
if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
} else {
em->start = range_start;
em->len = found;
em->orig_start = range_start;
em->block_start = EXTENT_MAP_DELALLOC;
em->block_len = found;
}
} else if (hole_em) {
return hole_em;
}
out:
free_extent_map(hole_em);
if (err) {
free_extent_map(em);
return ERR_PTR(err);
}
return em;
}
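/*
 * Allocate a new data extent for a direct IO write: reserve the extent,
 * insert a pinned extent map for it and queue the matching ordered extent.
 * On failure the reserved extent is released again.
 */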
static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
u64 start, u64 len)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_map *em;
struct btrfs_key ins;
u64 alloc_hint;
int ret;
alloc_hint = get_extent_allocation_hint(inode, start, len);
ret = btrfs_reserve_extent(root, len, root->sectorsize, 0,
alloc_hint, &ins, 1, 1);
if (ret)
return ERR_PTR(ret);
em = create_pinned_em(inode, start, ins.offset, start, ins.objectid,
ins.offset, ins.offset, ins.offset, 0);
if (IS_ERR(em)) {
btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
return em;
}
ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
ins.offset, ins.offset, 0);
if (ret) {
btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
free_extent_map(em);
return ERR_PTR(ret);
}
return em;
}
/*
* returns 1 when the nocow is safe, < 0 on error, 0 if the
* block must be cow'd
*/
noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
u64 *orig_start, u64 *orig_block_len,
u64 *ram_bytes)
{
struct btrfs_trans_handle *trans;
struct btrfs_path *path;
int ret;
struct extent_buffer *leaf;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct btrfs_file_extent_item *fi;
struct btrfs_key key;
u64 disk_bytenr;
u64 backref_offset;
u64 extent_end;
u64 num_bytes;
int slot;
int found_type;
bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
offset, 0);
if (ret < 0)
goto out;
slot = path->slots[0];
if (ret == 1) {
if (slot == 0) {
/* can't find the item, must cow */
ret = 0;
goto out;
}
slot--;
}
ret = 0;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, slot);
if (key.objectid != btrfs_ino(inode) ||
key.type != BTRFS_EXTENT_DATA_KEY) {
/* not our file or wrong item type, must cow */
goto out;
}
if (key.offset > offset) {
/* Wrong offset, must cow */
goto out;
}
fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
found_type = btrfs_file_extent_type(leaf, fi);
if (found_type != BTRFS_FILE_EXTENT_REG &&
found_type != BTRFS_FILE_EXTENT_PREALLOC) {
/* not a regular extent, must cow */
goto out;
}
if (!nocow && found_type == BTRFS_FILE_EXTENT_REG)
goto out;
extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
if (extent_end <= offset)
goto out;
disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
if (disk_bytenr == 0)
goto out;
if (btrfs_file_extent_compression(leaf, fi) ||
btrfs_file_extent_encryption(leaf, fi) ||
btrfs_file_extent_other_encoding(leaf, fi))
goto out;
backref_offset = btrfs_file_extent_offset(leaf, fi);
if (orig_start) {
*orig_start = key.offset - backref_offset;
*orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
*ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
}
if (btrfs_extent_readonly(root, disk_bytenr))
goto out;
num_bytes = min(offset + *len, extent_end) - offset;
if (!nocow && found_type == BTRFS_FILE_EXTENT_PREALLOC) {
u64 range_end;
range_end = round_up(offset + num_bytes, root->sectorsize) - 1;
ret = test_range_bit(io_tree, offset, range_end,
EXTENT_DELALLOC, 0, NULL);
if (ret) {
ret = -EAGAIN;
goto out;
}
}
btrfs_release_path(path);
/*
* look for other files referencing this extent, if we
* find any we must cow
*/
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
ret = 0;
goto out;
}
ret = btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
key.offset - backref_offset, disk_bytenr);
btrfs_end_transaction(trans, root);
if (ret) {
ret = 0;
goto out;
}
/*
* adjust disk_bytenr and num_bytes to cover just the bytes
* in this extent we are about to write. If there
* are any csums in that range we have to cow in order
* to keep the csums correct
*/
disk_bytenr += backref_offset;
disk_bytenr += offset - key.offset;
if (csum_exist_in_range(root, disk_bytenr, num_bytes))
goto out;
/*
* all of the above have passed, it is safe to overwrite this extent
* without cow
*/
*len = num_bytes;
ret = 1;
out:
btrfs_free_path(path);
return ret;
}
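/*
 * Check (locklessly, under RCU) whether any page in the byte range
 * [start, end] is present in the inode's page cache.  Used by the direct
 * IO write path to detect buffered pages it raced with.
 */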
bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
{
struct radix_tree_root *root = &inode->i_mapping->page_tree;
int found = false;
void **pagep = NULL;
struct page *page = NULL;
int start_idx;
int end_idx;
start_idx = start >> PAGE_CACHE_SHIFT;
/*
* end is the last byte in the last page. end == start is legal
*/
end_idx = end >> PAGE_CACHE_SHIFT;
rcu_read_lock();
/* Most of the code in this while loop is lifted from
* find_get_page. It's been modified to begin searching from a
* page and return just the first page found in that range. If the
* found idx is less than or equal to the end idx then we know that
* a page exists. If no pages are found or if those pages are
* outside of the range then we're fine (yay!) */
while (page == NULL &&
radix_tree_gang_lookup_slot(root, &pagep, NULL, start_idx, 1)) {
page = radix_tree_deref_slot(pagep);
if (unlikely(!page))
break;
if (radix_tree_exception(page)) {
if (radix_tree_deref_retry(page)) {
page = NULL;
continue;
}
/*
* Otherwise, shmem/tmpfs must be storing a swap entry
* here as an exceptional entry: so return it without
* attempting to raise page count.
*/
page = NULL;
break; /* TODO: Is this relevant for this use case? */
}
if (!page_cache_get_speculative(page)) {
page = NULL;
continue;
}
/*
* Has the page moved?
* This is part of the lockless pagecache protocol. See
* include/linux/pagemap.h for details.
*/
if (unlikely(page != *pagep)) {
page_cache_release(page);
page = NULL;
}
}
if (page) {
if (page->index <= end_idx)
found = true;
page_cache_release(page);
}
rcu_read_unlock();
return found;
}
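/*
 * Lock the io_tree range we are about to do direct IO on.  Loops until the
 * range is free of ordered extents and (for writes) of cached pages,
 * flushing and invalidating as needed; returns non-zero if the caller
 * should fall back to buffered IO.
 */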
static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
struct extent_state **cached_state, int writing)
{
struct btrfs_ordered_extent *ordered;
int ret = 0;
while (1) {
lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
0, cached_state);
/*
* We're concerned with the entire range that we're going to be
* doing DIO to, so we need to make sure there are no ordered
* extents in this range.
*/
ordered = btrfs_lookup_ordered_range(inode, lockstart,
lockend - lockstart + 1);
/*
* We need to make sure there are no buffered pages in this
* range either, we could have raced between the invalidate in
* generic_file_direct_write and locking the extent. The
* invalidate needs to happen so that reads after a write do not
* get stale data.
*/
if (!ordered &&
(!writing ||
!btrfs_page_exists_in_range(inode, lockstart, lockend)))
break;
unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
cached_state, GFP_NOFS);
if (ordered) {
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
} else {
/* Screw you mmap */
ret = btrfs_fdatawrite_range(inode, lockstart, lockend);
if (ret)
break;
ret = filemap_fdatawait_range(inode->i_mapping,
lockstart,
lockend);
if (ret)
break;
/*
* If we found a page that couldn't be invalidated just
* fall back to buffered.
*/
ret = invalidate_inode_pages2_range(inode->i_mapping,
lockstart >> PAGE_CACHE_SHIFT,
lockend >> PAGE_CACHE_SHIFT);
if (ret)
break;
}
cond_resched();
}
return ret;
}
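/*
 * Build an extent map for an in-flight direct IO write, mark it PINNED
 * (and FILLING for prealloc) and insert it into the inode's extent map
 * tree, dropping any cached mappings that overlap it.
 */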
static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
u64 len, u64 orig_start,
u64 block_start, u64 block_len,
u64 orig_block_len, u64 ram_bytes,
int type)
{
struct extent_map_tree *em_tree;
struct extent_map *em;
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
em_tree = &BTRFS_I(inode)->extent_tree;
em = alloc_extent_map();
if (!em)
return ERR_PTR(-ENOMEM);
em->start = start;
em->orig_start = orig_start;
em->mod_start = start;
em->mod_len = len;
em->len = len;
em->block_len = block_len;
em->block_start = block_start;
em->bdev = root->fs_info->fs_devices->latest_bdev;
em->orig_block_len = orig_block_len;
em->ram_bytes = ram_bytes;
em->generation = -1;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
if (type == BTRFS_ORDERED_PREALLOC)
set_bit(EXTENT_FLAG_FILLING, &em->flags);
do {
btrfs_drop_extent_cache(inode, em->start,
em->start + em->len - 1, 0);
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em, 1);
write_unlock(&em_tree->lock);
} while (ret == -EEXIST);
if (ret) {
free_extent_map(em);
return ERR_PTR(ret);
}
return em;
}
struct btrfs_dio_data {
u64 outstanding_extents;
u64 reserve;
};
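/*
 * get_block callback for direct IO.  Maps [start, start + len) to an
 * extent, reusing an existing extent in the nocow/prealloc cases or
 * allocating a new one for writes, and fills in the buffer_head
 * accordingly.
 */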
static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
struct extent_map *em;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_state *cached_state = NULL;
struct btrfs_dio_data *dio_data = NULL;
u64 start = iblock << inode->i_blkbits;
u64 lockstart, lockend;
u64 len = bh_result->b_size;
int unlock_bits = EXTENT_LOCKED;
int ret = 0;
if (create)
unlock_bits |= EXTENT_DIRTY;
else
len = min_t(u64, len, root->sectorsize);
lockstart = start;
lockend = start + len - 1;
if (current->journal_info) {
/*
* Need to pull our outstanding extents and set journal_info to NULL so
* that anything that needs to check if there's a transaction doesn't get
* confused.
*/
dio_data = current->journal_info;
current->journal_info = NULL;
}
/*
* If this errors out it's because we couldn't invalidate pagecache for
* this range and we need to fall back to buffered.
*/
if (lock_extent_direct(inode, lockstart, lockend, &cached_state, create))
return -ENOTBLK;
em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
if (IS_ERR(em)) {
ret = PTR_ERR(em);
goto unlock_err;
}
/*
* Ok for INLINE and COMPRESSED extents we need to fall back on buffered
* io. INLINE is special, and we could probably kludge it in here, but
* it's still buffered so for safety let's just fall back to the generic
* buffered path.
*
* For COMPRESSED we _have_ to read the entire extent in so we can
* decompress it, so there will be buffering required no matter what we
* do, so go ahead and fall back to buffered.
*
* We return -ENOTBLK because that's what makes DIO go ahead and go back
* to buffered IO. Don't blame me, this is the price we pay for using
* the generic code.
*/
if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
em->block_start == EXTENT_MAP_INLINE) {
free_extent_map(em);
ret = -ENOTBLK;
goto unlock_err;
}
/* Just a good old fashioned hole, return */
if (!create && (em->block_start == EXTENT_MAP_HOLE ||
test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
free_extent_map(em);
goto unlock_err;
}
/*
* We don't allocate a new extent in the following cases
*
* 1) The inode is marked as NODATACOW. In this case we'll just use the
* existing extent.
* 2) The extent is marked as PREALLOC. We're good to go here and can
* just use the extent.
*
*/
if (!create) {
len = min(len, em->len - (start - em->start));
lockstart = start + len;
goto unlock;
}
if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
em->block_start != EXTENT_MAP_HOLE)) {
int type;
u64 block_start, orig_start, orig_block_len, ram_bytes;
if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
type = BTRFS_ORDERED_PREALLOC;
else
type = BTRFS_ORDERED_NOCOW;
len = min(len, em->len - (start - em->start));
block_start = em->block_start + (start - em->start);
if (can_nocow_extent(inode, start, &len, &orig_start,
&orig_block_len, &ram_bytes) == 1) {
if (type == BTRFS_ORDERED_PREALLOC) {
free_extent_map(em);
em = create_pinned_em(inode, start, len,
orig_start,
block_start, len,
orig_block_len,
ram_bytes, type);
if (IS_ERR(em)) {
ret = PTR_ERR(em);
goto unlock_err;
}
}
ret = btrfs_add_ordered_extent_dio(inode, start,
block_start, len, len, type);
if (ret) {
free_extent_map(em);
goto unlock_err;
}
goto unlock;
}
}
/*
* this will cow the extent, reset the len in case we changed
* it above
*/
len = bh_result->b_size;
free_extent_map(em);
em = btrfs_new_extent_direct(inode, start, len);
if (IS_ERR(em)) {
ret = PTR_ERR(em);
goto unlock_err;
}
len = min(len, em->len - (start - em->start));
unlock:
bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
inode->i_blkbits;
bh_result->b_size = len;
bh_result->b_bdev = em->bdev;
set_buffer_mapped(bh_result);
if (create) {
if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
set_buffer_new(bh_result);
/*
* Need to update the i_size under the extent lock so buffered
* readers will get the updated i_size when we unlock.
*/
if (start + len > i_size_read(inode))
i_size_write(inode, start + len);
/*
* If we have an outstanding_extents count still set then we're
* within our reservation, otherwise we need to adjust our inode
* counter appropriately.
*/
if (dio_data->outstanding_extents) {
(dio_data->outstanding_extents)--;
} else {
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents++;
spin_unlock(&BTRFS_I(inode)->lock);
}
btrfs_free_reserved_data_space(inode, len);
WARN_ON(dio_data->reserve < len);
dio_data->reserve -= len;
current->journal_info = dio_data;
}
/*
* In the case of write we need to clear and unlock the entire range,
* in the case of read we need to unlock only the end area that we
* aren't using if there is any left over space.
*/
if (lockstart < lockend) {
clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
lockend, unlock_bits, 1, 0,
&cached_state, GFP_NOFS);
} else {
free_extent_state(cached_state);
}
free_extent_map(em);
return 0;
unlock_err:
clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
unlock_bits, 1, 0, &cached_state, GFP_NOFS);
if (dio_data)
current->journal_info = dio_data;
return ret;
}
static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio,
int rw, int mirror_num)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
BUG_ON(rw & REQ_WRITE);
bio_get(bio);
ret = btrfs_bio_wq_end_io(root->fs_info, bio,
BTRFS_WQ_ENDIO_DIO_REPAIR);
if (ret)
goto err;
ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
err:
bio_put(bio);
return ret;
}
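/*
 * Decide whether a failed direct IO read is worth retrying: with a single
 * copy there is nothing to repair, otherwise advance this_mirror past the
 * failed one and report whether more mirrors remain to try.
 */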
static int btrfs_check_dio_repairable(struct inode *inode,
struct bio *failed_bio,
struct io_failure_record *failrec,
int failed_mirror)
{
int num_copies;
num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
failrec->logical, failrec->len);
if (num_copies == 1) {
/*
* we only have a single copy of the data, so don't bother with
* all the retry and error correction code that follows. No
* matter what the error is, it is very likely to persist.
*/
pr_debug("Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
num_copies, failrec->this_mirror, failed_mirror);
return 0;
}
failrec->failed_mirror = failed_mirror;
failrec->this_mirror++;
if (failrec->this_mirror == failed_mirror)
failrec->this_mirror++;
if (failrec->this_mirror > num_copies) {
pr_debug("Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
num_copies, failrec->this_mirror, failed_mirror);
return 0;
}
return 1;
}
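/*
 * Resubmit one page of a failed direct IO read against another mirror.
 * Builds a repair bio for the page and submits it through the DIO repair
 * workqueue; the caller waits for repair_endio to signal completion.
 */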
static int dio_read_error(struct inode *inode, struct bio *failed_bio,
struct page *page, u64 start, u64 end,
int failed_mirror, bio_end_io_t *repair_endio,
void *repair_arg)
{
struct io_failure_record *failrec;
struct bio *bio;
int isector;
int read_mode;
int ret;
BUG_ON(failed_bio->bi_rw & REQ_WRITE);
ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
if (ret)
return ret;
ret = btrfs_check_dio_repairable(inode, failed_bio, failrec,
failed_mirror);
if (!ret) {
free_io_failure(inode, failrec);
return -EIO;
}
if (failed_bio->bi_vcnt > 1)
read_mode = READ_SYNC | REQ_FAILFAST_DEV;
else
read_mode = READ_SYNC;
isector = start - btrfs_io_bio(failed_bio)->logical;
isector >>= inode->i_sb->s_blocksize_bits;
bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
0, isector, repair_endio, repair_arg);
if (!bio) {
free_io_failure(inode, failrec);
return -EIO;
}
btrfs_debug(BTRFS_I(inode)->root->fs_info,
"Repair DIO Read Error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d\n",
read_mode, failrec->this_mirror, failrec->in_validation);
ret = submit_dio_repair_bio(inode, bio, read_mode,
failrec->this_mirror);
if (ret) {
free_io_failure(inode, failrec);
bio_put(bio);
}
return ret;
}
struct btrfs_retry_complete {
struct completion done;
struct inode *inode;
u64 start;
int uptodate;
};
static void btrfs_retry_endio_nocsum(struct bio *bio)
{
struct btrfs_retry_complete *done = bio->bi_private;
struct bio_vec *bvec;
int i;
if (bio->bi_error)
goto end;
done->uptodate = 1;
bio_for_each_segment_all(bvec, bio, i)
clean_io_failure(done->inode, done->start, bvec->bv_page, 0);
end:
complete(&done->done);
bio_put(bio);
}
static int __btrfs_correct_data_nocsum(struct inode *inode,
struct btrfs_io_bio *io_bio)
{
struct bio_vec *bvec;
struct btrfs_retry_complete done;
u64 start;
int i;
int ret;
start = io_bio->logical;
done.inode = inode;
bio_for_each_segment_all(bvec, &io_bio->bio, i) {
try_again:
done.uptodate = 0;
done.start = start;
init_completion(&done.done);
ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start,
start + bvec->bv_len - 1,
io_bio->mirror_num,
btrfs_retry_endio_nocsum, &done);
if (ret)
return ret;
wait_for_completion(&done.done);
if (!done.uptodate) {
/* We might have another mirror, so try again */
goto try_again;
}
start += bvec->bv_len;
}
return 0;
}
static void btrfs_retry_endio(struct bio *bio)
{
struct btrfs_retry_complete *done = bio->bi_private;
struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
struct bio_vec *bvec;
int uptodate;
int ret;
int i;
if (bio->bi_error)
goto end;
uptodate = 1;
bio_for_each_segment_all(bvec, bio, i) {
ret = __readpage_endio_check(done->inode, io_bio, i,
bvec->bv_page, 0,
done->start, bvec->bv_len);
if (!ret)
clean_io_failure(done->inode, done->start,
bvec->bv_page, 0);
else
uptodate = 0;
}
done->uptodate = uptodate;
end:
complete(&done->done);
bio_put(bio);
}
static int __btrfs_subio_endio_read(struct inode *inode,
struct btrfs_io_bio *io_bio, int err)
{
struct bio_vec *bvec;
struct btrfs_retry_complete done;
u64 start;
u64 offset = 0;
int i;
int ret;
err = 0;
start = io_bio->logical;
done.inode = inode;
bio_for_each_segment_all(bvec, &io_bio->bio, i) {
ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page,
0, start, bvec->bv_len);
if (likely(!ret))
goto next;
try_again:
done.uptodate = 0;
done.start = start;
init_completion(&done.done);
ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start,
start + bvec->bv_len - 1,
io_bio->mirror_num,
btrfs_retry_endio, &done);
if (ret) {
err = ret;
goto next;
}
wait_for_completion(&done.done);
if (!done.uptodate) {
/* We might have another mirror, so try again */
goto try_again;
}
next:
offset += bvec->bv_len;
start += bvec->bv_len;
}
return err;
}
static int btrfs_subio_endio_read(struct inode *inode,
struct btrfs_io_bio *io_bio, int err)
{
bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
if (skip_csum) {
if (unlikely(err))
return __btrfs_correct_data_nocsum(inode, io_bio);
else
return 0;
} else {
return __btrfs_subio_endio_read(inode, io_bio, err);
}
}
static void btrfs_endio_direct_read(struct bio *bio)
{
struct btrfs_dio_private *dip = bio->bi_private;
struct inode *inode = dip->inode;
struct bio *dio_bio;
struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
int err = bio->bi_error;
if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED)
err = btrfs_subio_endio_read(inode, io_bio, err);
unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
dip->logical_offset + dip->bytes - 1);
dio_bio = dip->dio_bio;
kfree(dip);
dio_end_io(dio_bio, bio->bi_error);
if (io_bio->end_io)
io_bio->end_io(io_bio, err);
bio_put(bio);
}
static void btrfs_endio_direct_write(struct bio *bio)
{
struct btrfs_dio_private *dip = bio->bi_private;
struct inode *inode = dip->inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_ordered_extent *ordered = NULL;
u64 ordered_offset = dip->logical_offset;
u64 ordered_bytes = dip->bytes;
struct bio *dio_bio;
int ret;
again:
ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
&ordered_offset,
ordered_bytes,
!bio->bi_error);
if (!ret)
goto out_test;
btrfs_init_work(&ordered->work, btrfs_endio_write_helper,
finish_ordered_fn, NULL, NULL);
btrfs_queue_work(root->fs_info->endio_write_workers,
&ordered->work);
out_test:
/*
* our bio might span multiple ordered extents. If we haven't
* completed the accounting for the whole dio, go back and try again
*/
if (ordered_offset < dip->logical_offset + dip->bytes) {
ordered_bytes = dip->logical_offset + dip->bytes -
ordered_offset;
ordered = NULL;
goto again;
}
dio_bio = dip->dio_bio;
kfree(dip);
dio_end_io(dio_bio, bio->bi_error);
bio_put(bio);
}
static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
struct bio *bio, int mirror_num,
unsigned long bio_flags, u64 offset)
{
int ret;
struct btrfs_root *root = BTRFS_I(inode)->root;
ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
BUG_ON(ret); /* -ENOMEM */
return 0;
}
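/*
 * Completion handler for the split bios of one direct IO.  Records errors
 * on the dip and, once the last pending bio finishes, completes the
 * original bio with the accumulated status.
 */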
static void btrfs_end_dio_bio(struct bio *bio)
{
struct btrfs_dio_private *dip = bio->bi_private;
int err = bio->bi_error;
if (err)
btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
"direct IO failed ino %llu rw %lu sector %#Lx len %u err no %d",
btrfs_ino(dip->inode), bio->bi_rw,
(unsigned long long)bio->bi_iter.bi_sector,
bio->bi_iter.bi_size, err);
if (dip->subio_endio)
err = dip->subio_endio(dip->inode, btrfs_io_bio(bio), err);
if (err) {
dip->errors = 1;
/*
* before the atomic variable goes to zero, we must make sure
* dip->errors is perceived to be set.
*/
smp_mb__before_atomic();
}
/* if there are more bios still pending for this dio, just exit */
if (!atomic_dec_and_test(&dip->pending_bios))
goto out;
if (dip->errors) {
bio_io_error(dip->orig_bio);
} else {
dip->dio_bio->bi_error = 0;
bio_endio(dip->orig_bio);
}
out:
bio_put(bio);
}
static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
u64 first_sector, gfp_t gfp_flags)
{
struct bio *bio;
bio = btrfs_bio_alloc(bdev, first_sector, BIO_MAX_PAGES, gfp_flags);
if (bio)
bio_associate_current(bio);
return bio;
}
static inline int btrfs_lookup_and_bind_dio_csum(struct btrfs_root *root,
struct inode *inode,
struct btrfs_dio_private *dip,
struct bio *bio,
u64 file_offset)
{
struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio);
int ret;
/*
* We load all the csum data we need when we submit
* the first bio to reduce the csum tree search and
* contention.
*/
if (dip->logical_offset == file_offset) {
ret = btrfs_lookup_bio_sums_dio(root, inode, dip->orig_bio,
file_offset);
if (ret)
return ret;
}
if (bio == dip->orig_bio)
return 0;
file_offset -= dip->logical_offset;
file_offset >>= inode->i_sb->s_blocksize_bits;
io_bio->csum = (u8 *)(((u32 *)orig_io_bio->csum) + file_offset);
return 0;
}
static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
int rw, u64 file_offset, int skip_sum,
int async_submit)
{
struct btrfs_dio_private *dip = bio->bi_private;
int write = rw & REQ_WRITE;
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
if (async_submit)
async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
bio_get(bio);
if (!write) {
ret = btrfs_bio_wq_end_io(root->fs_info, bio,
BTRFS_WQ_ENDIO_DATA);
if (ret)
goto err;
}
if (skip_sum)
goto map;
if (write && async_submit) {
ret = btrfs_wq_submit_bio(root->fs_info,
inode, rw, bio, 0, 0,
file_offset,
__btrfs_submit_bio_start_direct_io,
__btrfs_submit_bio_done);
goto err;
} else if (write) {
/*
* If we aren't doing async submit, calculate the csum of the
* bio now.
*/
ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
if (ret)
goto err;
} else {
ret = btrfs_lookup_and_bind_dio_csum(root, inode, dip, bio,
file_offset);
if (ret)
goto err;
}
map:
ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
err:
bio_put(bio);
return ret;
}
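/*
 * Split the original direct IO bio along chunk-mapping boundaries and
 * submit each piece.  If the whole bio maps to a single stripe it is
 * submitted as-is; otherwise new bios are built up page by page.
 */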
static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
int skip_sum)
{
struct inode *inode = dip->inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct bio *bio;
struct bio *orig_bio = dip->orig_bio;
struct bio_vec *bvec = orig_bio->bi_io_vec;
u64 start_sector = orig_bio->bi_iter.bi_sector;
u64 file_offset = dip->logical_offset;
u64 submit_len = 0;
u64 map_length;
int nr_pages = 0;
int ret;
int async_submit = 0;
map_length = orig_bio->bi_iter.bi_size;
ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
&map_length, NULL, 0);
if (ret)
return -EIO;
if (map_length >= orig_bio->bi_iter.bi_size) {
bio = orig_bio;
dip->flags |= BTRFS_DIO_ORIG_BIO_SUBMITTED;
goto submit;
}
/* async crcs make it difficult to collect full stripe writes. */
if (btrfs_get_alloc_profile(root, 1) & BTRFS_BLOCK_GROUP_RAID56_MASK)
async_submit = 0;
else
async_submit = 1;
bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
if (!bio)
return -ENOMEM;
bio->bi_private = dip;
bio->bi_end_io = btrfs_end_dio_bio;
btrfs_io_bio(bio)->logical = file_offset;
atomic_inc(&dip->pending_bios);
while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
if (map_length < submit_len + bvec->bv_len ||
bio_add_page(bio, bvec->bv_page, bvec->bv_len,
bvec->bv_offset) < bvec->bv_len) {
/*
* inc the count before we submit the bio so
* we know the end IO handler won't run before
* we're done setting it up. Otherwise, the dip
* might get freed out from under us
*/
atomic_inc(&dip->pending_bios);
ret = __btrfs_submit_dio_bio(bio, inode, rw,
file_offset, skip_sum,
async_submit);
if (ret) {
bio_put(bio);
atomic_dec(&dip->pending_bios);
goto out_err;
}
start_sector += submit_len >> 9;
file_offset += submit_len;
submit_len = 0;
nr_pages = 0;
bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
start_sector, GFP_NOFS);
if (!bio)
goto out_err;
bio->bi_private = dip;
bio->bi_end_io = btrfs_end_dio_bio;
btrfs_io_bio(bio)->logical = file_offset;
map_length = orig_bio->bi_iter.bi_size;
ret = btrfs_map_block(root->fs_info, rw,
start_sector << 9,
&map_length, NULL, 0);
if (ret) {
bio_put(bio);
goto out_err;
}
} else {
submit_len += bvec->bv_len;
nr_pages++;
bvec++;
}
}
submit:
ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
async_submit);
if (!ret)
return 0;
bio_put(bio);
out_err:
dip->errors = 1;
/*
* before the atomic variable goes to zero, we must
* make sure dip->errors is perceived to be set.
*/
smp_mb__before_atomic();
if (atomic_dec_and_test(&dip->pending_bios))
bio_io_error(dip->orig_bio);
/* bio_end_io() will handle error, so we needn't return it */
return 0;
}
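/*
 * Entry point handed to __blockdev_direct_IO.  Clones the dio_bio, sets up
 * the btrfs_dio_private tracking structure and submits the IO, undoing the
 * ordered extent / extent lock state if submission never happened.
 */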
static void btrfs_submit_direct(int rw, struct bio *dio_bio,
struct inode *inode, loff_t file_offset)
{
struct btrfs_dio_private *dip = NULL;
struct bio *io_bio = NULL;
struct btrfs_io_bio *btrfs_bio;
int skip_sum;
int write = rw & REQ_WRITE;
int ret = 0;
skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
io_bio = btrfs_bio_clone(dio_bio, GFP_NOFS);
if (!io_bio) {
ret = -ENOMEM;
goto free_ordered;
}
dip = kzalloc(sizeof(*dip), GFP_NOFS);
if (!dip) {
ret = -ENOMEM;
goto free_ordered;
}
dip->private = dio_bio->bi_private;
dip->inode = inode;
dip->logical_offset = file_offset;
dip->bytes = dio_bio->bi_iter.bi_size;
dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9;
io_bio->bi_private = dip;
dip->orig_bio = io_bio;
dip->dio_bio = dio_bio;
atomic_set(&dip->pending_bios, 0);
btrfs_bio = btrfs_io_bio(io_bio);
btrfs_bio->logical = file_offset;
if (write) {
io_bio->bi_end_io = btrfs_endio_direct_write;
} else {
io_bio->bi_end_io = btrfs_endio_direct_read;
dip->subio_endio = btrfs_subio_endio_read;
}
ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
if (!ret)
return;
if (btrfs_bio->end_io)
btrfs_bio->end_io(btrfs_bio, ret);
free_ordered:
/*
* If we arrived here it means we either failed to submit the dip,
* failed to clone the dio_bio, or failed to allocate the
* dip. If we cloned the dio_bio and allocated the dip, we can just
* call bio_endio against our io_bio so that we get proper resource
* cleanup if we fail to submit the dip, otherwise, we must do the
* same as btrfs_endio_direct_[write|read] because we can't call these
* callbacks - they require an allocated dip and a clone of dio_bio.
*/
if (io_bio && dip) {
io_bio->bi_error = -EIO;
bio_endio(io_bio);
/*
* The end io callbacks free our dip, do the final put on io_bio
* and all the cleanup and final put for dio_bio (through
* dio_end_io()).
*/
dip = NULL;
io_bio = NULL;
} else {
if (write) {
struct btrfs_ordered_extent *ordered;
ordered = btrfs_lookup_ordered_extent(inode,
file_offset);
set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
/*
* Decrements our ref on the ordered extent and removes
* the ordered extent from the inode's ordered tree,
* doing all the proper resource cleanup such as for the
* reserved space and waking up any waiters for this
* ordered extent (through btrfs_remove_ordered_extent).
*/
btrfs_finish_ordered_io(ordered);
} else {
unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
file_offset + dio_bio->bi_iter.bi_size - 1);
}
dio_bio->bi_error = -EIO;
/*
* Releases and cleans up our dio_bio, no need to bio_put()
* nor bio_endio()/bio_io_error() against dio_bio.
*/
dio_end_io(dio_bio, ret);
}
if (io_bio)
bio_put(io_bio);
kfree(dip);
}
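/*
 * Validate alignment of a direct IO request and, for reads, reject iovecs
 * that reuse the same iov_base, which would otherwise lead to spurious
 * csum errors when reading back.
 */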
static ssize_t check_direct_IO(struct btrfs_root *root, struct kiocb *iocb,
const struct iov_iter *iter, loff_t offset)
{
int seg;
int i;
unsigned blocksize_mask = root->sectorsize - 1;
ssize_t retval = -EINVAL;
if (offset & blocksize_mask)
goto out;
if (iov_iter_alignment(iter) & blocksize_mask)
goto out;
/* If this is a write we don't need to check anymore */
if (iov_iter_rw(iter) == WRITE)
return 0;
/*
* Check to make sure we don't have duplicate iov_base's in this
* iovec; if so return -EINVAL, otherwise we'll get csum errors
* when reading back.
*/
for (seg = 0; seg < iter->nr_segs; seg++) {
for (i = seg + 1; i < iter->nr_segs; i++) {
if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
goto out;
}
}
retval = 0;
out:
return retval;
}
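/*
 * ->direct_IO implementation: flush any compressed writeback, reserve
 * space for writes (stashing the accounting in current->journal_info) and
 * hand the request to __blockdev_direct_IO, releasing whatever part of the
 * reservation went unused afterwards.
 */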
static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
loff_t offset)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_dio_data dio_data = { 0 };
size_t count = 0;
int flags = 0;
bool wakeup = true;
bool relock = false;
ssize_t ret;
if (check_direct_IO(BTRFS_I(inode)->root, iocb, iter, offset))
return 0;
inode_dio_begin(inode);
smp_mb__after_atomic();
/*
* The generic stuff only does filemap_write_and_wait_range, which
* isn't enough if we've written compressed pages to this area, so
* we need to flush the dirty pages again to make absolutely sure
* that any outstanding dirty pages are on disk.
*/
count = iov_iter_count(iter);
if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
&BTRFS_I(inode)->runtime_flags))
filemap_fdatawrite_range(inode->i_mapping, offset,
offset + count - 1);
if (iov_iter_rw(iter) == WRITE) {
/*
* If the write DIO is beyond the EOF, we need to update
* the isize, but it is protected by i_mutex. So we can
* not unlock the i_mutex in that case.
*/
if (offset + count <= inode->i_size) {
mutex_unlock(&inode->i_mutex);
relock = true;
}
ret = btrfs_delalloc_reserve_space(inode, count);
if (ret)
goto out;
dio_data.outstanding_extents = div64_u64(count +
BTRFS_MAX_EXTENT_SIZE - 1,
BTRFS_MAX_EXTENT_SIZE);
/*
* We need to know how many extents we reserved so that we can
* do the accounting properly if we go over the number we
* originally calculated. Abuse current->journal_info for this.
*/
dio_data.reserve = round_up(count, root->sectorsize);
current->journal_info = &dio_data;
} else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
&BTRFS_I(inode)->runtime_flags)) {
inode_dio_end(inode);
flags = DIO_LOCKING | DIO_SKIP_HOLES;
wakeup = false;
}
ret = __blockdev_direct_IO(iocb, inode,
BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
iter, offset, btrfs_get_blocks_direct, NULL,
btrfs_submit_direct, flags);
if (iov_iter_rw(iter) == WRITE) {
current->journal_info = NULL;
if (ret < 0 && ret != -EIOCBQUEUED) {
if (dio_data.reserve)
btrfs_delalloc_release_space(inode,
dio_data.reserve);
} else if (ret >= 0 && (size_t)ret < count)
btrfs_delalloc_release_space(inode,
count - (size_t)ret);
}
out:
if (wakeup)
inode_dio_end(inode);
if (relock)
mutex_lock(&inode->i_mutex);
return ret;
}
#define BTRFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC)
static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len)
{
int ret;
ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS);
if (ret)
return ret;
return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
}
int btrfs_readpage(struct file *file, struct page *page)
{
struct extent_io_tree *tree;
tree = &BTRFS_I(page->mapping->host)->io_tree;
return extent_read_full_page(tree, page, btrfs_get_extent, 0);
}
static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
{
struct extent_io_tree *tree;
if (current->flags & PF_MEMALLOC) {
redirty_page_for_writepage(wbc, page);
unlock_page(page);
return 0;
}
tree = &BTRFS_I(page->mapping->host)->io_tree;
return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
}
static int btrfs_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct extent_io_tree *tree;
tree = &BTRFS_I(mapping->host)->io_tree;
return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
}
static int
btrfs_readpages(struct file *file, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages)
{
struct extent_io_tree *tree;
tree = &BTRFS_I(mapping->host)->io_tree;
return extent_readpages(tree, mapping, pages, nr_pages,
btrfs_get_extent);
}
static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
struct extent_io_tree *tree;
struct extent_map_tree *map;
int ret;
tree = &BTRFS_I(page->mapping->host)->io_tree;
map = &BTRFS_I(page->mapping->host)->extent_tree;
ret = try_release_extent_mapping(map, tree, page, gfp_flags);
if (ret == 1) {
ClearPagePrivate(page);
set_page_private(page, 0);
page_cache_release(page);
}
return ret;
}
static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
if (PageWriteback(page) || PageDirty(page))
return 0;
return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
}
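/*
 * Invalidate a page: wait for writeback, account any ordered extent that
 * covered it (we may be the ones responsible for finishing it), clear the
 * extent state for the range and drop the page's private state.
 */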
static void btrfs_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
{
struct inode *inode = page->mapping->host;
struct extent_io_tree *tree;
struct btrfs_ordered_extent *ordered;
struct extent_state *cached_state = NULL;
u64 page_start = page_offset(page);
u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
int inode_evicting = inode->i_state & I_FREEING;
/*
* we have the page locked, so new writeback can't start,
* and the dirty bit won't be cleared while we are here.
*
* Wait for IO on this page so that we can safely clear
* the PagePrivate2 bit and do ordered accounting
*/
wait_on_page_writeback(page);
tree = &BTRFS_I(inode)->io_tree;
if (offset) {
btrfs_releasepage(page, GFP_NOFS);
return;
}
if (!inode_evicting)
lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
ordered = btrfs_lookup_ordered_extent(inode, page_start);
if (ordered) {
/*
* IO on this page will never be started, so we need
* to account for any ordered extents now
*/
if (!inode_evicting)
clear_extent_bit(tree, page_start, page_end,
EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
EXTENT_DEFRAG, 1, 0, &cached_state,
GFP_NOFS);
/*
* whoever cleared the private bit is responsible
* for the finish_ordered_io
*/
if (TestClearPagePrivate2(page)) {
struct btrfs_ordered_inode_tree *tree;
u64 new_len;
tree = &BTRFS_I(inode)->ordered_tree;
spin_lock_irq(&tree->lock);
set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
new_len = page_start - ordered->file_offset;
if (new_len < ordered->truncated_len)
ordered->truncated_len = new_len;
spin_unlock_irq(&tree->lock);
if (btrfs_dec_test_ordered_pending(inode, &ordered,
page_start,
PAGE_CACHE_SIZE, 1))
btrfs_finish_ordered_io(ordered);
}
btrfs_put_ordered_extent(ordered);
if (!inode_evicting) {
cached_state = NULL;
lock_extent_bits(tree, page_start, page_end, 0,
&cached_state);
}
}
if (!inode_evicting) {
clear_extent_bit(tree, page_start, page_end,
EXTENT_LOCKED | EXTENT_DIRTY |
EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
EXTENT_DEFRAG, 1, 1,
&cached_state, GFP_NOFS);
__btrfs_releasepage(page, GFP_NOFS);
}
ClearPageChecked(page);
if (PagePrivate(page)) {
ClearPagePrivate(page);
set_page_private(page, 0);
page_cache_release(page);
}
}
/*
* btrfs_page_mkwrite() is not allowed to change the file size as it gets
* called from a page fault handler when a page is first dirtied. Hence we must
* be careful to check for EOF conditions here. We set the page up correctly
* for a written page which means we get ENOSPC checking when writing into
* holes and correct delalloc and unwritten extent mapping on filesystems that
* support these features.
*
* We are not allowed to take the i_mutex here so we have to play games to
* protect against truncate races as the page could now be beyond EOF. Because
* vmtruncate() writes the inode size before removing pages, once we have the
* page lock we can determine safely if the page is beyond EOF. If it is not
* beyond EOF, then the page is guaranteed safe against truncation until we
* unlock the page.
*/
int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page = vmf->page;
struct inode *inode = file_inode(vma->vm_file);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct btrfs_ordered_extent *ordered;
struct extent_state *cached_state = NULL;
char *kaddr;
unsigned long zero_start;
loff_t size;
int ret;
int reserved = 0;
u64 page_start;
u64 page_end;
sb_start_pagefault(inode->i_sb);
ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
if (!ret) {
ret = file_update_time(vma->vm_file);
reserved = 1;
}
if (ret) {
if (ret == -ENOMEM)
ret = VM_FAULT_OOM;
else /* -ENOSPC, -EIO, etc */
ret = VM_FAULT_SIGBUS;
if (reserved)
goto out;
goto out_noreserve;
}
ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
again:
lock_page(page);
size = i_size_read(inode);
page_start = page_offset(page);
page_end = page_start + PAGE_CACHE_SIZE - 1;
if ((page->mapping != inode->i_mapping) ||
(page_start >= size)) {
/* page got truncated out from underneath us */
goto out_unlock;
}
wait_on_page_writeback(page);
lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
set_page_extent_mapped(page);
/*
* we can't set the delalloc bits if there are pending ordered
* extents. Drop our locks and wait for them to finish
*/
ordered = btrfs_lookup_ordered_extent(inode, page_start);
if (ordered) {
unlock_extent_cached(io_tree, page_start, page_end,
&cached_state, GFP_NOFS);
unlock_page(page);
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
goto again;
}
/*
* XXX - page_mkwrite gets called every time the page is dirtied, even
* if it was already dirty, so for space accounting reasons we need to
* clear any delalloc bits for the range we are fixing to save. There
* is probably a better way to do this, but for now keep consistent with
* prepare_pages in the normal write path.
*/
clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
0, 0, &cached_state, GFP_NOFS);
ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
&cached_state);
if (ret) {
unlock_extent_cached(io_tree, page_start, page_end,
&cached_state, GFP_NOFS);
ret = VM_FAULT_SIGBUS;
goto out_unlock;
}
ret = 0;
/* page is wholly or partially inside EOF */
if (page_start + PAGE_CACHE_SIZE > size)
zero_start = size & ~PAGE_CACHE_MASK;
else
zero_start = PAGE_CACHE_SIZE;
if (zero_start != PAGE_CACHE_SIZE) {
kaddr = kmap(page);
memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
flush_dcache_page(page);
kunmap(page);
}
ClearPageChecked(page);
set_page_dirty(page);
SetPageUptodate(page);
BTRFS_I(inode)->last_trans = root->fs_info->generation;
BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
out_unlock:
if (!ret) {
sb_end_pagefault(inode->i_sb);
return VM_FAULT_LOCKED;
}
unlock_page(page);
out:
btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
out_noreserve:
sb_end_pagefault(inode->i_sb);
return ret;
}
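/*
 * Truncate the inode's items down to i_size, restarting the transaction as
 * often as needed and keeping a dedicated block reservation for the
 * truncate work (see the long comment below).
 */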
static int btrfs_truncate(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_block_rsv *rsv;
int ret = 0;
int err = 0;
struct btrfs_trans_handle *trans;
u64 mask = root->sectorsize - 1;
u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
(u64)-1);
if (ret)
return ret;
/*
* Yes ladies and gentlemen, this is indeed ugly. The fact is we have
* 3 things going on here
*
* 1) We need to reserve space for our orphan item and the space to
* delete our orphan item. Lord knows we don't want to have a dangling
* orphan item because we didn't reserve space to remove it.
*
* 2) We need to reserve space to update our inode.
*
* 3) We need to have something to cache all the space that is going to
* be freed up by the truncate operation, but also have some slack
* space reserved in case it uses space during the truncate (thank you
* very much snapshotting).
*
* And we need these to all be separate. The fact is we can use a lot of
* space doing the truncate, and we have no earthly idea how much space
* we will use, so we need the truncate reservation to be separate so it
* doesn't end up using space reserved for updating the inode or
* removing the orphan item. We also need to be able to stop the
* transaction and start a new one, which means we need to be able to
* update the inode several times, and we have no way of knowing how
* many times that will be, so we can't just reserve 1 item for the
* entirety of the operation, so that has to be done separately as well.
* Then there is the orphan item, which does indeed need to be held on
* to for the whole operation, and we need nobody to touch this reserved
* space except the orphan code.
*
* So that leaves us with
*
* 1) root->orphan_block_rsv - for the orphan deletion.
* 2) rsv - for the truncate reservation, which we will steal from the
* transaction reservation.
* 3) fs_info->trans_block_rsv - this will have 1 items worth left for
* updating the inode.
*/
rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
if (!rsv)
return -ENOMEM;
rsv->size = min_size;
rsv->failfast = 1;
/*
* 1 for the truncate slack space
* 1 for updating the inode.
*/
trans = btrfs_start_transaction(root, 2);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
goto out;
}
/* Migrate the slack space for the truncate to our reserve */
ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
min_size);
BUG_ON(ret);
/*
* So if we truncate and then write and fsync we normally would just
* write the extents that changed, which is a problem if we need to
* first truncate that entire inode. So set this flag so we write out
* all of the extents in the inode to the sync log so we're completely
* safe.
*/
set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
trans->block_rsv = rsv;
while (1) {
ret = btrfs_truncate_inode_items(trans, root, inode,
inode->i_size,
BTRFS_EXTENT_DATA_KEY);
if (ret != -ENOSPC && ret != -EAGAIN) {
err = ret;
break;
}
trans->block_rsv = &root->fs_info->trans_block_rsv;
ret = btrfs_update_inode(trans, root, inode);
if (ret) {
err = ret;
break;
}
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root);
trans = btrfs_start_transaction(root, 2);
if (IS_ERR(trans)) {
ret = err = PTR_ERR(trans);
trans = NULL;
break;
}
ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
rsv, min_size);
BUG_ON(ret); /* shouldn't happen */
trans->block_rsv = rsv;
}
if (ret == 0 && inode->i_nlink > 0) {
trans->block_rsv = root->orphan_block_rsv;
ret = btrfs_orphan_del(trans, inode);
if (ret)
err = ret;
}
if (trans) {
trans->block_rsv = &root->fs_info->trans_block_rsv;
ret = btrfs_update_inode(trans, root, inode);
if (ret && !err)
err = ret;
ret = btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root);
}
out:
btrfs_free_block_rsv(root, rsv);
if (ret && !err)
err = ret;
return err;
}
/*
* create a new subvolume directory/inode (helper for the ioctl).
*/
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
struct btrfs_root *new_root,
struct btrfs_root *parent_root,
u64 new_dirid)
{
struct inode *inode;
int err;
u64 index = 0;
inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
new_dirid, new_dirid,
S_IFDIR | (~current_umask() & S_IRWXUGO),
&index);
if (IS_ERR(inode))
return PTR_ERR(inode);
inode->i_op = &btrfs_dir_inode_operations;
inode->i_fop = &btrfs_dir_file_operations;
set_nlink(inode, 1);
btrfs_i_size_write(inode, 0);
unlock_new_inode(inode);
err = btrfs_subvol_inherit_props(trans, new_root, parent_root);
if (err)
btrfs_err(new_root->fs_info,
"error inheriting subvolume %llu properties: %d",
new_root->root_key.objectid, err);
err = btrfs_update_inode(trans, new_root, inode);
iput(inode);
return err;
}
struct inode *btrfs_alloc_inode(struct super_block *sb)
{
struct btrfs_inode *ei;
struct inode *inode;
ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
if (!ei)
return NULL;
ei->root = NULL;
ei->generation = 0;
ei->last_trans = 0;
ei->last_sub_trans = 0;
ei->logged_trans = 0;
ei->delalloc_bytes = 0;
ei->defrag_bytes = 0;
ei->disk_i_size = 0;
ei->flags = 0;
ei->csum_bytes = 0;
ei->index_cnt = (u64)-1;
ei->dir_index = 0;
ei->last_unlink_trans = 0;
ei->last_log_commit = 0;
spin_lock_init(&ei->lock);
ei->outstanding_extents = 0;
ei->reserved_extents = 0;
ei->runtime_flags = 0;
ei->force_compress = BTRFS_COMPRESS_NONE;
ei->delayed_node = NULL;
ei->i_otime.tv_sec = 0;
ei->i_otime.tv_nsec = 0;
inode = &ei->vfs_inode;
extent_map_tree_init(&ei->extent_tree);
extent_io_tree_init(&ei->io_tree, &inode->i_data);
extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
ei->io_tree.track_uptodate = 1;
ei->io_failure_tree.track_uptodate = 1;
atomic_set(&ei->sync_writers, 0);
mutex_init(&ei->log_mutex);
mutex_init(&ei->delalloc_mutex);
btrfs_ordered_inode_tree_init(&ei->ordered_tree);
INIT_LIST_HEAD(&ei->delalloc_inodes);
RB_CLEAR_NODE(&ei->rb_node);
return inode;
}
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_destroy_inode(struct inode *inode)
{
btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
#endif
static void btrfs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
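/*
 * Final per-inode teardown: warn about anything that should already be
 * gone, clean up leftover orphan and ordered extent state, and free the
 * in-memory inode via RCU.
 */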
void btrfs_destroy_inode(struct inode *inode)
{
struct btrfs_ordered_extent *ordered;
struct btrfs_root *root = BTRFS_I(inode)->root;
WARN_ON(!hlist_empty(&inode->i_dentry));
WARN_ON(inode->i_data.nrpages);
WARN_ON(BTRFS_I(inode)->outstanding_extents);
WARN_ON(BTRFS_I(inode)->reserved_extents);
WARN_ON(BTRFS_I(inode)->delalloc_bytes);
WARN_ON(BTRFS_I(inode)->csum_bytes);
WARN_ON(BTRFS_I(inode)->defrag_bytes);
/*
* This can happen where we create an inode, but somebody else also
* created the same inode and we need to destroy the one we already
* created.
*/
if (!root)
goto free;
if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
&BTRFS_I(inode)->runtime_flags)) {
btrfs_info(root->fs_info, "inode %llu still on the orphan list",
btrfs_ino(inode));
atomic_dec(&root->orphan_inodes);
}
while (1) {
ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
if (!ordered)
break;
else {
btrfs_err(root->fs_info, "found ordered extent %llu %llu on inode cleanup",
ordered->file_offset, ordered->len);
btrfs_remove_ordered_extent(inode, ordered);
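/* one put for our lookup ref, one for the ref the tree held */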
btrfs_put_ordered_extent(ordered);
btrfs_put_ordered_extent(ordered);
}
}
inode_tree_del(inode);
btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
free:
call_rcu(&inode->i_rcu, btrfs_i_callback);
}
int btrfs_drop_inode(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
if (root == NULL)
return 1;
/* the snap/subvol tree is being deleted */
if (btrfs_root_refs(&root->root_item) == 0)
return 1;
else
return generic_drop_inode(inode);
}
static void init_once(void *foo)
{
struct btrfs_inode *ei = (struct btrfs_inode *) foo;
inode_init_once(&ei->vfs_inode);
}
void btrfs_destroy_cachep(void)
{
/*
* Make sure all delayed RCU-freed inodes are flushed before we
* destroy the caches.
*/
rcu_barrier();
if (btrfs_inode_cachep)
kmem_cache_destroy(btrfs_inode_cachep);
if (btrfs_trans_handle_cachep)
kmem_cache_destroy(btrfs_trans_handle_cachep);
if (btrfs_transaction_cachep)
kmem_cache_destroy(btrfs_transaction_cachep);
if (btrfs_path_cachep)
kmem_cache_destroy(btrfs_path_cachep);
if (btrfs_free_space_cachep)
kmem_cache_destroy(btrfs_free_space_cachep);
if (btrfs_delalloc_work_cachep)
kmem_cache_destroy(btrfs_delalloc_work_cachep);
}
int btrfs_init_cachep(void)
{
btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
sizeof(struct btrfs_inode), 0,
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
if (!btrfs_inode_cachep)
goto fail;
btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
sizeof(struct btrfs_trans_handle), 0,
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
if (!btrfs_trans_handle_cachep)
goto fail;
btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction",
sizeof(struct btrfs_transaction), 0,
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
if (!btrfs_transaction_cachep)
goto fail;
btrfs_path_cachep = kmem_cache_create("btrfs_path",
sizeof(struct btrfs_path), 0,
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
if (!btrfs_path_cachep)
goto fail;
btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
sizeof(struct btrfs_free_space), 0,
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
if (!btrfs_free_space_cachep)
goto fail;
btrfs_delalloc_work_cachep = kmem_cache_create("btrfs_delalloc_work",
sizeof(struct btrfs_delalloc_work), 0,
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
NULL);
if (!btrfs_delalloc_work_cachep)
goto fail;
return 0;
fail:
btrfs_destroy_cachep();
return -ENOMEM;
}
static int btrfs_getattr(struct vfsmount *mnt,
struct dentry *dentry, struct kstat *stat)
{
u64 delalloc_bytes;
struct inode *inode = d_inode(dentry);
u32 blocksize = inode->i_sb->s_blocksize;
generic_fillattr(inode, stat);
stat->dev = BTRFS_I(inode)->root->anon_dev;
stat->blksize = PAGE_CACHE_SIZE;
spin_lock(&BTRFS_I(inode)->lock);
delalloc_bytes = BTRFS_I(inode)->delalloc_bytes;
spin_unlock(&BTRFS_I(inode)->lock);
stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
ALIGN(delalloc_bytes, blocksize)) >> 9;
return 0;
}
static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(old_dir)->root;
struct btrfs_root *dest = BTRFS_I(new_dir)->root;
struct inode *new_inode = d_inode(new_dentry);
struct inode *old_inode = d_inode(old_dentry);
struct timespec ctime = CURRENT_TIME;
u64 index = 0;
u64 root_objectid;
int ret;
u64 old_ino = btrfs_ino(old_inode);
if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
return -EPERM;
/* we only allow rename subvolume link between subvolumes */
if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
return -EXDEV;
if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
(new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
return -ENOTEMPTY;
if (S_ISDIR(old_inode->i_mode) && new_inode &&
new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
return -ENOTEMPTY;
/* check for collisions, even if the name isn't there */
ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
new_dentry->d_name.name,
new_dentry->d_name.len);
if (ret) {
if (ret == -EEXIST) {
/* we shouldn't get -EEXIST without a new_inode */
if (WARN_ON(!new_inode)) {
return ret;
}
} else {
/* maybe -EOVERFLOW */
return ret;
}
}
ret = 0;
/*
* we're using rename to replace one file with another. Start IO on it
* now so we don't add too much work to the end of the transaction
*/
if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
filemap_flush(old_inode->i_mapping);
/* close the racy window with snapshot create/destroy ioctl */
if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
down_read(&root->fs_info->subvol_sem);
/*
* We want to reserve the absolute worst case amount of items. So if
* both inodes are subvols and we need to unlink them then that would
* require 4 item modifications, but if they are both normal inodes it
* would require 5 item modifications, so we'll assume they're normal
* inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items
* should cover the worst case number of items we'll modify.
*/
trans = btrfs_start_transaction(root, 11);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out_notrans;
}
if (dest != root)
btrfs_record_root_in_trans(trans, dest);
ret = btrfs_set_inode_index(new_dir, &index);
if (ret)
goto out_fail;
BTRFS_I(old_inode)->dir_index = 0ULL;
if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
/* force full log commit if subvolume involved. */
btrfs_set_log_full_commit(root->fs_info, trans);
} else {
ret = btrfs_insert_inode_ref(trans, dest,
new_dentry->d_name.name,
new_dentry->d_name.len,
old_ino,
btrfs_ino(new_dir), index);
if (ret)
goto out_fail;
/*
* this is an ugly little race, but the rename is required
* to make sure that if we crash, the inode is either at the
* old name or the new one. pinning the log transaction lets
* us make sure we don't allow a log commit to come in after
* we unlink the name but before we add the new name back in.
*/
btrfs_pin_log_trans(root);
}
inode_inc_iversion(old_dir);
inode_inc_iversion(new_dir);
inode_inc_iversion(old_inode);
old_dir->i_ctime = old_dir->i_mtime = ctime;
new_dir->i_ctime = new_dir->i_mtime = ctime;
old_inode->i_ctime = ctime;
if (old_dentry->d_parent != new_dentry->d_parent)
btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
old_dentry->d_name.name,
old_dentry->d_name.len);
} else {
ret = __btrfs_unlink_inode(trans, root, old_dir,
d_inode(old_dentry),
old_dentry->d_name.name,
old_dentry->d_name.len);
if (!ret)
ret = btrfs_update_inode(trans, root, old_inode);
}
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto out_fail;
}
if (new_inode) {
inode_inc_iversion(new_inode);
new_inode->i_ctime = CURRENT_TIME;
if (unlikely(btrfs_ino(new_inode) ==
BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
root_objectid = BTRFS_I(new_inode)->location.objectid;
ret = btrfs_unlink_subvol(trans, dest, new_dir,
root_objectid,
new_dentry->d_name.name,
new_dentry->d_name.len);
BUG_ON(new_inode->i_nlink == 0);
} else {
ret = btrfs_unlink_inode(trans, dest, new_dir,
d_inode(new_dentry),
new_dentry->d_name.name,
new_dentry->d_name.len);
}
if (!ret && new_inode->i_nlink == 0)
ret = btrfs_orphan_add(trans, d_inode(new_dentry));
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto out_fail;
}
}
ret = btrfs_add_link(trans, new_dir, old_inode,
new_dentry->d_name.name,
new_dentry->d_name.len, 0, index);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto out_fail;
}
if (old_inode->i_nlink == 1)
BTRFS_I(old_inode)->dir_index = index;
if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
struct dentry *parent = new_dentry->d_parent;
btrfs_log_new_name(trans, old_inode, old_dir, parent);
btrfs_end_log_trans(root);
}
out_fail:
btrfs_end_transaction(trans, root);
out_notrans:
if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
up_read(&root->fs_info->subvol_sem);
return ret;
}
static int btrfs_rename2(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
if (flags & ~RENAME_NOREPLACE)
return -EINVAL;
return btrfs_rename(old_dir, old_dentry, new_dir, new_dentry);
}
static void btrfs_run_delalloc_work(struct btrfs_work *work)
{
struct btrfs_delalloc_work *delalloc_work;
struct inode *inode;
delalloc_work = container_of(work, struct btrfs_delalloc_work,
work);
inode = delalloc_work->inode;
if (delalloc_work->wait) {
btrfs_wait_ordered_range(inode, 0, (u64)-1);
} else {
filemap_flush(inode->i_mapping);
if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
&BTRFS_I(inode)->runtime_flags))
filemap_flush(inode->i_mapping);
}
if (delalloc_work->delay_iput)
btrfs_add_delayed_iput(inode);
else
iput(inode);
complete(&delalloc_work->completion);
}
struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
int wait, int delay_iput)
{
struct btrfs_delalloc_work *work;
work = kmem_cache_zalloc(btrfs_delalloc_work_cachep, GFP_NOFS);
if (!work)
return NULL;
init_completion(&work->completion);
INIT_LIST_HEAD(&work->list);
work->inode = inode;
work->wait = wait;
work->delay_iput = delay_iput;
WARN_ON_ONCE(!inode);
btrfs_init_work(&work->work, btrfs_flush_delalloc_helper,
btrfs_run_delalloc_work, NULL, NULL);
return work;
}
void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work)
{
wait_for_completion(&work->completion);
kmem_cache_free(btrfs_delalloc_work_cachep, work);
}
/*
* some fairly slow code that needs optimization. This walks the list
* of all the inodes with pending delalloc and forces them to disk.
*/
static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput,
int nr)
{
struct btrfs_inode *binode;
struct inode *inode;
struct btrfs_delalloc_work *work, *next;
struct list_head works;
struct list_head splice;
int ret = 0;
INIT_LIST_HEAD(&works);
INIT_LIST_HEAD(&splice);
mutex_lock(&root->delalloc_mutex);
spin_lock(&root->delalloc_lock);
list_splice_init(&root->delalloc_inodes, &splice);
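/*
* work against a private snapshot of the per-root delalloc list so
* new inodes can keep arriving; entries are moved back to the tail
* of the live list as they are picked up
*/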
while (!list_empty(&splice)) {
binode = list_entry(splice.next, struct btrfs_inode,
delalloc_inodes);
list_move_tail(&binode->delalloc_inodes,
&root->delalloc_inodes);
inode = igrab(&binode->vfs_inode);
if (!inode) {
cond_resched_lock(&root->delalloc_lock);
continue;
}
spin_unlock(&root->delalloc_lock);
work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
if (!work) {
if (delay_iput)
btrfs_add_delayed_iput(inode);
else
iput(inode);
ret = -ENOMEM;
goto out;
}
list_add_tail(&work->list, &works);
btrfs_queue_work(root->fs_info->flush_workers,
&work->work);
ret++;
if (nr != -1 && ret >= nr)
goto out;
cond_resched();
spin_lock(&root->delalloc_lock);
}
spin_unlock(&root->delalloc_lock);
out:
list_for_each_entry_safe(work, next, &works, list) {
list_del_init(&work->list);
btrfs_wait_and_free_delalloc_work(work);
}
if (!list_empty_careful(&splice)) {
spin_lock(&root->delalloc_lock);
list_splice_tail(&splice, &root->delalloc_inodes);
spin_unlock(&root->delalloc_lock);
}
mutex_unlock(&root->delalloc_mutex);
return ret;
}
int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
{
int ret;
if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
return -EROFS;
ret = __start_delalloc_inodes(root, delay_iput, -1);
if (ret > 0)
ret = 0;
/*
* the filemap_flush will queue IO into the worker threads, but
* we have to make sure the IO is actually started and that
* ordered extents get created before we return
*/
atomic_inc(&root->fs_info->async_submit_draining);
while (atomic_read(&root->fs_info->nr_async_submits) ||
atomic_read(&root->fs_info->async_delalloc_pages)) {
wait_event(root->fs_info->async_submit_wait,
(atomic_read(&root->fs_info->nr_async_submits) == 0 &&
atomic_read(&root->fs_info->async_delalloc_pages) == 0));
}
atomic_dec(&root->fs_info->async_submit_draining);
return ret;
}
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput,
int nr)
{
struct btrfs_root *root;
struct list_head splice;
int ret;
if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
return -EROFS;
INIT_LIST_HEAD(&splice);
mutex_lock(&fs_info->delalloc_root_mutex);
spin_lock(&fs_info->delalloc_root_lock);
list_splice_init(&fs_info->delalloc_roots, &splice);
while (!list_empty(&splice) && nr) {
root = list_first_entry(&splice, struct btrfs_root,
delalloc_root);
root = btrfs_grab_fs_root(root);
BUG_ON(!root);
list_move_tail(&root->delalloc_root,
&fs_info->delalloc_roots);
spin_unlock(&fs_info->delalloc_root_lock);
ret = __start_delalloc_inodes(root, delay_iput, nr);
btrfs_put_fs_root(root);
if (ret < 0)
goto out;
if (nr != -1) {
nr -= ret;
WARN_ON(nr < 0);
}
spin_lock(&fs_info->delalloc_root_lock);
}
spin_unlock(&fs_info->delalloc_root_lock);
ret = 0;
atomic_inc(&fs_info->async_submit_draining);
while (atomic_read(&fs_info->nr_async_submits) ||
atomic_read(&fs_info->async_delalloc_pages)) {
wait_event(fs_info->async_submit_wait,
(atomic_read(&fs_info->nr_async_submits) == 0 &&
atomic_read(&fs_info->async_delalloc_pages) == 0));
}
atomic_dec(&fs_info->async_submit_draining);
out:
if (!list_empty_careful(&splice)) {
spin_lock(&fs_info->delalloc_root_lock);
list_splice_tail(&splice, &fs_info->delalloc_roots);
spin_unlock(&fs_info->delalloc_root_lock);
}
mutex_unlock(&fs_info->delalloc_root_mutex);
return ret;
}
static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
const char *symname)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_path *path;
struct btrfs_key key;
struct inode *inode = NULL;
int err;
int drop_inode = 0;
u64 objectid;
u64 index = 0;
int name_len;
int datasize;
unsigned long ptr;
struct btrfs_file_extent_item *ei;
struct extent_buffer *leaf;
name_len = strlen(symname);
if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
return -ENAMETOOLONG;
/*
* 2 items for inode item and ref
* 2 items for dir items
* 1 item for xattr if selinux is on
*/
trans = btrfs_start_transaction(root, 5);
if (IS_ERR(trans))
return PTR_ERR(trans);
err = btrfs_find_free_ino(root, &objectid);
if (err)
goto out_unlock;
inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
dentry->d_name.len, btrfs_ino(dir), objectid,
S_IFLNK|S_IRWXUGO, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out_unlock;
}
/*
* If the active LSM wants to access the inode during
* d_instantiate it needs these. Smack checks to see
* if the filesystem supports xattrs by looking at the
* ops vector.
*/
inode->i_fop = &btrfs_file_operations;
inode->i_op = &btrfs_file_inode_operations;
inode->i_mapping->a_ops = &btrfs_aops;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err)
goto out_unlock_inode;
err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
goto out_unlock_inode;
path = btrfs_alloc_path();
if (!path) {
err = -ENOMEM;
goto out_unlock_inode;
}
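/* the symlink target is stored as an inline file extent at offset 0 */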
key.objectid = btrfs_ino(inode);
key.offset = 0;
key.type = BTRFS_EXTENT_DATA_KEY;
datasize = btrfs_file_extent_calc_inline_size(name_len);
err = btrfs_insert_empty_item(trans, root, path, &key,
datasize);
if (err) {
btrfs_free_path(path);
goto out_unlock_inode;
}
leaf = path->nodes[0];
ei = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
btrfs_set_file_extent_generation(leaf, ei, trans->transid);
btrfs_set_file_extent_type(leaf, ei,
BTRFS_FILE_EXTENT_INLINE);
btrfs_set_file_extent_encryption(leaf, ei, 0);
btrfs_set_file_extent_compression(leaf, ei, 0);
btrfs_set_file_extent_other_encoding(leaf, ei, 0);
btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
ptr = btrfs_file_extent_inline_start(ei);
write_extent_buffer(leaf, symname, ptr, name_len);
btrfs_mark_buffer_dirty(leaf);
btrfs_free_path(path);
inode->i_op = &btrfs_symlink_inode_operations;
inode->i_mapping->a_ops = &btrfs_symlink_aops;
inode_set_bytes(inode, name_len);
btrfs_i_size_write(inode, name_len);
err = btrfs_update_inode(trans, root, inode);
if (err) {
drop_inode = 1;
goto out_unlock_inode;
}
unlock_new_inode(inode);
d_instantiate(dentry, inode);
out_unlock:
btrfs_end_transaction(trans, root);
if (drop_inode) {
inode_dec_link_count(inode);
iput(inode);
}
btrfs_btree_balance_dirty(root);
return err;
out_unlock_inode:
drop_inode = 1;
unlock_new_inode(inode);
goto out_unlock;
}
static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
u64 start, u64 num_bytes, u64 min_size,
loff_t actual_len, u64 *alloc_hint,
struct btrfs_trans_handle *trans)
{
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct extent_map *em;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_key ins;
u64 cur_offset = start;
u64 i_size;
u64 cur_bytes;
int ret = 0;
bool own_trans = true;
if (trans)
own_trans = false;
while (num_bytes > 0) {
if (own_trans) {
trans = btrfs_start_transaction(root, 3);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
break;
}
}
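/*
* cap each step at 256MB (bounded below by min_size); this keeps
* any single transaction's reservation modest
*/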
cur_bytes = min(num_bytes, 256ULL * 1024 * 1024);
cur_bytes = max(cur_bytes, min_size);
ret = btrfs_reserve_extent(root, cur_bytes, min_size, 0,
*alloc_hint, &ins, 1, 0);
if (ret) {
if (own_trans)
btrfs_end_transaction(trans, root);
break;
}
ret = insert_reserved_file_extent(trans, inode,
cur_offset, ins.objectid,
ins.offset, ins.offset,
ins.offset, 0, 0, 0,
BTRFS_FILE_EXTENT_PREALLOC);
if (ret) {
btrfs_free_reserved_extent(root, ins.objectid,
ins.offset, 0);
btrfs_abort_transaction(trans, root, ret);
if (own_trans)
btrfs_end_transaction(trans, root);
break;
}
btrfs_drop_extent_cache(inode, cur_offset,
cur_offset + ins.offset - 1, 0);
em = alloc_extent_map();
if (!em) {
set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
&BTRFS_I(inode)->runtime_flags);
goto next;
}
em->start = cur_offset;
em->orig_start = cur_offset;
em->len = ins.offset;
em->block_start = ins.objectid;
em->block_len = ins.offset;
em->orig_block_len = ins.offset;
em->ram_bytes = ins.offset;
em->bdev = root->fs_info->fs_devices->latest_bdev;
set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
em->generation = trans->transid;
while (1) {
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em, 1);
write_unlock(&em_tree->lock);
if (ret != -EEXIST)
break;
btrfs_drop_extent_cache(inode, cur_offset,
cur_offset + ins.offset - 1,
0);
}
free_extent_map(em);
next:
num_bytes -= ins.offset;
cur_offset += ins.offset;
*alloc_hint = ins.objectid + ins.offset;
inode_inc_iversion(inode);
inode->i_ctime = CURRENT_TIME;
BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
(actual_len > inode->i_size) &&
(cur_offset > inode->i_size)) {
if (cur_offset > actual_len)
i_size = actual_len;
else
i_size = cur_offset;
i_size_write(inode, i_size);
btrfs_ordered_update_i_size(inode, i_size, NULL);
}
ret = btrfs_update_inode(trans, root, inode);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
if (own_trans)
btrfs_end_transaction(trans, root);
break;
}
if (own_trans)
btrfs_end_transaction(trans, root);
}
return ret;
}
int btrfs_prealloc_file_range(struct inode *inode, int mode,
u64 start, u64 num_bytes, u64 min_size,
loff_t actual_len, u64 *alloc_hint)
{
return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
min_size, actual_len, alloc_hint,
NULL);
}
int btrfs_prealloc_file_range_trans(struct inode *inode,
struct btrfs_trans_handle *trans, int mode,
u64 start, u64 num_bytes, u64 min_size,
loff_t actual_len, u64 *alloc_hint)
{
return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
min_size, actual_len, alloc_hint, trans);
}
static int btrfs_set_page_dirty(struct page *page)
{
return __set_page_dirty_nobuffers(page);
}
static int btrfs_permission(struct inode *inode, int mask)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
umode_t mode = inode->i_mode;
if (mask & MAY_WRITE &&
(S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
if (btrfs_root_readonly(root))
return -EROFS;
if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
return -EACCES;
}
return generic_permission(inode, mask);
}
static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = NULL;
u64 objectid;
u64 index;
int ret = 0;
/*
* 5 units required for adding orphan entry
*/
trans = btrfs_start_transaction(root, 5);
if (IS_ERR(trans))
return PTR_ERR(trans);
ret = btrfs_find_free_ino(root, &objectid);
if (ret)
goto out;
inode = btrfs_new_inode(trans, root, dir, NULL, 0,
btrfs_ino(dir), objectid, mode, &index);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
inode = NULL;
goto out;
}
inode->i_fop = &btrfs_file_operations;
inode->i_op = &btrfs_file_inode_operations;
inode->i_mapping->a_ops = &btrfs_aops;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
ret = btrfs_init_inode_security(trans, inode, dir, NULL);
if (ret)
goto out_inode;
ret = btrfs_update_inode(trans, root, inode);
if (ret)
goto out_inode;
ret = btrfs_orphan_add(trans, inode);
if (ret)
goto out_inode;
/*
* We set number of links to 0 in btrfs_new_inode(), and here we set
* it to 1 because d_tmpfile() will issue a warning if the count is 0,
* through:
*
* d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
*/
set_nlink(inode, 1);
unlock_new_inode(inode);
d_tmpfile(dentry, inode);
mark_inode_dirty(inode);
out:
btrfs_end_transaction(trans, root);
if (ret)
iput(inode);
btrfs_balance_delayed_items(root);
btrfs_btree_balance_dirty(root);
return ret;
out_inode:
unlock_new_inode(inode);
goto out;
}
/* Inspired by filemap_check_errors() */
int btrfs_inode_check_errors(struct inode *inode)
{
int ret = 0;
if (test_bit(AS_ENOSPC, &inode->i_mapping->flags) &&
test_and_clear_bit(AS_ENOSPC, &inode->i_mapping->flags))
ret = -ENOSPC;
if (test_bit(AS_EIO, &inode->i_mapping->flags) &&
test_and_clear_bit(AS_EIO, &inode->i_mapping->flags))
ret = -EIO;
return ret;
}
static const struct inode_operations btrfs_dir_inode_operations = {
.getattr = btrfs_getattr,
.lookup = btrfs_lookup,
.create = btrfs_create,
.unlink = btrfs_unlink,
.link = btrfs_link,
.mkdir = btrfs_mkdir,
.rmdir = btrfs_rmdir,
.rename2 = btrfs_rename2,
.symlink = btrfs_symlink,
.setattr = btrfs_setattr,
.mknod = btrfs_mknod,
.setxattr = btrfs_setxattr,
.getxattr = btrfs_getxattr,
.listxattr = btrfs_listxattr,
.removexattr = btrfs_removexattr,
.permission = btrfs_permission,
.get_acl = btrfs_get_acl,
.set_acl = btrfs_set_acl,
.update_time = btrfs_update_time,
.tmpfile = btrfs_tmpfile,
};
static const struct inode_operations btrfs_dir_ro_inode_operations = {
.lookup = btrfs_lookup,
.permission = btrfs_permission,
.get_acl = btrfs_get_acl,
.set_acl = btrfs_set_acl,
.update_time = btrfs_update_time,
};
static const struct file_operations btrfs_dir_file_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
.iterate = btrfs_real_readdir,
.unlocked_ioctl = btrfs_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = btrfs_ioctl,
#endif
.release = btrfs_release_file,
.fsync = btrfs_sync_file,
};
static struct extent_io_ops btrfs_extent_io_ops = {
.fill_delalloc = run_delalloc_range,
.submit_bio_hook = btrfs_submit_bio_hook,
.merge_bio_hook = btrfs_merge_bio_hook,
.readpage_end_io_hook = btrfs_readpage_end_io_hook,
.writepage_end_io_hook = btrfs_writepage_end_io_hook,
.writepage_start_hook = btrfs_writepage_start_hook,
.set_bit_hook = btrfs_set_bit_hook,
.clear_bit_hook = btrfs_clear_bit_hook,
.merge_extent_hook = btrfs_merge_extent_hook,
.split_extent_hook = btrfs_split_extent_hook,
};
/*
* btrfs doesn't support the bmap operation because swapfiles
* use bmap to make a mapping of extents in the file. They assume
* these extents won't change over the life of the file and they
* use the bmap result to do IO directly to the drive.
*
* the btrfs bmap call would return logical addresses that aren't
* suitable for IO and they also will change frequently as COW
* operations happen. So, swapfile + btrfs == corruption.
*
* For now we're avoiding this by dropping bmap.
*/
static const struct address_space_operations btrfs_aops = {
.readpage = btrfs_readpage,
.writepage = btrfs_writepage,
.writepages = btrfs_writepages,
.readpages = btrfs_readpages,
.direct_IO = btrfs_direct_IO,
.invalidatepage = btrfs_invalidatepage,
.releasepage = btrfs_releasepage,
.set_page_dirty = btrfs_set_page_dirty,
.error_remove_page = generic_error_remove_page,
};
static const struct address_space_operations btrfs_symlink_aops = {
.readpage = btrfs_readpage,
.writepage = btrfs_writepage,
.invalidatepage = btrfs_invalidatepage,
.releasepage = btrfs_releasepage,
};
static const struct inode_operations btrfs_file_inode_operations = {
.getattr = btrfs_getattr,
.setattr = btrfs_setattr,
.setxattr = btrfs_setxattr,
.getxattr = btrfs_getxattr,
.listxattr = btrfs_listxattr,
.removexattr = btrfs_removexattr,
.permission = btrfs_permission,
.fiemap = btrfs_fiemap,
.get_acl = btrfs_get_acl,
.set_acl = btrfs_set_acl,
.update_time = btrfs_update_time,
};
static const struct inode_operations btrfs_special_inode_operations = {
.getattr = btrfs_getattr,
.setattr = btrfs_setattr,
.permission = btrfs_permission,
.setxattr = btrfs_setxattr,
.getxattr = btrfs_getxattr,
.listxattr = btrfs_listxattr,
.removexattr = btrfs_removexattr,
.get_acl = btrfs_get_acl,
.set_acl = btrfs_set_acl,
.update_time = btrfs_update_time,
};
static const struct inode_operations btrfs_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = page_follow_link_light,
.put_link = page_put_link,
.getattr = btrfs_getattr,
.setattr = btrfs_setattr,
.permission = btrfs_permission,
.setxattr = btrfs_setxattr,
.getxattr = btrfs_getxattr,
.listxattr = btrfs_listxattr,
.removexattr = btrfs_removexattr,
.update_time = btrfs_update_time,
};
const struct dentry_operations btrfs_dentry_operations = {
.d_delete = btrfs_dentry_delete,
.d_release = btrfs_dentry_release,
};
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_1781_0 |
crossvul-cpp_data_good_5059_0 | /*
* Timers abstract layer
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/string.h>
#include <sound/core.h>
#include <sound/timer.h>
#include <sound/control.h>
#include <sound/info.h>
#include <sound/minors.h>
#include <sound/initval.h>
#include <linux/kmod.h>
#if IS_ENABLED(CONFIG_SND_HRTIMER)
#define DEFAULT_TIMER_LIMIT 4
#else
#define DEFAULT_TIMER_LIMIT 1
#endif
static int timer_limit = DEFAULT_TIMER_LIMIT;
static int timer_tstamp_monotonic = 1;
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, Takashi Iwai <tiwai@suse.de>");
MODULE_DESCRIPTION("ALSA timer interface");
MODULE_LICENSE("GPL");
module_param(timer_limit, int, 0444);
MODULE_PARM_DESC(timer_limit, "Maximum global timers in system.");
module_param(timer_tstamp_monotonic, int, 0444);
MODULE_PARM_DESC(timer_tstamp_monotonic, "Use posix monotonic clock source for timestamps (default).");
MODULE_ALIAS_CHARDEV(CONFIG_SND_MAJOR, SNDRV_MINOR_TIMER);
MODULE_ALIAS("devname:snd/timer");
struct snd_timer_user {
struct snd_timer_instance *timeri;
int tread; /* enhanced read with timestamps and events */
unsigned long ticks;
unsigned long overrun;
int qhead;
int qtail;
int qused;
int queue_size;
bool disconnected;
struct snd_timer_read *queue;
struct snd_timer_tread *tqueue;
spinlock_t qlock;
unsigned long last_resolution;
unsigned int filter;
struct timespec tstamp; /* trigger tstamp */
wait_queue_head_t qchange_sleep;
struct fasync_struct *fasync;
struct mutex ioctl_lock;
};
/* list of timers */
static LIST_HEAD(snd_timer_list);
/* list of slave instances */
static LIST_HEAD(snd_timer_slave_list);
/* lock for slave active lists */
static DEFINE_SPINLOCK(slave_active_lock);
static DEFINE_MUTEX(register_mutex);
static int snd_timer_free(struct snd_timer *timer);
static int snd_timer_dev_free(struct snd_device *device);
static int snd_timer_dev_register(struct snd_device *device);
static int snd_timer_dev_disconnect(struct snd_device *device);
static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_left);
/*
* create a timer instance with the given owner string.
* when timer is not NULL, a reference on the timer's module is taken as well
*/
static struct snd_timer_instance *snd_timer_instance_new(char *owner,
struct snd_timer *timer)
{
struct snd_timer_instance *timeri;
timeri = kzalloc(sizeof(*timeri), GFP_KERNEL);
if (timeri == NULL)
return NULL;
timeri->owner = kstrdup(owner, GFP_KERNEL);
if (! timeri->owner) {
kfree(timeri);
return NULL;
}
INIT_LIST_HEAD(&timeri->open_list);
INIT_LIST_HEAD(&timeri->active_list);
INIT_LIST_HEAD(&timeri->ack_list);
INIT_LIST_HEAD(&timeri->slave_list_head);
INIT_LIST_HEAD(&timeri->slave_active_head);
timeri->timer = timer;
if (timer && !try_module_get(timer->module)) {
kfree(timeri->owner);
kfree(timeri);
return NULL;
}
return timeri;
}
/*
* find a timer instance from the given timer id
*/
static struct snd_timer *snd_timer_find(struct snd_timer_id *tid)
{
struct snd_timer *timer = NULL;
list_for_each_entry(timer, &snd_timer_list, device_list) {
if (timer->tmr_class != tid->dev_class)
continue;
if ((timer->tmr_class == SNDRV_TIMER_CLASS_CARD ||
timer->tmr_class == SNDRV_TIMER_CLASS_PCM) &&
(timer->card == NULL ||
timer->card->number != tid->card))
continue;
if (timer->tmr_device != tid->device)
continue;
if (timer->tmr_subdevice != tid->subdevice)
continue;
return timer;
}
return NULL;
}
#ifdef CONFIG_MODULES
static void snd_timer_request(struct snd_timer_id *tid)
{
switch (tid->dev_class) {
case SNDRV_TIMER_CLASS_GLOBAL:
if (tid->device < timer_limit)
request_module("snd-timer-%i", tid->device);
break;
case SNDRV_TIMER_CLASS_CARD:
case SNDRV_TIMER_CLASS_PCM:
if (tid->card < snd_ecards_limit)
request_module("snd-card-%i", tid->card);
break;
default:
break;
}
}
#endif
/*
* look for a master instance matching with the slave id of the given slave.
* when found, relink the open_link of the slave.
*
* call this with register_mutex down.
*/
static void snd_timer_check_slave(struct snd_timer_instance *slave)
{
struct snd_timer *timer;
struct snd_timer_instance *master;
/* FIXME: it's really dumb to look up all entries.. */
list_for_each_entry(timer, &snd_timer_list, device_list) {
list_for_each_entry(master, &timer->open_list_head, open_list) {
if (slave->slave_class == master->slave_class &&
slave->slave_id == master->slave_id) {
list_move_tail(&slave->open_list,
&master->slave_list_head);
spin_lock_irq(&slave_active_lock);
slave->master = master;
slave->timer = master->timer;
spin_unlock_irq(&slave_active_lock);
return;
}
}
}
}
/*
* look for slave instances matching with the slave id of the given master.
* when found, relink the open_link of slaves.
*
* call this with register_mutex down.
*/
static void snd_timer_check_master(struct snd_timer_instance *master)
{
struct snd_timer_instance *slave, *tmp;
/* check all pending slaves */
list_for_each_entry_safe(slave, tmp, &snd_timer_slave_list, open_list) {
if (slave->slave_class == master->slave_class &&
slave->slave_id == master->slave_id) {
list_move_tail(&slave->open_list, &master->slave_list_head);
spin_lock_irq(&slave_active_lock);
spin_lock(&master->timer->lock);
slave->master = master;
slave->timer = master->timer;
if (slave->flags & SNDRV_TIMER_IFLG_RUNNING)
list_add_tail(&slave->active_list,
&master->slave_active_head);
spin_unlock(&master->timer->lock);
spin_unlock_irq(&slave_active_lock);
}
}
}
/*
* open a timer instance
* when opening a master, the slave id must be given here.
*/
int snd_timer_open(struct snd_timer_instance **ti,
char *owner, struct snd_timer_id *tid,
unsigned int slave_id)
{
struct snd_timer *timer;
struct snd_timer_instance *timeri = NULL;
if (tid->dev_class == SNDRV_TIMER_CLASS_SLAVE) {
/* open a slave instance */
if (tid->dev_sclass <= SNDRV_TIMER_SCLASS_NONE ||
tid->dev_sclass > SNDRV_TIMER_SCLASS_OSS_SEQUENCER) {
pr_debug("ALSA: timer: invalid slave class %i\n",
tid->dev_sclass);
return -EINVAL;
}
mutex_lock(&register_mutex);
timeri = snd_timer_instance_new(owner, NULL);
if (!timeri) {
mutex_unlock(&register_mutex);
return -ENOMEM;
}
timeri->slave_class = tid->dev_sclass;
timeri->slave_id = tid->device;
timeri->flags |= SNDRV_TIMER_IFLG_SLAVE;
list_add_tail(&timeri->open_list, &snd_timer_slave_list);
snd_timer_check_slave(timeri);
mutex_unlock(&register_mutex);
*ti = timeri;
return 0;
}
/* open a master instance */
mutex_lock(&register_mutex);
timer = snd_timer_find(tid);
#ifdef CONFIG_MODULES
if (!timer) {
mutex_unlock(&register_mutex);
snd_timer_request(tid);
mutex_lock(&register_mutex);
timer = snd_timer_find(tid);
}
#endif
if (!timer) {
mutex_unlock(&register_mutex);
return -ENODEV;
}
if (!list_empty(&timer->open_list_head)) {
timeri = list_entry(timer->open_list_head.next,
struct snd_timer_instance, open_list);
if (timeri->flags & SNDRV_TIMER_IFLG_EXCLUSIVE) {
mutex_unlock(&register_mutex);
return -EBUSY;
}
}
timeri = snd_timer_instance_new(owner, timer);
if (!timeri) {
mutex_unlock(&register_mutex);
return -ENOMEM;
}
/* take a card refcount for safe disconnection */
if (timer->card)
get_device(&timer->card->card_dev);
timeri->slave_class = tid->dev_sclass;
timeri->slave_id = slave_id;
if (list_empty(&timer->open_list_head) && timer->hw.open)
timer->hw.open(timer);
list_add_tail(&timeri->open_list, &timer->open_list_head);
snd_timer_check_master(timeri);
mutex_unlock(&register_mutex);
*ti = timeri;
return 0;
}
/*
* close a timer instance
*/
int snd_timer_close(struct snd_timer_instance *timeri)
{
struct snd_timer *timer = NULL;
struct snd_timer_instance *slave, *tmp;
if (snd_BUG_ON(!timeri))
return -ENXIO;
mutex_lock(&register_mutex);
list_del(&timeri->open_list);
/* force to stop the timer */
snd_timer_stop(timeri);
timer = timeri->timer;
if (timer) {
/* wait, until the active callback is finished */
spin_lock_irq(&timer->lock);
while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) {
spin_unlock_irq(&timer->lock);
udelay(10);
spin_lock_irq(&timer->lock);
}
spin_unlock_irq(&timer->lock);
/* remove slave links */
spin_lock_irq(&slave_active_lock);
spin_lock(&timer->lock);
list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head,
open_list) {
list_move_tail(&slave->open_list, &snd_timer_slave_list);
slave->master = NULL;
slave->timer = NULL;
list_del_init(&slave->ack_list);
list_del_init(&slave->active_list);
}
spin_unlock(&timer->lock);
spin_unlock_irq(&slave_active_lock);
/* slave doesn't need to release timer resources below */
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
timer = NULL;
}
if (timeri->private_free)
timeri->private_free(timeri);
kfree(timeri->owner);
kfree(timeri);
if (timer) {
if (list_empty(&timer->open_list_head) && timer->hw.close)
timer->hw.close(timer);
/* release a card refcount for safe disconnection */
if (timer->card)
put_device(&timer->card->card_dev);
module_put(timer->module);
}
mutex_unlock(&register_mutex);
return 0;
}
unsigned long snd_timer_resolution(struct snd_timer_instance *timeri)
{
struct snd_timer * timer;
if (timeri == NULL)
return 0;
if ((timer = timeri->timer) != NULL) {
if (timer->hw.c_resolution)
return timer->hw.c_resolution(timer);
return timer->hw.resolution;
}
return 0;
}
static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
{
struct snd_timer *timer;
unsigned long resolution = 0;
struct snd_timer_instance *ts;
struct timespec tstamp;
if (timer_tstamp_monotonic)
ktime_get_ts(&tstamp);
else
getnstimeofday(&tstamp);
if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_START ||
event > SNDRV_TIMER_EVENT_PAUSE))
return;
if (event == SNDRV_TIMER_EVENT_START ||
event == SNDRV_TIMER_EVENT_CONTINUE)
resolution = snd_timer_resolution(ti);
if (ti->ccallback)
ti->ccallback(ti, event, &tstamp, resolution);
if (ti->flags & SNDRV_TIMER_IFLG_SLAVE)
return;
timer = ti->timer;
if (timer == NULL)
return;
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
return;
list_for_each_entry(ts, &ti->slave_active_head, active_list)
if (ts->ccallback)
ts->ccallback(ts, event + 100, &tstamp, resolution);
}
/* start/continue a master timer */
static int snd_timer_start1(struct snd_timer_instance *timeri,
bool start, unsigned long ticks)
{
struct snd_timer *timer;
int result;
unsigned long flags;
timer = timeri->timer;
if (!timer)
return -EINVAL;
spin_lock_irqsave(&timer->lock, flags);
if (timer->card && timer->card->shutdown) {
result = -ENODEV;
goto unlock;
}
if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
SNDRV_TIMER_IFLG_START)) {
result = -EBUSY;
goto unlock;
}
if (start)
timeri->ticks = timeri->cticks = ticks;
else if (!timeri->cticks)
timeri->cticks = 1;
timeri->pticks = 0;
list_move_tail(&timeri->active_list, &timer->active_list_head);
if (timer->running) {
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
goto __start_now;
timer->flags |= SNDRV_TIMER_FLG_RESCHED;
timeri->flags |= SNDRV_TIMER_IFLG_START;
result = 1; /* delayed start */
} else {
if (start)
timer->sticks = ticks;
timer->hw.start(timer);
__start_now:
timer->running++;
timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
result = 0;
}
snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START :
SNDRV_TIMER_EVENT_CONTINUE);
unlock:
spin_unlock_irqrestore(&timer->lock, flags);
return result;
}
/* start/continue a slave timer */
static int snd_timer_start_slave(struct snd_timer_instance *timeri,
bool start)
{
unsigned long flags;
spin_lock_irqsave(&slave_active_lock, flags);
if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
spin_unlock_irqrestore(&slave_active_lock, flags);
return -EBUSY;
}
timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
if (timeri->master && timeri->timer) {
spin_lock(&timeri->timer->lock);
list_add_tail(&timeri->active_list,
&timeri->master->slave_active_head);
snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START :
SNDRV_TIMER_EVENT_CONTINUE);
spin_unlock(&timeri->timer->lock);
}
spin_unlock_irqrestore(&slave_active_lock, flags);
return 1; /* delayed start */
}
/* stop/pause a master timer */
static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop)
{
struct snd_timer *timer;
int result = 0;
unsigned long flags;
timer = timeri->timer;
if (!timer)
return -EINVAL;
spin_lock_irqsave(&timer->lock, flags);
if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
SNDRV_TIMER_IFLG_START))) {
result = -EBUSY;
goto unlock;
}
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
if (timer->card && timer->card->shutdown)
goto unlock;
if (stop) {
timeri->cticks = timeri->ticks;
timeri->pticks = 0;
}
if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
!(--timer->running)) {
timer->hw.stop(timer);
if (timer->flags & SNDRV_TIMER_FLG_RESCHED) {
timer->flags &= ~SNDRV_TIMER_FLG_RESCHED;
snd_timer_reschedule(timer, 0);
if (timer->flags & SNDRV_TIMER_FLG_CHANGE) {
timer->flags &= ~SNDRV_TIMER_FLG_CHANGE;
timer->hw.start(timer);
}
}
}
timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
SNDRV_TIMER_EVENT_PAUSE);
unlock:
spin_unlock_irqrestore(&timer->lock, flags);
return result;
}
/* stop/pause a slave timer */
static int snd_timer_stop_slave(struct snd_timer_instance *timeri, bool stop)
{
unsigned long flags;
spin_lock_irqsave(&slave_active_lock, flags);
if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
spin_unlock_irqrestore(&slave_active_lock, flags);
return -EBUSY;
}
timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
if (timeri->timer) {
spin_lock(&timeri->timer->lock);
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
SNDRV_TIMER_EVENT_PAUSE);
spin_unlock(&timeri->timer->lock);
}
spin_unlock_irqrestore(&slave_active_lock, flags);
return 0;
}
/*
* start the timer instance
*/
int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
{
if (timeri == NULL || ticks < 1)
return -EINVAL;
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_start_slave(timeri, true);
else
return snd_timer_start1(timeri, true, ticks);
}
/*
* stop the timer instance.
*
* do not call this from the timer callback!
*/
int snd_timer_stop(struct snd_timer_instance *timeri)
{
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_stop_slave(timeri, true);
else
return snd_timer_stop1(timeri, true);
}
/*
* start again; the remaining ticks are kept.
*/
int snd_timer_continue(struct snd_timer_instance *timeri)
{
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_start_slave(timeri, false);
else
return snd_timer_start1(timeri, false, 0);
}
/*
* pause; the remaining ticks are remembered.
*/
int snd_timer_pause(struct snd_timer_instance * timeri)
{
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_stop_slave(timeri, false);
else
return snd_timer_stop1(timeri, false);
}
/*
* reschedule the timer
*
* start pending instances and check the scheduling ticks.
* when the scheduling ticks change, set the CHANGE flag to reprogram the timer.
*/
static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_left)
{
struct snd_timer_instance *ti;
unsigned long ticks = ~0UL;
list_for_each_entry(ti, &timer->active_list_head, active_list) {
if (ti->flags & SNDRV_TIMER_IFLG_START) {
ti->flags &= ~SNDRV_TIMER_IFLG_START;
ti->flags |= SNDRV_TIMER_IFLG_RUNNING;
timer->running++;
}
if (ti->flags & SNDRV_TIMER_IFLG_RUNNING) {
if (ticks > ti->cticks)
ticks = ti->cticks;
}
}
if (ticks == ~0UL) {
timer->flags &= ~SNDRV_TIMER_FLG_RESCHED;
return;
}
if (ticks > timer->hw.ticks)
ticks = timer->hw.ticks;
if (ticks_left != ticks)
timer->flags |= SNDRV_TIMER_FLG_CHANGE;
timer->sticks = ticks;
}
/*
* timer tasklet
*
*/
static void snd_timer_tasklet(unsigned long arg)
{
struct snd_timer *timer = (struct snd_timer *) arg;
struct snd_timer_instance *ti;
struct list_head *p;
unsigned long resolution, ticks;
unsigned long flags;
if (timer->card && timer->card->shutdown)
return;
spin_lock_irqsave(&timer->lock, flags);
/* now process all callbacks */
while (!list_empty(&timer->sack_list_head)) {
p = timer->sack_list_head.next; /* get first item */
ti = list_entry(p, struct snd_timer_instance, ack_list);
/* remove from ack_list and make empty */
list_del_init(p);
ticks = ti->pticks;
ti->pticks = 0;
resolution = ti->resolution;
ti->flags |= SNDRV_TIMER_IFLG_CALLBACK;
spin_unlock(&timer->lock);
if (ti->callback)
ti->callback(ti, resolution, ticks);
spin_lock(&timer->lock);
ti->flags &= ~SNDRV_TIMER_IFLG_CALLBACK;
}
spin_unlock_irqrestore(&timer->lock, flags);
}
/*
* timer interrupt
*
* ticks_left is usually equal to timer->sticks.
*
*/
void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
{
struct snd_timer_instance *ti, *ts, *tmp;
unsigned long resolution, ticks;
struct list_head *p, *ack_list_head;
unsigned long flags;
int use_tasklet = 0;
if (timer == NULL)
return;
if (timer->card && timer->card->shutdown)
return;
spin_lock_irqsave(&timer->lock, flags);
/* remember the current resolution */
if (timer->hw.c_resolution)
resolution = timer->hw.c_resolution(timer);
else
resolution = timer->hw.resolution;
/* loop for all active instances
* Here we cannot use list_for_each_entry because the active_list of a
* processed instance may be unlinked (and the instance moved to an
* ack list) before its callback runs.
*/
list_for_each_entry_safe(ti, tmp, &timer->active_list_head,
active_list) {
if (!(ti->flags & SNDRV_TIMER_IFLG_RUNNING))
continue;
ti->pticks += ticks_left;
ti->resolution = resolution;
if (ti->cticks < ticks_left)
ti->cticks = 0;
else
ti->cticks -= ticks_left;
if (ti->cticks) /* not expired */
continue;
if (ti->flags & SNDRV_TIMER_IFLG_AUTO) {
ti->cticks = ti->ticks;
} else {
ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
--timer->running;
list_del_init(&ti->active_list);
}
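/*
* instances marked FAST, and timers whose low-level handler already
* runs in tasklet context, are acked synchronously at the end of
* this function; everything else goes to sack_list for the tasklet
*/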
if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) ||
(ti->flags & SNDRV_TIMER_IFLG_FAST))
ack_list_head = &timer->ack_list_head;
else
ack_list_head = &timer->sack_list_head;
if (list_empty(&ti->ack_list))
list_add_tail(&ti->ack_list, ack_list_head);
list_for_each_entry(ts, &ti->slave_active_head, active_list) {
ts->pticks = ti->pticks;
ts->resolution = resolution;
if (list_empty(&ts->ack_list))
list_add_tail(&ts->ack_list, ack_list_head);
}
}
if (timer->flags & SNDRV_TIMER_FLG_RESCHED)
snd_timer_reschedule(timer, timer->sticks);
if (timer->running) {
if (timer->hw.flags & SNDRV_TIMER_HW_STOP) {
timer->hw.stop(timer);
timer->flags |= SNDRV_TIMER_FLG_CHANGE;
}
if (!(timer->hw.flags & SNDRV_TIMER_HW_AUTO) ||
(timer->flags & SNDRV_TIMER_FLG_CHANGE)) {
/* restart timer */
timer->flags &= ~SNDRV_TIMER_FLG_CHANGE;
timer->hw.start(timer);
}
} else {
timer->hw.stop(timer);
}
/* now process all fast callbacks */
while (!list_empty(&timer->ack_list_head)) {
p = timer->ack_list_head.next; /* get first item */
ti = list_entry(p, struct snd_timer_instance, ack_list);
/* remove from ack_list and make empty */
list_del_init(p);
ticks = ti->pticks;
ti->pticks = 0;
ti->flags |= SNDRV_TIMER_IFLG_CALLBACK;
spin_unlock(&timer->lock);
if (ti->callback)
ti->callback(ti, resolution, ticks);
spin_lock(&timer->lock);
ti->flags &= ~SNDRV_TIMER_IFLG_CALLBACK;
}
/* do we have any slow callbacks? */
use_tasklet = !list_empty(&timer->sack_list_head);
spin_unlock_irqrestore(&timer->lock, flags);
if (use_tasklet)
tasklet_schedule(&timer->task_queue);
}
/*
* create a new timer instance; when a card is given, the timer is
* also registered as a device of that card
*/
int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid,
struct snd_timer **rtimer)
{
struct snd_timer *timer;
int err;
static struct snd_device_ops ops = {
.dev_free = snd_timer_dev_free,
.dev_register = snd_timer_dev_register,
.dev_disconnect = snd_timer_dev_disconnect,
};
if (snd_BUG_ON(!tid))
return -EINVAL;
if (rtimer)
*rtimer = NULL;
timer = kzalloc(sizeof(*timer), GFP_KERNEL);
if (!timer)
return -ENOMEM;
timer->tmr_class = tid->dev_class;
timer->card = card;
timer->tmr_device = tid->device;
timer->tmr_subdevice = tid->subdevice;
if (id)
strlcpy(timer->id, id, sizeof(timer->id));
INIT_LIST_HEAD(&timer->device_list);
INIT_LIST_HEAD(&timer->open_list_head);
INIT_LIST_HEAD(&timer->active_list_head);
INIT_LIST_HEAD(&timer->ack_list_head);
INIT_LIST_HEAD(&timer->sack_list_head);
spin_lock_init(&timer->lock);
tasklet_init(&timer->task_queue, snd_timer_tasklet,
(unsigned long)timer);
if (card != NULL) {
timer->module = card->module;
err = snd_device_new(card, SNDRV_DEV_TIMER, timer, &ops);
if (err < 0) {
snd_timer_free(timer);
return err;
}
}
if (rtimer)
*rtimer = timer;
return 0;
}
static int snd_timer_free(struct snd_timer *timer)
{
if (!timer)
return 0;
mutex_lock(&register_mutex);
if (! list_empty(&timer->open_list_head)) {
struct list_head *p, *n;
struct snd_timer_instance *ti;
pr_warn("ALSA: timer %p is busy?\n", timer);
list_for_each_safe(p, n, &timer->open_list_head) {
list_del_init(p);
ti = list_entry(p, struct snd_timer_instance, open_list);
ti->timer = NULL;
}
}
list_del(&timer->device_list);
mutex_unlock(&register_mutex);
if (timer->private_free)
timer->private_free(timer);
kfree(timer);
return 0;
}
static int snd_timer_dev_free(struct snd_device *device)
{
struct snd_timer *timer = device->device_data;
return snd_timer_free(timer);
}
static int snd_timer_dev_register(struct snd_device *dev)
{
struct snd_timer *timer = dev->device_data;
struct snd_timer *timer1;
if (snd_BUG_ON(!timer || !timer->hw.start || !timer->hw.stop))
return -ENXIO;
if (!(timer->hw.flags & SNDRV_TIMER_HW_SLAVE) &&
!timer->hw.resolution && timer->hw.c_resolution == NULL)
return -EINVAL;
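/*
* keep snd_timer_list sorted by class/card/device/subdevice; a full
* match below means this timer id is already registered
*/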
mutex_lock(&register_mutex);
list_for_each_entry(timer1, &snd_timer_list, device_list) {
if (timer1->tmr_class > timer->tmr_class)
break;
if (timer1->tmr_class < timer->tmr_class)
continue;
if (timer1->card && timer->card) {
if (timer1->card->number > timer->card->number)
break;
if (timer1->card->number < timer->card->number)
continue;
}
if (timer1->tmr_device > timer->tmr_device)
break;
if (timer1->tmr_device < timer->tmr_device)
continue;
if (timer1->tmr_subdevice > timer->tmr_subdevice)
break;
if (timer1->tmr_subdevice < timer->tmr_subdevice)
continue;
/* conflicts.. */
mutex_unlock(&register_mutex);
return -EBUSY;
}
list_add_tail(&timer->device_list, &timer1->device_list);
mutex_unlock(&register_mutex);
return 0;
}
static int snd_timer_dev_disconnect(struct snd_device *device)
{
struct snd_timer *timer = device->device_data;
struct snd_timer_instance *ti;
mutex_lock(&register_mutex);
list_del_init(&timer->device_list);
/* wake up pending sleepers */
list_for_each_entry(ti, &timer->open_list_head, open_list) {
if (ti->disconnect)
ti->disconnect(ti);
}
mutex_unlock(&register_mutex);
return 0;
}
void snd_timer_notify(struct snd_timer *timer, int event, struct timespec *tstamp)
{
unsigned long flags;
unsigned long resolution = 0;
struct snd_timer_instance *ti, *ts;
if (timer->card && timer->card->shutdown)
return;
if (! (timer->hw.flags & SNDRV_TIMER_HW_SLAVE))
return;
if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_MSTART ||
event > SNDRV_TIMER_EVENT_MRESUME))
return;
spin_lock_irqsave(&timer->lock, flags);
if (event == SNDRV_TIMER_EVENT_MSTART ||
event == SNDRV_TIMER_EVENT_MCONTINUE ||
event == SNDRV_TIMER_EVENT_MRESUME) {
if (timer->hw.c_resolution)
resolution = timer->hw.c_resolution(timer);
else
resolution = timer->hw.resolution;
}
list_for_each_entry(ti, &timer->active_list_head, active_list) {
if (ti->ccallback)
ti->ccallback(ti, event, tstamp, resolution);
list_for_each_entry(ts, &ti->slave_active_head, active_list)
if (ts->ccallback)
ts->ccallback(ts, event, tstamp, resolution);
}
spin_unlock_irqrestore(&timer->lock, flags);
}
/*
* exported functions for global timers
*/
int snd_timer_global_new(char *id, int device, struct snd_timer **rtimer)
{
struct snd_timer_id tid;
tid.dev_class = SNDRV_TIMER_CLASS_GLOBAL;
tid.dev_sclass = SNDRV_TIMER_SCLASS_NONE;
tid.card = -1;
tid.device = device;
tid.subdevice = 0;
return snd_timer_new(NULL, id, &tid, rtimer);
}
int snd_timer_global_free(struct snd_timer *timer)
{
return snd_timer_free(timer);
}
int snd_timer_global_register(struct snd_timer *timer)
{
struct snd_device dev;
memset(&dev, 0, sizeof(dev));
dev.device_data = timer;
return snd_timer_dev_register(&dev);
}
/*
* System timer
*/
struct snd_timer_system_private {
struct timer_list tlist;
unsigned long last_expires;
unsigned long last_jiffies;
unsigned long correction;
};
static void snd_timer_s_function(unsigned long data)
{
struct snd_timer *timer = (struct snd_timer *)data;
struct snd_timer_system_private *priv = timer->private_data;
unsigned long jiff = jiffies;
if (time_after(jiff, priv->last_expires))
priv->correction += (long)jiff - (long)priv->last_expires;
snd_timer_interrupt(timer, (long)jiff - (long)priv->last_jiffies);
}
static int snd_timer_s_start(struct snd_timer * timer)
{
struct snd_timer_system_private *priv;
unsigned long njiff;
priv = (struct snd_timer_system_private *) timer->private_data;
njiff = (priv->last_jiffies = jiffies);
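/*
* fold overshoot accumulated from late expirations into the next
* expiry so the long-term rate stays accurate
*/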
if (priv->correction > timer->sticks - 1) {
priv->correction -= timer->sticks - 1;
njiff++;
} else {
njiff += timer->sticks - priv->correction;
priv->correction = 0;
}
priv->last_expires = njiff;
mod_timer(&priv->tlist, njiff);
return 0;
}
static int snd_timer_s_stop(struct snd_timer * timer)
{
struct snd_timer_system_private *priv;
unsigned long jiff;
priv = (struct snd_timer_system_private *) timer->private_data;
del_timer(&priv->tlist);
jiff = jiffies;
if (time_before(jiff, priv->last_expires))
timer->sticks = priv->last_expires - jiff;
else
timer->sticks = 1;
priv->correction = 0;
return 0;
}
static int snd_timer_s_close(struct snd_timer *timer)
{
struct snd_timer_system_private *priv;
priv = (struct snd_timer_system_private *)timer->private_data;
del_timer_sync(&priv->tlist);
return 0;
}
static struct snd_timer_hardware snd_timer_system =
{
.flags = SNDRV_TIMER_HW_FIRST | SNDRV_TIMER_HW_TASKLET,
.resolution = 1000000000L / HZ,
.ticks = 10000000L,
.close = snd_timer_s_close,
.start = snd_timer_s_start,
.stop = snd_timer_s_stop
};
static void snd_timer_free_system(struct snd_timer *timer)
{
kfree(timer->private_data);
}
static int snd_timer_register_system(void)
{
struct snd_timer *timer;
struct snd_timer_system_private *priv;
int err;
err = snd_timer_global_new("system", SNDRV_TIMER_GLOBAL_SYSTEM, &timer);
if (err < 0)
return err;
strcpy(timer->name, "system timer");
timer->hw = snd_timer_system;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (priv == NULL) {
snd_timer_free(timer);
return -ENOMEM;
}
setup_timer(&priv->tlist, snd_timer_s_function, (unsigned long) timer);
timer->private_data = priv;
timer->private_free = snd_timer_free_system;
return snd_timer_global_register(timer);
}
#ifdef CONFIG_SND_PROC_FS
/*
* Info interface
*/
static void snd_timer_proc_read(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_timer *timer;
struct snd_timer_instance *ti;
mutex_lock(&register_mutex);
list_for_each_entry(timer, &snd_timer_list, device_list) {
if (timer->card && timer->card->shutdown)
continue;
switch (timer->tmr_class) {
case SNDRV_TIMER_CLASS_GLOBAL:
snd_iprintf(buffer, "G%i: ", timer->tmr_device);
break;
case SNDRV_TIMER_CLASS_CARD:
snd_iprintf(buffer, "C%i-%i: ",
timer->card->number, timer->tmr_device);
break;
case SNDRV_TIMER_CLASS_PCM:
snd_iprintf(buffer, "P%i-%i-%i: ", timer->card->number,
timer->tmr_device, timer->tmr_subdevice);
break;
default:
snd_iprintf(buffer, "?%i-%i-%i-%i: ", timer->tmr_class,
timer->card ? timer->card->number : -1,
timer->tmr_device, timer->tmr_subdevice);
}
snd_iprintf(buffer, "%s :", timer->name);
if (timer->hw.resolution)
snd_iprintf(buffer, " %lu.%03luus (%lu ticks)",
timer->hw.resolution / 1000,
timer->hw.resolution % 1000,
timer->hw.ticks);
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
snd_iprintf(buffer, " SLAVE");
snd_iprintf(buffer, "\n");
list_for_each_entry(ti, &timer->open_list_head, open_list)
snd_iprintf(buffer, " Client %s : %s\n",
ti->owner ? ti->owner : "unknown",
ti->flags & (SNDRV_TIMER_IFLG_START |
SNDRV_TIMER_IFLG_RUNNING)
? "running" : "stopped");
}
mutex_unlock(&register_mutex);
}
static struct snd_info_entry *snd_timer_proc_entry;
static void __init snd_timer_proc_init(void)
{
struct snd_info_entry *entry;
entry = snd_info_create_module_entry(THIS_MODULE, "timers", NULL);
if (entry != NULL) {
entry->c.text.read = snd_timer_proc_read;
if (snd_info_register(entry) < 0) {
snd_info_free_entry(entry);
entry = NULL;
}
}
snd_timer_proc_entry = entry;
}
static void __exit snd_timer_proc_done(void)
{
snd_info_free_entry(snd_timer_proc_entry);
}
#else /* !CONFIG_SND_PROC_FS */
#define snd_timer_proc_init()
#define snd_timer_proc_done()
#endif
/*
* USER SPACE interface
*/
static void snd_timer_user_interrupt(struct snd_timer_instance *timeri,
unsigned long resolution,
unsigned long ticks)
{
struct snd_timer_user *tu = timeri->callback_data;
struct snd_timer_read *r;
int prev;
spin_lock(&tu->qlock);
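/*
* coalesce with the previous queued element while the resolution is
* unchanged, so tick bursts do not flood the read queue
*/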
if (tu->qused > 0) {
prev = tu->qtail == 0 ? tu->queue_size - 1 : tu->qtail - 1;
r = &tu->queue[prev];
if (r->resolution == resolution) {
r->ticks += ticks;
goto __wake;
}
}
if (tu->qused >= tu->queue_size) {
tu->overrun++;
} else {
r = &tu->queue[tu->qtail++];
tu->qtail %= tu->queue_size;
r->resolution = resolution;
r->ticks = ticks;
tu->qused++;
}
__wake:
spin_unlock(&tu->qlock);
kill_fasync(&tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
static void snd_timer_user_append_to_tqueue(struct snd_timer_user *tu,
struct snd_timer_tread *tread)
{
if (tu->qused >= tu->queue_size) {
tu->overrun++;
} else {
memcpy(&tu->tqueue[tu->qtail++], tread, sizeof(*tread));
tu->qtail %= tu->queue_size;
tu->qused++;
}
}
static void snd_timer_user_ccallback(struct snd_timer_instance *timeri,
int event,
struct timespec *tstamp,
unsigned long resolution)
{
struct snd_timer_user *tu = timeri->callback_data;
struct snd_timer_tread r1;
unsigned long flags;
if (event >= SNDRV_TIMER_EVENT_START &&
event <= SNDRV_TIMER_EVENT_PAUSE)
tu->tstamp = *tstamp;
if ((tu->filter & (1 << event)) == 0 || !tu->tread)
return;
memset(&r1, 0, sizeof(r1));
r1.event = event;
r1.tstamp = *tstamp;
r1.val = resolution;
spin_lock_irqsave(&tu->qlock, flags);
snd_timer_user_append_to_tqueue(tu, &r1);
spin_unlock_irqrestore(&tu->qlock, flags);
kill_fasync(&tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
static void snd_timer_user_disconnect(struct snd_timer_instance *timeri)
{
struct snd_timer_user *tu = timeri->callback_data;
tu->disconnected = true;
wake_up(&tu->qchange_sleep);
}
static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri,
unsigned long resolution,
unsigned long ticks)
{
struct snd_timer_user *tu = timeri->callback_data;
struct snd_timer_tread *r, r1;
struct timespec tstamp;
int prev, append = 0;
memset(&tstamp, 0, sizeof(tstamp));
spin_lock(&tu->qlock);
if ((tu->filter & ((1 << SNDRV_TIMER_EVENT_RESOLUTION) |
(1 << SNDRV_TIMER_EVENT_TICK))) == 0) {
spin_unlock(&tu->qlock);
return;
}
if (tu->last_resolution != resolution || ticks > 0) {
if (timer_tstamp_monotonic)
ktime_get_ts(&tstamp);
else
getnstimeofday(&tstamp);
}
if ((tu->filter & (1 << SNDRV_TIMER_EVENT_RESOLUTION)) &&
tu->last_resolution != resolution) {
memset(&r1, 0, sizeof(r1));
r1.event = SNDRV_TIMER_EVENT_RESOLUTION;
r1.tstamp = tstamp;
r1.val = resolution;
snd_timer_user_append_to_tqueue(tu, &r1);
tu->last_resolution = resolution;
append++;
}
if ((tu->filter & (1 << SNDRV_TIMER_EVENT_TICK)) == 0)
goto __wake;
if (ticks == 0)
goto __wake;
if (tu->qused > 0) {
prev = tu->qtail == 0 ? tu->queue_size - 1 : tu->qtail - 1;
r = &tu->tqueue[prev];
if (r->event == SNDRV_TIMER_EVENT_TICK) {
r->tstamp = tstamp;
r->val += ticks;
append++;
goto __wake;
}
}
memset(&r1, 0, sizeof(r1));
r1.event = SNDRV_TIMER_EVENT_TICK;
r1.tstamp = tstamp;
r1.val = ticks;
snd_timer_user_append_to_tqueue(tu, &r1);
append++;
__wake:
spin_unlock(&tu->qlock);
if (append == 0)
return;
kill_fasync(&tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
static int snd_timer_user_open(struct inode *inode, struct file *file)
{
struct snd_timer_user *tu;
int err;
err = nonseekable_open(inode, file);
if (err < 0)
return err;
tu = kzalloc(sizeof(*tu), GFP_KERNEL);
if (tu == NULL)
return -ENOMEM;
spin_lock_init(&tu->qlock);
init_waitqueue_head(&tu->qchange_sleep);
mutex_init(&tu->ioctl_lock);
tu->ticks = 1;
tu->queue_size = 128;
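/* default queue depth; resizable later via the params ioctl */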
tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
GFP_KERNEL);
if (tu->queue == NULL) {
kfree(tu);
return -ENOMEM;
}
file->private_data = tu;
return 0;
}
static int snd_timer_user_release(struct inode *inode, struct file *file)
{
struct snd_timer_user *tu;
if (file->private_data) {
tu = file->private_data;
file->private_data = NULL;
mutex_lock(&tu->ioctl_lock);
if (tu->timeri)
snd_timer_close(tu->timeri);
mutex_unlock(&tu->ioctl_lock);
kfree(tu->queue);
kfree(tu->tqueue);
kfree(tu);
}
return 0;
}
static void snd_timer_user_zero_id(struct snd_timer_id *id)
{
id->dev_class = SNDRV_TIMER_CLASS_NONE;
id->dev_sclass = SNDRV_TIMER_SCLASS_NONE;
id->card = -1;
id->device = -1;
id->subdevice = -1;
}
static void snd_timer_user_copy_id(struct snd_timer_id *id, struct snd_timer *timer)
{
id->dev_class = timer->tmr_class;
id->dev_sclass = SNDRV_TIMER_SCLASS_NONE;
id->card = timer->card ? timer->card->number : -1;
id->device = timer->tmr_device;
id->subdevice = timer->tmr_subdevice;
}
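/*
* return the id of the timer that follows the given id in the
* sorted device list, or the "zero" id (see snd_timer_user_zero_id())
* when none is left
*/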
static int snd_timer_user_next_device(struct snd_timer_id __user *_tid)
{
struct snd_timer_id id;
struct snd_timer *timer;
struct list_head *p;
if (copy_from_user(&id, _tid, sizeof(id)))
return -EFAULT;
mutex_lock(&register_mutex);
if (id.dev_class < 0) { /* first item */
if (list_empty(&snd_timer_list))
snd_timer_user_zero_id(&id);
else {
timer = list_entry(snd_timer_list.next,
struct snd_timer, device_list);
snd_timer_user_copy_id(&id, timer);
}
} else {
switch (id.dev_class) {
case SNDRV_TIMER_CLASS_GLOBAL:
id.device = id.device < 0 ? 0 : id.device + 1;
list_for_each(p, &snd_timer_list) {
timer = list_entry(p, struct snd_timer, device_list);
if (timer->tmr_class > SNDRV_TIMER_CLASS_GLOBAL) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_device >= id.device) {
snd_timer_user_copy_id(&id, timer);
break;
}
}
if (p == &snd_timer_list)
snd_timer_user_zero_id(&id);
break;
case SNDRV_TIMER_CLASS_CARD:
case SNDRV_TIMER_CLASS_PCM:
if (id.card < 0) {
id.card = 0;
} else if (id.device < 0) {
id.device = 0;
} else if (id.subdevice < 0) {
id.subdevice = 0;
} else {
id.subdevice++;
}
list_for_each(p, &snd_timer_list) {
timer = list_entry(p, struct snd_timer, device_list);
if (timer->tmr_class > id.dev_class) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_class < id.dev_class)
continue;
if (timer->card->number > id.card) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->card->number < id.card)
continue;
if (timer->tmr_device > id.device) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_device < id.device)
continue;
if (timer->tmr_subdevice > id.subdevice) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_subdevice < id.subdevice)
continue;
snd_timer_user_copy_id(&id, timer);
break;
}
if (p == &snd_timer_list)
snd_timer_user_zero_id(&id);
break;
default:
snd_timer_user_zero_id(&id);
}
}
mutex_unlock(&register_mutex);
if (copy_to_user(_tid, &id, sizeof(*_tid)))
return -EFAULT;
return 0;
}
static int snd_timer_user_ginfo(struct file *file,
struct snd_timer_ginfo __user *_ginfo)
{
struct snd_timer_ginfo *ginfo;
struct snd_timer_id tid;
struct snd_timer *t;
struct list_head *p;
int err = 0;
ginfo = memdup_user(_ginfo, sizeof(*ginfo));
if (IS_ERR(ginfo))
return PTR_ERR(ginfo);
tid = ginfo->tid;
memset(ginfo, 0, sizeof(*ginfo));
ginfo->tid = tid;
mutex_lock(&register_mutex);
t = snd_timer_find(&tid);
if (t != NULL) {
ginfo->card = t->card ? t->card->number : -1;
if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
ginfo->flags |= SNDRV_TIMER_FLG_SLAVE;
strlcpy(ginfo->id, t->id, sizeof(ginfo->id));
strlcpy(ginfo->name, t->name, sizeof(ginfo->name));
ginfo->resolution = t->hw.resolution;
if (t->hw.resolution_min > 0) {
ginfo->resolution_min = t->hw.resolution_min;
ginfo->resolution_max = t->hw.resolution_max;
}
list_for_each(p, &t->open_list_head) {
ginfo->clients++;
}
} else {
err = -ENODEV;
}
mutex_unlock(&register_mutex);
if (err >= 0 && copy_to_user(_ginfo, ginfo, sizeof(*ginfo)))
err = -EFAULT;
kfree(ginfo);
return err;
}
static int timer_set_gparams(struct snd_timer_gparams *gparams)
{
struct snd_timer *t;
int err;
mutex_lock(&register_mutex);
t = snd_timer_find(&gparams->tid);
if (!t) {
err = -ENODEV;
goto _error;
}
if (!list_empty(&t->open_list_head)) {
err = -EBUSY;
goto _error;
}
if (!t->hw.set_period) {
err = -ENOSYS;
goto _error;
}
err = t->hw.set_period(t, gparams->period_num, gparams->period_den);
_error:
mutex_unlock(&register_mutex);
return err;
}
static int snd_timer_user_gparams(struct file *file,
struct snd_timer_gparams __user *_gparams)
{
struct snd_timer_gparams gparams;
if (copy_from_user(&gparams, _gparams, sizeof(gparams)))
return -EFAULT;
return timer_set_gparams(&gparams);
}
static int snd_timer_user_gstatus(struct file *file,
struct snd_timer_gstatus __user *_gstatus)
{
struct snd_timer_gstatus gstatus;
struct snd_timer_id tid;
struct snd_timer *t;
int err = 0;
if (copy_from_user(&gstatus, _gstatus, sizeof(gstatus)))
return -EFAULT;
tid = gstatus.tid;
memset(&gstatus, 0, sizeof(gstatus));
gstatus.tid = tid;
mutex_lock(&register_mutex);
t = snd_timer_find(&tid);
if (t != NULL) {
if (t->hw.c_resolution)
gstatus.resolution = t->hw.c_resolution(t);
else
gstatus.resolution = t->hw.resolution;
if (t->hw.precise_resolution) {
t->hw.precise_resolution(t, &gstatus.resolution_num,
&gstatus.resolution_den);
} else {
gstatus.resolution_num = gstatus.resolution;
gstatus.resolution_den = 1000000000uL;
}
} else {
err = -ENODEV;
}
mutex_unlock(&register_mutex);
if (err >= 0 && copy_to_user(_gstatus, &gstatus, sizeof(gstatus)))
err = -EFAULT;
return err;
}
static int snd_timer_user_tselect(struct file *file,
struct snd_timer_select __user *_tselect)
{
struct snd_timer_user *tu;
struct snd_timer_select tselect;
char str[32];
int err = 0;
tu = file->private_data;
if (tu->timeri) {
snd_timer_close(tu->timeri);
tu->timeri = NULL;
}
if (copy_from_user(&tselect, _tselect, sizeof(tselect))) {
err = -EFAULT;
goto __err;
}
sprintf(str, "application %i", current->pid);
if (tselect.id.dev_class != SNDRV_TIMER_CLASS_SLAVE)
tselect.id.dev_sclass = SNDRV_TIMER_SCLASS_APPLICATION;
err = snd_timer_open(&tu->timeri, str, &tselect.id, current->pid);
if (err < 0)
goto __err;
kfree(tu->queue);
tu->queue = NULL;
kfree(tu->tqueue);
tu->tqueue = NULL;
if (tu->tread) {
tu->tqueue = kmalloc(tu->queue_size * sizeof(struct snd_timer_tread),
GFP_KERNEL);
if (tu->tqueue == NULL)
err = -ENOMEM;
} else {
tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
GFP_KERNEL);
if (tu->queue == NULL)
err = -ENOMEM;
}
if (err < 0) {
snd_timer_close(tu->timeri);
tu->timeri = NULL;
} else {
tu->timeri->flags |= SNDRV_TIMER_IFLG_FAST;
tu->timeri->callback = tu->tread
? snd_timer_user_tinterrupt : snd_timer_user_interrupt;
tu->timeri->ccallback = snd_timer_user_ccallback;
tu->timeri->callback_data = (void *)tu;
tu->timeri->disconnect = snd_timer_user_disconnect;
}
__err:
return err;
}
static int snd_timer_user_info(struct file *file,
struct snd_timer_info __user *_info)
{
struct snd_timer_user *tu;
struct snd_timer_info *info;
struct snd_timer *t;
int err = 0;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
t = tu->timeri->timer;
if (!t)
return -EBADFD;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (! info)
return -ENOMEM;
info->card = t->card ? t->card->number : -1;
if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
info->flags |= SNDRV_TIMER_FLG_SLAVE;
strlcpy(info->id, t->id, sizeof(info->id));
strlcpy(info->name, t->name, sizeof(info->name));
info->resolution = t->hw.resolution;
if (copy_to_user(_info, info, sizeof(*_info)))
err = -EFAULT;
kfree(info);
return err;
}
static int snd_timer_user_params(struct file *file,
struct snd_timer_params __user *_params)
{
struct snd_timer_user *tu;
struct snd_timer_params params;
struct snd_timer *t;
struct snd_timer_read *tr;
struct snd_timer_tread *ttr;
int err;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
t = tu->timeri->timer;
if (!t)
return -EBADFD;
if (copy_from_user(&params, _params, sizeof(params)))
return -EFAULT;
if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE) && params.ticks < 1) {
err = -EINVAL;
goto _end;
}
if (params.queue_size > 0 &&
(params.queue_size < 32 || params.queue_size > 1024)) {
err = -EINVAL;
goto _end;
}
if (params.filter & ~((1<<SNDRV_TIMER_EVENT_RESOLUTION)|
(1<<SNDRV_TIMER_EVENT_TICK)|
(1<<SNDRV_TIMER_EVENT_START)|
(1<<SNDRV_TIMER_EVENT_STOP)|
(1<<SNDRV_TIMER_EVENT_CONTINUE)|
(1<<SNDRV_TIMER_EVENT_PAUSE)|
(1<<SNDRV_TIMER_EVENT_SUSPEND)|
(1<<SNDRV_TIMER_EVENT_RESUME)|
(1<<SNDRV_TIMER_EVENT_MSTART)|
(1<<SNDRV_TIMER_EVENT_MSTOP)|
(1<<SNDRV_TIMER_EVENT_MCONTINUE)|
(1<<SNDRV_TIMER_EVENT_MPAUSE)|
(1<<SNDRV_TIMER_EVENT_MSUSPEND)|
(1<<SNDRV_TIMER_EVENT_MRESUME))) {
err = -EINVAL;
goto _end;
}
snd_timer_stop(tu->timeri);
spin_lock_irq(&t->lock);
tu->timeri->flags &= ~(SNDRV_TIMER_IFLG_AUTO|
SNDRV_TIMER_IFLG_EXCLUSIVE|
SNDRV_TIMER_IFLG_EARLY_EVENT);
if (params.flags & SNDRV_TIMER_PSFLG_AUTO)
tu->timeri->flags |= SNDRV_TIMER_IFLG_AUTO;
if (params.flags & SNDRV_TIMER_PSFLG_EXCLUSIVE)
tu->timeri->flags |= SNDRV_TIMER_IFLG_EXCLUSIVE;
if (params.flags & SNDRV_TIMER_PSFLG_EARLY_EVENT)
tu->timeri->flags |= SNDRV_TIMER_IFLG_EARLY_EVENT;
spin_unlock_irq(&t->lock);
if (params.queue_size > 0 &&
(unsigned int)tu->queue_size != params.queue_size) {
if (tu->tread) {
ttr = kmalloc(params.queue_size * sizeof(*ttr),
GFP_KERNEL);
if (ttr) {
kfree(tu->tqueue);
tu->queue_size = params.queue_size;
tu->tqueue = ttr;
}
} else {
tr = kmalloc(params.queue_size * sizeof(*tr),
GFP_KERNEL);
if (tr) {
kfree(tu->queue);
tu->queue_size = params.queue_size;
tu->queue = tr;
}
}
}
tu->qhead = tu->qtail = tu->qused = 0;
if (tu->timeri->flags & SNDRV_TIMER_IFLG_EARLY_EVENT) {
if (tu->tread) {
struct snd_timer_tread tread;
memset(&tread, 0, sizeof(tread));
tread.event = SNDRV_TIMER_EVENT_EARLY;
tread.tstamp.tv_sec = 0;
tread.tstamp.tv_nsec = 0;
tread.val = 0;
snd_timer_user_append_to_tqueue(tu, &tread);
} else {
struct snd_timer_read *r = &tu->queue[0];
r->resolution = 0;
r->ticks = 0;
tu->qused++;
tu->qtail++;
}
}
tu->filter = params.filter;
tu->ticks = params.ticks;
err = 0;
_end:
if (copy_to_user(_params, &params, sizeof(params)))
return -EFAULT;
return err;
}
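/*
 * SNDRV_TIMER_PSFLG_EARLY_EVENT, handled above, pre-queues one
 * synthetic entry (an EARLY tread event, or an all-zero read record),
 * which the next read()/poll() on the descriptor sees before the
 * first real tick arrives.
 */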
static int snd_timer_user_status(struct file *file,
struct snd_timer_status __user *_status)
{
struct snd_timer_user *tu;
struct snd_timer_status status;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
memset(&status, 0, sizeof(status));
status.tstamp = tu->tstamp;
status.resolution = snd_timer_resolution(tu->timeri);
status.lost = tu->timeri->lost;
status.overrun = tu->overrun;
spin_lock_irq(&tu->qlock);
status.queue = tu->qused;
spin_unlock_irq(&tu->qlock);
if (copy_to_user(_status, &status, sizeof(status)))
return -EFAULT;
return 0;
}
static int snd_timer_user_start(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
snd_timer_stop(tu->timeri);
tu->timeri->lost = 0;
tu->last_resolution = 0;
return (err = snd_timer_start(tu->timeri, tu->ticks)) < 0 ? err : 0;
}
static int snd_timer_user_stop(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
return (err = snd_timer_stop(tu->timeri)) < 0 ? err : 0;
}
static int snd_timer_user_continue(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
tu->timeri->lost = 0;
return (err = snd_timer_continue(tu->timeri)) < 0 ? err : 0;
}
static int snd_timer_user_pause(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
return (err = snd_timer_pause(tu->timeri)) < 0 ? err : 0;
}
enum {
SNDRV_TIMER_IOCTL_START_OLD = _IO('T', 0x20),
SNDRV_TIMER_IOCTL_STOP_OLD = _IO('T', 0x21),
SNDRV_TIMER_IOCTL_CONTINUE_OLD = _IO('T', 0x22),
SNDRV_TIMER_IOCTL_PAUSE_OLD = _IO('T', 0x23),
};
static long __snd_timer_user_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct snd_timer_user *tu;
void __user *argp = (void __user *)arg;
int __user *p = argp;
tu = file->private_data;
switch (cmd) {
case SNDRV_TIMER_IOCTL_PVERSION:
return put_user(SNDRV_TIMER_VERSION, p) ? -EFAULT : 0;
case SNDRV_TIMER_IOCTL_NEXT_DEVICE:
return snd_timer_user_next_device(argp);
case SNDRV_TIMER_IOCTL_TREAD:
{
int xarg;
if (tu->timeri) /* too late */
return -EBUSY;
if (get_user(xarg, p))
return -EFAULT;
tu->tread = xarg ? 1 : 0;
return 0;
}
case SNDRV_TIMER_IOCTL_GINFO:
return snd_timer_user_ginfo(file, argp);
case SNDRV_TIMER_IOCTL_GPARAMS:
return snd_timer_user_gparams(file, argp);
case SNDRV_TIMER_IOCTL_GSTATUS:
return snd_timer_user_gstatus(file, argp);
case SNDRV_TIMER_IOCTL_SELECT:
return snd_timer_user_tselect(file, argp);
case SNDRV_TIMER_IOCTL_INFO:
return snd_timer_user_info(file, argp);
case SNDRV_TIMER_IOCTL_PARAMS:
return snd_timer_user_params(file, argp);
case SNDRV_TIMER_IOCTL_STATUS:
return snd_timer_user_status(file, argp);
case SNDRV_TIMER_IOCTL_START:
case SNDRV_TIMER_IOCTL_START_OLD:
return snd_timer_user_start(file);
case SNDRV_TIMER_IOCTL_STOP:
case SNDRV_TIMER_IOCTL_STOP_OLD:
return snd_timer_user_stop(file);
case SNDRV_TIMER_IOCTL_CONTINUE:
case SNDRV_TIMER_IOCTL_CONTINUE_OLD:
return snd_timer_user_continue(file);
case SNDRV_TIMER_IOCTL_PAUSE:
case SNDRV_TIMER_IOCTL_PAUSE_OLD:
return snd_timer_user_pause(file);
}
return -ENOTTY;
}
static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct snd_timer_user *tu = file->private_data;
long ret;
mutex_lock(&tu->ioctl_lock);
ret = __snd_timer_user_ioctl(file, cmd, arg);
mutex_unlock(&tu->ioctl_lock);
return ret;
}
static int snd_timer_user_fasync(int fd, struct file * file, int on)
{
struct snd_timer_user *tu;
tu = file->private_data;
return fasync_helper(fd, file, on, &tu->fasync);
}
static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
size_t count, loff_t *offset)
{
struct snd_timer_user *tu;
long result = 0, unit;
int qhead;
int err = 0;
tu = file->private_data;
unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read);
spin_lock_irq(&tu->qlock);
while ((long)count - result >= unit) {
while (!tu->qused) {
wait_queue_t wait;
if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
err = -EAGAIN;
goto _error;
}
set_current_state(TASK_INTERRUPTIBLE);
init_waitqueue_entry(&wait, current);
add_wait_queue(&tu->qchange_sleep, &wait);
spin_unlock_irq(&tu->qlock);
schedule();
spin_lock_irq(&tu->qlock);
remove_wait_queue(&tu->qchange_sleep, &wait);
if (tu->disconnected) {
err = -ENODEV;
goto _error;
}
if (signal_pending(current)) {
err = -ERESTARTSYS;
goto _error;
}
}
qhead = tu->qhead++;
tu->qhead %= tu->queue_size;
spin_unlock_irq(&tu->qlock);
if (tu->tread) {
if (copy_to_user(buffer, &tu->tqueue[qhead],
sizeof(struct snd_timer_tread)))
err = -EFAULT;
} else {
if (copy_to_user(buffer, &tu->queue[qhead],
sizeof(struct snd_timer_read)))
err = -EFAULT;
}
spin_lock_irq(&tu->qlock);
tu->qused--;
if (err < 0)
goto _error;
result += unit;
buffer += unit;
}
_error:
spin_unlock_irq(&tu->qlock);
return result > 0 ? result : err;
}
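/*
 * Note the unlock/copy/relock dance in the loop above: copy_to_user()
 * may fault and sleep, so qlock is dropped around it; qhead is
 * advanced first and qused is decremented only after the copy, under
 * the reacquired lock.
 */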
static unsigned int snd_timer_user_poll(struct file *file, poll_table * wait)
{
unsigned int mask;
struct snd_timer_user *tu;
tu = file->private_data;
poll_wait(file, &tu->qchange_sleep, wait);
mask = 0;
if (tu->qused)
mask |= POLLIN | POLLRDNORM;
if (tu->disconnected)
mask |= POLLERR;
return mask;
}
#ifdef CONFIG_COMPAT
#include "timer_compat.c"
#else
#define snd_timer_user_ioctl_compat NULL
#endif
static const struct file_operations snd_timer_f_ops =
{
.owner = THIS_MODULE,
.read = snd_timer_user_read,
.open = snd_timer_user_open,
.release = snd_timer_user_release,
.llseek = no_llseek,
.poll = snd_timer_user_poll,
.unlocked_ioctl = snd_timer_user_ioctl,
.compat_ioctl = snd_timer_user_ioctl_compat,
.fasync = snd_timer_user_fasync,
};
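/*
 * Locking summary for the file operations above: ioctl() serializes
 * through tu->ioctl_lock, while read() and poll() synchronize with the
 * timer interrupt side through tu->qlock.
 */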
/* unregister the system timer */
static void snd_timer_free_all(void)
{
struct snd_timer *timer, *n;
list_for_each_entry_safe(timer, n, &snd_timer_list, device_list)
snd_timer_free(timer);
}
static struct device timer_dev;
/*
* ENTRY functions
*/
static int __init alsa_timer_init(void)
{
int err;
snd_device_initialize(&timer_dev, NULL);
dev_set_name(&timer_dev, "timer");
#ifdef SNDRV_OSS_INFO_DEV_TIMERS
snd_oss_info_register(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1,
"system timer");
#endif
err = snd_timer_register_system();
if (err < 0) {
pr_err("ALSA: unable to register system timer (%i)\n", err);
put_device(&timer_dev);
return err;
}
err = snd_register_device(SNDRV_DEVICE_TYPE_TIMER, NULL, 0,
&snd_timer_f_ops, NULL, &timer_dev);
if (err < 0) {
pr_err("ALSA: unable to register timer device (%i)\n", err);
snd_timer_free_all();
put_device(&timer_dev);
return err;
}
snd_timer_proc_init();
return 0;
}
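/*
 * Init ordering above: the system timer is registered before the
 * character device is exposed, and each failure path unwinds what the
 * earlier steps set up (free the timers, drop the device reference).
 */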
static void __exit alsa_timer_exit(void)
{
snd_unregister_device(&timer_dev);
snd_timer_free_all();
put_device(&timer_dev);
snd_timer_proc_done();
#ifdef SNDRV_OSS_INFO_DEV_TIMERS
snd_oss_info_unregister(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1);
#endif
}
module_init(alsa_timer_init)
module_exit(alsa_timer_exit)
EXPORT_SYMBOL(snd_timer_open);
EXPORT_SYMBOL(snd_timer_close);
EXPORT_SYMBOL(snd_timer_resolution);
EXPORT_SYMBOL(snd_timer_start);
EXPORT_SYMBOL(snd_timer_stop);
EXPORT_SYMBOL(snd_timer_continue);
EXPORT_SYMBOL(snd_timer_pause);
EXPORT_SYMBOL(snd_timer_new);
EXPORT_SYMBOL(snd_timer_notify);
EXPORT_SYMBOL(snd_timer_global_new);
EXPORT_SYMBOL(snd_timer_global_free);
EXPORT_SYMBOL(snd_timer_global_register);
EXPORT_SYMBOL(snd_timer_interrupt);
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_5059_0 |
crossvul-cpp_data_good_5057_0 | /*
* Timers abstract layer
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/string.h>
#include <sound/core.h>
#include <sound/timer.h>
#include <sound/control.h>
#include <sound/info.h>
#include <sound/minors.h>
#include <sound/initval.h>
#include <linux/kmod.h>
#if IS_ENABLED(CONFIG_SND_HRTIMER)
#define DEFAULT_TIMER_LIMIT 4
#else
#define DEFAULT_TIMER_LIMIT 1
#endif
static int timer_limit = DEFAULT_TIMER_LIMIT;
static int timer_tstamp_monotonic = 1;
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, Takashi Iwai <tiwai@suse.de>");
MODULE_DESCRIPTION("ALSA timer interface");
MODULE_LICENSE("GPL");
module_param(timer_limit, int, 0444);
MODULE_PARM_DESC(timer_limit, "Maximum global timers in system.");
module_param(timer_tstamp_monotonic, int, 0444);
MODULE_PARM_DESC(timer_tstamp_monotonic, "Use posix monotonic clock source for timestamps (default).");
MODULE_ALIAS_CHARDEV(CONFIG_SND_MAJOR, SNDRV_MINOR_TIMER);
MODULE_ALIAS("devname:snd/timer");
struct snd_timer_user {
struct snd_timer_instance *timeri;
int tread; /* enhanced read with timestamps and events */
unsigned long ticks;
unsigned long overrun;
int qhead;
int qtail;
int qused;
int queue_size;
bool disconnected;
struct snd_timer_read *queue;
struct snd_timer_tread *tqueue;
spinlock_t qlock;
unsigned long last_resolution;
unsigned int filter;
struct timespec tstamp; /* trigger tstamp */
wait_queue_head_t qchange_sleep;
struct fasync_struct *fasync;
struct mutex ioctl_lock;
};
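/*
 * The queue/tqueue member above is a fixed-size ring buffer: qhead
 * indexes the oldest entry, qtail the next free slot, and qused holds
 * the fill level; both indices wrap modulo queue_size.  The interrupt
 * callbacks produce entries and snd_timer_user_read() consumes them,
 * so qlock must be held around any access to these fields.
 */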
/* list of timers */
static LIST_HEAD(snd_timer_list);
/* list of slave instances */
static LIST_HEAD(snd_timer_slave_list);
/* lock for slave active lists */
static DEFINE_SPINLOCK(slave_active_lock);
static DEFINE_MUTEX(register_mutex);
static int snd_timer_free(struct snd_timer *timer);
static int snd_timer_dev_free(struct snd_device *device);
static int snd_timer_dev_register(struct snd_device *device);
static int snd_timer_dev_disconnect(struct snd_device *device);
static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_left);
/*
* create a timer instance with the given owner string.
* when timer is not NULL, increments the module counter
*/
static struct snd_timer_instance *snd_timer_instance_new(char *owner,
struct snd_timer *timer)
{
struct snd_timer_instance *timeri;
timeri = kzalloc(sizeof(*timeri), GFP_KERNEL);
if (timeri == NULL)
return NULL;
timeri->owner = kstrdup(owner, GFP_KERNEL);
if (! timeri->owner) {
kfree(timeri);
return NULL;
}
INIT_LIST_HEAD(&timeri->open_list);
INIT_LIST_HEAD(&timeri->active_list);
INIT_LIST_HEAD(&timeri->ack_list);
INIT_LIST_HEAD(&timeri->slave_list_head);
INIT_LIST_HEAD(&timeri->slave_active_head);
timeri->timer = timer;
if (timer && !try_module_get(timer->module)) {
kfree(timeri->owner);
kfree(timeri);
return NULL;
}
return timeri;
}
/*
* find a timer instance from the given timer id
*/
static struct snd_timer *snd_timer_find(struct snd_timer_id *tid)
{
struct snd_timer *timer = NULL;
list_for_each_entry(timer, &snd_timer_list, device_list) {
if (timer->tmr_class != tid->dev_class)
continue;
if ((timer->tmr_class == SNDRV_TIMER_CLASS_CARD ||
timer->tmr_class == SNDRV_TIMER_CLASS_PCM) &&
(timer->card == NULL ||
timer->card->number != tid->card))
continue;
if (timer->tmr_device != tid->device)
continue;
if (timer->tmr_subdevice != tid->subdevice)
continue;
return timer;
}
return NULL;
}
#ifdef CONFIG_MODULES
static void snd_timer_request(struct snd_timer_id *tid)
{
switch (tid->dev_class) {
case SNDRV_TIMER_CLASS_GLOBAL:
if (tid->device < timer_limit)
request_module("snd-timer-%i", tid->device);
break;
case SNDRV_TIMER_CLASS_CARD:
case SNDRV_TIMER_CLASS_PCM:
if (tid->card < snd_ecards_limit)
request_module("snd-card-%i", tid->card);
break;
default:
break;
}
}
#endif
/*
* look for a master instance matching with the slave id of the given slave.
* when found, relink the slave's open_list entry.
*
* call this with register_mutex down.
*/
static void snd_timer_check_slave(struct snd_timer_instance *slave)
{
struct snd_timer *timer;
struct snd_timer_instance *master;
/* FIXME: it's really dumb to look up all entries.. */
list_for_each_entry(timer, &snd_timer_list, device_list) {
list_for_each_entry(master, &timer->open_list_head, open_list) {
if (slave->slave_class == master->slave_class &&
slave->slave_id == master->slave_id) {
list_move_tail(&slave->open_list,
&master->slave_list_head);
spin_lock_irq(&slave_active_lock);
slave->master = master;
slave->timer = master->timer;
spin_unlock_irq(&slave_active_lock);
return;
}
}
}
}
/*
* look for slave instances matching with the slave id of the given master.
* when found, relink the open_list entries of the slaves.
*
* call this with register_mutex down.
*/
static void snd_timer_check_master(struct snd_timer_instance *master)
{
struct snd_timer_instance *slave, *tmp;
/* check all pending slaves */
list_for_each_entry_safe(slave, tmp, &snd_timer_slave_list, open_list) {
if (slave->slave_class == master->slave_class &&
slave->slave_id == master->slave_id) {
list_move_tail(&slave->open_list, &master->slave_list_head);
spin_lock_irq(&slave_active_lock);
spin_lock(&master->timer->lock);
slave->master = master;
slave->timer = master->timer;
if (slave->flags & SNDRV_TIMER_IFLG_RUNNING)
list_add_tail(&slave->active_list,
&master->slave_active_head);
spin_unlock(&master->timer->lock);
spin_unlock_irq(&slave_active_lock);
}
}
}
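/*
 * Slave binding model: a slave instance carries only a (slave_class,
 * slave_id) pair and no hardware timer of its own.  The two helpers
 * above pair masters and slaves whose class/id match, so a bound
 * slave simply mirrors the ticks and events of its master.
 */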
/*
* open a timer instance
* when opening a master, the slave id must be given here.
*/
int snd_timer_open(struct snd_timer_instance **ti,
char *owner, struct snd_timer_id *tid,
unsigned int slave_id)
{
struct snd_timer *timer;
struct snd_timer_instance *timeri = NULL;
if (tid->dev_class == SNDRV_TIMER_CLASS_SLAVE) {
/* open a slave instance */
if (tid->dev_sclass <= SNDRV_TIMER_SCLASS_NONE ||
tid->dev_sclass > SNDRV_TIMER_SCLASS_OSS_SEQUENCER) {
pr_debug("ALSA: timer: invalid slave class %i\n",
tid->dev_sclass);
return -EINVAL;
}
mutex_lock(&register_mutex);
timeri = snd_timer_instance_new(owner, NULL);
if (!timeri) {
mutex_unlock(&register_mutex);
return -ENOMEM;
}
timeri->slave_class = tid->dev_sclass;
timeri->slave_id = tid->device;
timeri->flags |= SNDRV_TIMER_IFLG_SLAVE;
list_add_tail(&timeri->open_list, &snd_timer_slave_list);
snd_timer_check_slave(timeri);
mutex_unlock(&register_mutex);
*ti = timeri;
return 0;
}
/* open a master instance */
mutex_lock(&register_mutex);
timer = snd_timer_find(tid);
#ifdef CONFIG_MODULES
if (!timer) {
mutex_unlock(&register_mutex);
snd_timer_request(tid);
mutex_lock(&register_mutex);
timer = snd_timer_find(tid);
}
#endif
if (!timer) {
mutex_unlock(&register_mutex);
return -ENODEV;
}
if (!list_empty(&timer->open_list_head)) {
timeri = list_entry(timer->open_list_head.next,
struct snd_timer_instance, open_list);
if (timeri->flags & SNDRV_TIMER_IFLG_EXCLUSIVE) {
mutex_unlock(&register_mutex);
return -EBUSY;
}
}
timeri = snd_timer_instance_new(owner, timer);
if (!timeri) {
mutex_unlock(&register_mutex);
return -ENOMEM;
}
/* take a card refcount for safe disconnection */
if (timer->card)
get_device(&timer->card->card_dev);
timeri->slave_class = tid->dev_sclass;
timeri->slave_id = slave_id;
if (list_empty(&timer->open_list_head) && timer->hw.open)
timer->hw.open(timer);
list_add_tail(&timeri->open_list, &timer->open_list_head);
snd_timer_check_master(timeri);
mutex_unlock(&register_mutex);
*ti = timeri;
return 0;
}
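/*
 * Illustrative caller (a sketch, not code from this file): a driver
 * that wants the global system timer would fill in a snd_timer_id and
 * open it roughly like this:
 *
 *	struct snd_timer_instance *ti;
 *	struct snd_timer_id tid = {
 *		.dev_class = SNDRV_TIMER_CLASS_GLOBAL,
 *		.dev_sclass = SNDRV_TIMER_SCLASS_NONE,
 *		.card = -1,
 *		.device = SNDRV_TIMER_GLOBAL_SYSTEM,
 *		.subdevice = 0,
 *	};
 *	int err = snd_timer_open(&ti, "my-driver", &tid, 0);
 *
 * On success *ti holds the new instance; snd_timer_close(ti) releases
 * it again.
 */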
/*
* close a timer instance
*/
int snd_timer_close(struct snd_timer_instance *timeri)
{
struct snd_timer *timer = NULL;
struct snd_timer_instance *slave, *tmp;
if (snd_BUG_ON(!timeri))
return -ENXIO;
mutex_lock(&register_mutex);
list_del(&timeri->open_list);
/* force to stop the timer */
snd_timer_stop(timeri);
timer = timeri->timer;
if (timer) {
/* wait until the active callback has finished */
spin_lock_irq(&timer->lock);
while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) {
spin_unlock_irq(&timer->lock);
udelay(10);
spin_lock_irq(&timer->lock);
}
spin_unlock_irq(&timer->lock);
/* remove slave links */
spin_lock_irq(&slave_active_lock);
spin_lock(&timer->lock);
list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head,
open_list) {
list_move_tail(&slave->open_list, &snd_timer_slave_list);
slave->master = NULL;
slave->timer = NULL;
list_del_init(&slave->ack_list);
list_del_init(&slave->active_list);
}
spin_unlock(&timer->lock);
spin_unlock_irq(&slave_active_lock);
/* slave doesn't need to release timer resources below */
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
timer = NULL;
}
if (timeri->private_free)
timeri->private_free(timeri);
kfree(timeri->owner);
kfree(timeri);
if (timer) {
if (list_empty(&timer->open_list_head) && timer->hw.close)
timer->hw.close(timer);
/* release a card refcount for safe disconnection */
if (timer->card)
put_device(&timer->card->card_dev);
module_put(timer->module);
}
mutex_unlock(&register_mutex);
return 0;
}
unsigned long snd_timer_resolution(struct snd_timer_instance *timeri)
{
struct snd_timer * timer;
if (timeri == NULL)
return 0;
if ((timer = timeri->timer) != NULL) {
if (timer->hw.c_resolution)
return timer->hw.c_resolution(timer);
return timer->hw.resolution;
}
return 0;
}
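/*
 * The resolution is reported in nanoseconds per tick; hardware may
 * either export a fixed hw.resolution or provide a c_resolution()
 * callback for timers whose effective period can change at runtime.
 */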
static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
{
struct snd_timer *timer;
unsigned long resolution = 0;
struct snd_timer_instance *ts;
struct timespec tstamp;
if (timer_tstamp_monotonic)
ktime_get_ts(&tstamp);
else
getnstimeofday(&tstamp);
if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_START ||
event > SNDRV_TIMER_EVENT_PAUSE))
return;
if (event == SNDRV_TIMER_EVENT_START ||
event == SNDRV_TIMER_EVENT_CONTINUE)
resolution = snd_timer_resolution(ti);
if (ti->ccallback)
ti->ccallback(ti, event, &tstamp, resolution);
if (ti->flags & SNDRV_TIMER_IFLG_SLAVE)
return;
timer = ti->timer;
if (timer == NULL)
return;
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
return;
list_for_each_entry(ts, &ti->slave_active_head, active_list)
if (ts->ccallback)
ts->ccallback(ts, event + 100, &tstamp, resolution);
}
/* start/continue a master timer */
static int snd_timer_start1(struct snd_timer_instance *timeri,
bool start, unsigned long ticks)
{
struct snd_timer *timer;
int result;
unsigned long flags;
timer = timeri->timer;
if (!timer)
return -EINVAL;
spin_lock_irqsave(&timer->lock, flags);
if (timer->card && timer->card->shutdown) {
result = -ENODEV;
goto unlock;
}
if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
SNDRV_TIMER_IFLG_START)) {
result = -EBUSY;
goto unlock;
}
if (start)
timeri->ticks = timeri->cticks = ticks;
else if (!timeri->cticks)
timeri->cticks = 1;
timeri->pticks = 0;
list_move_tail(&timeri->active_list, &timer->active_list_head);
if (timer->running) {
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
goto __start_now;
timer->flags |= SNDRV_TIMER_FLG_RESCHED;
timeri->flags |= SNDRV_TIMER_IFLG_START;
result = 1; /* delayed start */
} else {
if (start)
timer->sticks = ticks;
timer->hw.start(timer);
__start_now:
timer->running++;
timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
result = 0;
}
snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START :
SNDRV_TIMER_EVENT_CONTINUE);
unlock:
spin_unlock_irqrestore(&timer->lock, flags);
return result;
}
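/*
 * Return convention used above and in snd_timer_start_slave() below:
 * 0 means the hardware has been (re)programmed and the instance runs
 * now, 1 means a delayed start picked up at the next reschedule, and
 * a negative value is an error.
 */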
/* start/continue a slave timer */
static int snd_timer_start_slave(struct snd_timer_instance *timeri,
bool start)
{
unsigned long flags;
spin_lock_irqsave(&slave_active_lock, flags);
if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
spin_unlock_irqrestore(&slave_active_lock, flags);
return -EBUSY;
}
timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
if (timeri->master && timeri->timer) {
spin_lock(&timeri->timer->lock);
list_add_tail(&timeri->active_list,
&timeri->master->slave_active_head);
snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START :
SNDRV_TIMER_EVENT_CONTINUE);
spin_unlock(&timeri->timer->lock);
}
spin_unlock_irqrestore(&slave_active_lock, flags);
return 1; /* delayed start */
}
/* stop/pause a master timer */
static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop)
{
struct snd_timer *timer;
int result = 0;
unsigned long flags;
timer = timeri->timer;
if (!timer)
return -EINVAL;
spin_lock_irqsave(&timer->lock, flags);
if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
SNDRV_TIMER_IFLG_START))) {
result = -EBUSY;
goto unlock;
}
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
if (timer->card && timer->card->shutdown)
goto unlock;
if (stop) {
timeri->cticks = timeri->ticks;
timeri->pticks = 0;
}
if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
!(--timer->running)) {
timer->hw.stop(timer);
if (timer->flags & SNDRV_TIMER_FLG_RESCHED) {
timer->flags &= ~SNDRV_TIMER_FLG_RESCHED;
snd_timer_reschedule(timer, 0);
if (timer->flags & SNDRV_TIMER_FLG_CHANGE) {
timer->flags &= ~SNDRV_TIMER_FLG_CHANGE;
timer->hw.start(timer);
}
}
}
timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
/* a pause must be reported as a PAUSE event, not CONTINUE */
snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
SNDRV_TIMER_EVENT_PAUSE);
unlock:
spin_unlock_irqrestore(&timer->lock, flags);
return result;
}
/* stop/pause a slave timer */
static int snd_timer_stop_slave(struct snd_timer_instance *timeri, bool stop)
{
unsigned long flags;
spin_lock_irqsave(&slave_active_lock, flags);
if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
spin_unlock_irqrestore(&slave_active_lock, flags);
return -EBUSY;
}
timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
if (timeri->timer) {
spin_lock(&timeri->timer->lock);
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
SNDRV_TIMER_EVENT_PAUSE);
spin_unlock(&timeri->timer->lock);
}
spin_unlock_irqrestore(&slave_active_lock, flags);
return 0;
}
/*
* start the timer instance
*/
int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
{
if (timeri == NULL || ticks < 1)
return -EINVAL;
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_start_slave(timeri, true);
else
return snd_timer_start1(timeri, true, ticks);
}
/*
* stop the timer instance.
*
* do not call this from the timer callback!
*/
int snd_timer_stop(struct snd_timer_instance *timeri)
{
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_stop_slave(timeri, true);
else
return snd_timer_stop1(timeri, true);
}
/*
* start again.. the tick is kept.
*/
int snd_timer_continue(struct snd_timer_instance *timeri)
{
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_start_slave(timeri, false);
else
return snd_timer_start1(timeri, false, 0);
}
/*
* pause.. remember the ticks left
*/
int snd_timer_pause(struct snd_timer_instance * timeri)
{
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_stop_slave(timeri, false);
else
return snd_timer_stop1(timeri, false);
}
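/*
 * Summary of the four public entry points above: start and continue
 * funnel into the *_start variants with a bool that tells whether the
 * tick count is reloaded, while stop and pause funnel into the *_stop
 * variants with a bool that tells whether cticks is reset; pause keeps
 * the remaining ticks so that a later continue resumes where it
 * stopped.
 */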
/*
* reschedule the timer
*
* start pending instances and check the scheduling ticks.
* when the scheduling tick count changes, set the CHANGE flag to
* reprogram the timer.
*/
static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_left)
{
struct snd_timer_instance *ti;
unsigned long ticks = ~0UL;
list_for_each_entry(ti, &timer->active_list_head, active_list) {
if (ti->flags & SNDRV_TIMER_IFLG_START) {
ti->flags &= ~SNDRV_TIMER_IFLG_START;
ti->flags |= SNDRV_TIMER_IFLG_RUNNING;
timer->running++;
}
if (ti->flags & SNDRV_TIMER_IFLG_RUNNING) {
if (ticks > ti->cticks)
ticks = ti->cticks;
}
}
if (ticks == ~0UL) {
timer->flags &= ~SNDRV_TIMER_FLG_RESCHED;
return;
}
if (ticks > timer->hw.ticks)
ticks = timer->hw.ticks;
if (ticks_left != ticks)
timer->flags |= SNDRV_TIMER_FLG_CHANGE;
timer->sticks = ticks;
}
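/*
 * Example (illustrative numbers): with two running instances whose
 * cticks are 5 and 3, the loop above selects ticks = 3; if that
 * differs from ticks_left, SNDRV_TIMER_FLG_CHANGE is set so the
 * caller reprograms the hardware with sticks = 3.
 */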
/*
* timer tasklet
*
*/
static void snd_timer_tasklet(unsigned long arg)
{
struct snd_timer *timer = (struct snd_timer *) arg;
struct snd_timer_instance *ti;
struct list_head *p;
unsigned long resolution, ticks;
unsigned long flags;
if (timer->card && timer->card->shutdown)
return;
spin_lock_irqsave(&timer->lock, flags);
/* now process all callbacks */
while (!list_empty(&timer->sack_list_head)) {
p = timer->sack_list_head.next; /* get first item */
ti = list_entry(p, struct snd_timer_instance, ack_list);
/* remove from ack_list and make empty */
list_del_init(p);
ticks = ti->pticks;
ti->pticks = 0;
resolution = ti->resolution;
ti->flags |= SNDRV_TIMER_IFLG_CALLBACK;
spin_unlock(&timer->lock);
if (ti->callback)
ti->callback(ti, resolution, ticks);
spin_lock(&timer->lock);
ti->flags &= ~SNDRV_TIMER_IFLG_CALLBACK;
}
spin_unlock_irqrestore(&timer->lock, flags);
}
/*
* timer interrupt
*
* ticks_left is usually equal to timer->sticks.
*
*/
void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
{
struct snd_timer_instance *ti, *ts, *tmp;
unsigned long resolution, ticks;
struct list_head *p, *ack_list_head;
unsigned long flags;
int use_tasklet = 0;
if (timer == NULL)
return;
if (timer->card && timer->card->shutdown)
return;
spin_lock_irqsave(&timer->lock, flags);
/* remember the current resolution */
if (timer->hw.c_resolution)
resolution = timer->hw.c_resolution(timer);
else
resolution = timer->hw.resolution;
/* loop for all active instances
 * Here we cannot use list_for_each_entry because a processed
 * instance may be moved to an ack list and removed from the active
 * list before its callback is called, so the _safe iterator is
 * required.
 */
list_for_each_entry_safe(ti, tmp, &timer->active_list_head,
active_list) {
if (!(ti->flags & SNDRV_TIMER_IFLG_RUNNING))
continue;
ti->pticks += ticks_left;
ti->resolution = resolution;
if (ti->cticks < ticks_left)
ti->cticks = 0;
else
ti->cticks -= ticks_left;
if (ti->cticks) /* not expired */
continue;
if (ti->flags & SNDRV_TIMER_IFLG_AUTO) {
ti->cticks = ti->ticks;
} else {
ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
--timer->running;
list_del_init(&ti->active_list);
}
if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) ||
(ti->flags & SNDRV_TIMER_IFLG_FAST))
ack_list_head = &timer->ack_list_head;
else
ack_list_head = &timer->sack_list_head;
if (list_empty(&ti->ack_list))
list_add_tail(&ti->ack_list, ack_list_head);
list_for_each_entry(ts, &ti->slave_active_head, active_list) {
ts->pticks = ti->pticks;
ts->resolution = resolution;
if (list_empty(&ts->ack_list))
list_add_tail(&ts->ack_list, ack_list_head);
}
}
if (timer->flags & SNDRV_TIMER_FLG_RESCHED)
snd_timer_reschedule(timer, timer->sticks);
if (timer->running) {
if (timer->hw.flags & SNDRV_TIMER_HW_STOP) {
timer->hw.stop(timer);
timer->flags |= SNDRV_TIMER_FLG_CHANGE;
}
if (!(timer->hw.flags & SNDRV_TIMER_HW_AUTO) ||
(timer->flags & SNDRV_TIMER_FLG_CHANGE)) {
/* restart timer */
timer->flags &= ~SNDRV_TIMER_FLG_CHANGE;
timer->hw.start(timer);
}
} else {
timer->hw.stop(timer);
}
/* now process all fast callbacks */
while (!list_empty(&timer->ack_list_head)) {
p = timer->ack_list_head.next; /* get first item */
ti = list_entry(p, struct snd_timer_instance, ack_list);
/* remove from ack_list and make empty */
list_del_init(p);
ticks = ti->pticks;
ti->pticks = 0;
ti->flags |= SNDRV_TIMER_IFLG_CALLBACK;
spin_unlock(&timer->lock);
if (ti->callback)
ti->callback(ti, resolution, ticks);
spin_lock(&timer->lock);
ti->flags &= ~SNDRV_TIMER_IFLG_CALLBACK;
}
/* do we have any slow callbacks? */
use_tasklet = !list_empty(&timer->sack_list_head);
spin_unlock_irqrestore(&timer->lock, flags);
if (use_tasklet)
tasklet_schedule(&timer->task_queue);
}
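/*
 * Two-level acknowledgment: instances marked SNDRV_TIMER_IFLG_FAST, or
 * hardware that already runs its handler in a tasklet
 * (SNDRV_TIMER_HW_TASKLET), are called back directly from
 * snd_timer_interrupt() via ack_list_head; everything else goes to
 * sack_list_head and is handled later by snd_timer_tasklet(), keeping
 * slow user callbacks out of hard-irq context.
 */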
/*
 * create a new timer device with the given id string and timer id,
 * and register it as a device of the card (when card is non-NULL)
 */
int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid,
struct snd_timer **rtimer)
{
struct snd_timer *timer;
int err;
static struct snd_device_ops ops = {
.dev_free = snd_timer_dev_free,
.dev_register = snd_timer_dev_register,
.dev_disconnect = snd_timer_dev_disconnect,
};
if (snd_BUG_ON(!tid))
return -EINVAL;
if (rtimer)
*rtimer = NULL;
timer = kzalloc(sizeof(*timer), GFP_KERNEL);
if (!timer)
return -ENOMEM;
timer->tmr_class = tid->dev_class;
timer->card = card;
timer->tmr_device = tid->device;
timer->tmr_subdevice = tid->subdevice;
if (id)
strlcpy(timer->id, id, sizeof(timer->id));
INIT_LIST_HEAD(&timer->device_list);
INIT_LIST_HEAD(&timer->open_list_head);
INIT_LIST_HEAD(&timer->active_list_head);
INIT_LIST_HEAD(&timer->ack_list_head);
INIT_LIST_HEAD(&timer->sack_list_head);
spin_lock_init(&timer->lock);
tasklet_init(&timer->task_queue, snd_timer_tasklet,
(unsigned long)timer);
if (card != NULL) {
timer->module = card->module;
err = snd_device_new(card, SNDRV_DEV_TIMER, timer, &ops);
if (err < 0) {
snd_timer_free(timer);
return err;
}
}
if (rtimer)
*rtimer = timer;
return 0;
}
static int snd_timer_free(struct snd_timer *timer)
{
if (!timer)
return 0;
mutex_lock(&register_mutex);
if (! list_empty(&timer->open_list_head)) {
struct list_head *p, *n;
struct snd_timer_instance *ti;
pr_warn("ALSA: timer %p is busy?\n", timer);
list_for_each_safe(p, n, &timer->open_list_head) {
list_del_init(p);
ti = list_entry(p, struct snd_timer_instance, open_list);
ti->timer = NULL;
}
}
list_del(&timer->device_list);
mutex_unlock(&register_mutex);
if (timer->private_free)
timer->private_free(timer);
kfree(timer);
return 0;
}
static int snd_timer_dev_free(struct snd_device *device)
{
struct snd_timer *timer = device->device_data;
return snd_timer_free(timer);
}
static int snd_timer_dev_register(struct snd_device *dev)
{
struct snd_timer *timer = dev->device_data;
struct snd_timer *timer1;
if (snd_BUG_ON(!timer || !timer->hw.start || !timer->hw.stop))
return -ENXIO;
if (!(timer->hw.flags & SNDRV_TIMER_HW_SLAVE) &&
!timer->hw.resolution && timer->hw.c_resolution == NULL)
return -EINVAL;
mutex_lock(&register_mutex);
list_for_each_entry(timer1, &snd_timer_list, device_list) {
if (timer1->tmr_class > timer->tmr_class)
break;
if (timer1->tmr_class < timer->tmr_class)
continue;
if (timer1->card && timer->card) {
if (timer1->card->number > timer->card->number)
break;
if (timer1->card->number < timer->card->number)
continue;
}
if (timer1->tmr_device > timer->tmr_device)
break;
if (timer1->tmr_device < timer->tmr_device)
continue;
if (timer1->tmr_subdevice > timer->tmr_subdevice)
break;
if (timer1->tmr_subdevice < timer->tmr_subdevice)
continue;
/* conflicts.. */
mutex_unlock(&register_mutex);
return -EBUSY;
}
list_add_tail(&timer->device_list, &timer1->device_list);
mutex_unlock(&register_mutex);
return 0;
}
static int snd_timer_dev_disconnect(struct snd_device *device)
{
struct snd_timer *timer = device->device_data;
struct snd_timer_instance *ti;
mutex_lock(&register_mutex);
list_del_init(&timer->device_list);
/* wake up pending sleepers */
list_for_each_entry(ti, &timer->open_list_head, open_list) {
if (ti->disconnect)
ti->disconnect(ti);
}
mutex_unlock(&register_mutex);
return 0;
}
void snd_timer_notify(struct snd_timer *timer, int event, struct timespec *tstamp)
{
unsigned long flags;
unsigned long resolution = 0;
struct snd_timer_instance *ti, *ts;
if (timer->card && timer->card->shutdown)
return;
if (! (timer->hw.flags & SNDRV_TIMER_HW_SLAVE))
return;
if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_MSTART ||
event > SNDRV_TIMER_EVENT_MRESUME))
return;
spin_lock_irqsave(&timer->lock, flags);
if (event == SNDRV_TIMER_EVENT_MSTART ||
event == SNDRV_TIMER_EVENT_MCONTINUE ||
event == SNDRV_TIMER_EVENT_MRESUME) {
if (timer->hw.c_resolution)
resolution = timer->hw.c_resolution(timer);
else
resolution = timer->hw.resolution;
}
list_for_each_entry(ti, &timer->active_list_head, active_list) {
if (ti->ccallback)
ti->ccallback(ti, event, tstamp, resolution);
list_for_each_entry(ts, &ti->slave_active_head, active_list)
if (ts->ccallback)
ts->ccallback(ts, event, tstamp, resolution);
}
spin_unlock_irqrestore(&timer->lock, flags);
}
/*
* exported functions for global timers
*/
int snd_timer_global_new(char *id, int device, struct snd_timer **rtimer)
{
struct snd_timer_id tid;
tid.dev_class = SNDRV_TIMER_CLASS_GLOBAL;
tid.dev_sclass = SNDRV_TIMER_SCLASS_NONE;
tid.card = -1;
tid.device = device;
tid.subdevice = 0;
return snd_timer_new(NULL, id, &tid, rtimer);
}
int snd_timer_global_free(struct snd_timer *timer)
{
return snd_timer_free(timer);
}
int snd_timer_global_register(struct snd_timer *timer)
{
struct snd_device dev;
memset(&dev, 0, sizeof(dev));
dev.device_data = timer;
return snd_timer_dev_register(&dev);
}
/*
* System timer
*/
struct snd_timer_system_private {
struct timer_list tlist;
unsigned long last_expires;
unsigned long last_jiffies;
unsigned long correction;
};
static void snd_timer_s_function(unsigned long data)
{
struct snd_timer *timer = (struct snd_timer *)data;
struct snd_timer_system_private *priv = timer->private_data;
unsigned long jiff = jiffies;
if (time_after(jiff, priv->last_expires))
priv->correction += (long)jiff - (long)priv->last_expires;
snd_timer_interrupt(timer, (long)jiff - (long)priv->last_jiffies);
}
static int snd_timer_s_start(struct snd_timer * timer)
{
struct snd_timer_system_private *priv;
unsigned long njiff;
priv = (struct snd_timer_system_private *) timer->private_data;
njiff = (priv->last_jiffies = jiffies);
if (priv->correction > timer->sticks - 1) {
priv->correction -= timer->sticks - 1;
njiff++;
} else {
njiff += timer->sticks - priv->correction;
priv->correction = 0;
}
priv->last_expires = njiff;
mod_timer(&priv->tlist, njiff);
return 0;
}
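/*
 * The correction bookkeeping above compensates for late jiffies
 * expiry: for example, with sticks = 4 and an accumulated correction
 * of 1, the next expiry is set 3 jiffies ahead instead of 4, keeping
 * the average period at the requested rate.
 */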
static int snd_timer_s_stop(struct snd_timer * timer)
{
struct snd_timer_system_private *priv;
unsigned long jiff;
priv = (struct snd_timer_system_private *) timer->private_data;
del_timer(&priv->tlist);
jiff = jiffies;
if (time_before(jiff, priv->last_expires))
timer->sticks = priv->last_expires - jiff;
else
timer->sticks = 1;
priv->correction = 0;
return 0;
}
static int snd_timer_s_close(struct snd_timer *timer)
{
struct snd_timer_system_private *priv;
priv = (struct snd_timer_system_private *)timer->private_data;
del_timer_sync(&priv->tlist);
return 0;
}
static struct snd_timer_hardware snd_timer_system =
{
.flags = SNDRV_TIMER_HW_FIRST | SNDRV_TIMER_HW_TASKLET,
.resolution = 1000000000L / HZ,
.ticks = 10000000L,
.close = snd_timer_s_close,
.start = snd_timer_s_start,
.stop = snd_timer_s_stop
};
static void snd_timer_free_system(struct snd_timer *timer)
{
kfree(timer->private_data);
}
static int snd_timer_register_system(void)
{
struct snd_timer *timer;
struct snd_timer_system_private *priv;
int err;
err = snd_timer_global_new("system", SNDRV_TIMER_GLOBAL_SYSTEM, &timer);
if (err < 0)
return err;
strcpy(timer->name, "system timer");
timer->hw = snd_timer_system;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (priv == NULL) {
snd_timer_free(timer);
return -ENOMEM;
}
setup_timer(&priv->tlist, snd_timer_s_function, (unsigned long) timer);
timer->private_data = priv;
timer->private_free = snd_timer_free_system;
return snd_timer_global_register(timer);
}
#ifdef CONFIG_SND_PROC_FS
/*
* Info interface
*/
static void snd_timer_proc_read(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_timer *timer;
struct snd_timer_instance *ti;
mutex_lock(&register_mutex);
list_for_each_entry(timer, &snd_timer_list, device_list) {
if (timer->card && timer->card->shutdown)
continue;
switch (timer->tmr_class) {
case SNDRV_TIMER_CLASS_GLOBAL:
snd_iprintf(buffer, "G%i: ", timer->tmr_device);
break;
case SNDRV_TIMER_CLASS_CARD:
snd_iprintf(buffer, "C%i-%i: ",
timer->card->number, timer->tmr_device);
break;
case SNDRV_TIMER_CLASS_PCM:
snd_iprintf(buffer, "P%i-%i-%i: ", timer->card->number,
timer->tmr_device, timer->tmr_subdevice);
break;
default:
snd_iprintf(buffer, "?%i-%i-%i-%i: ", timer->tmr_class,
timer->card ? timer->card->number : -1,
timer->tmr_device, timer->tmr_subdevice);
}
snd_iprintf(buffer, "%s :", timer->name);
if (timer->hw.resolution)
snd_iprintf(buffer, " %lu.%03luus (%lu ticks)",
timer->hw.resolution / 1000,
timer->hw.resolution % 1000,
timer->hw.ticks);
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
snd_iprintf(buffer, " SLAVE");
snd_iprintf(buffer, "\n");
list_for_each_entry(ti, &timer->open_list_head, open_list)
snd_iprintf(buffer, " Client %s : %s\n",
ti->owner ? ti->owner : "unknown",
ti->flags & (SNDRV_TIMER_IFLG_START |
SNDRV_TIMER_IFLG_RUNNING)
? "running" : "stopped");
}
mutex_unlock(&register_mutex);
}
static struct snd_info_entry *snd_timer_proc_entry;
static void __init snd_timer_proc_init(void)
{
struct snd_info_entry *entry;
entry = snd_info_create_module_entry(THIS_MODULE, "timers", NULL);
if (entry != NULL) {
entry->c.text.read = snd_timer_proc_read;
if (snd_info_register(entry) < 0) {
snd_info_free_entry(entry);
entry = NULL;
}
}
snd_timer_proc_entry = entry;
}
static void __exit snd_timer_proc_done(void)
{
snd_info_free_entry(snd_timer_proc_entry);
}
#else /* !CONFIG_SND_PROC_FS */
#define snd_timer_proc_init()
#define snd_timer_proc_done()
#endif
/*
* USER SPACE interface
*/
static void snd_timer_user_interrupt(struct snd_timer_instance *timeri,
unsigned long resolution,
unsigned long ticks)
{
struct snd_timer_user *tu = timeri->callback_data;
struct snd_timer_read *r;
int prev;
spin_lock(&tu->qlock);
if (tu->qused > 0) {
prev = tu->qtail == 0 ? tu->queue_size - 1 : tu->qtail - 1;
r = &tu->queue[prev];
if (r->resolution == resolution) {
r->ticks += ticks;
goto __wake;
}
}
if (tu->qused >= tu->queue_size) {
tu->overrun++;
} else {
r = &tu->queue[tu->qtail++];
tu->qtail %= tu->queue_size;
r->resolution = resolution;
r->ticks = ticks;
tu->qused++;
}
__wake:
spin_unlock(&tu->qlock);
kill_fasync(&tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
static void snd_timer_user_append_to_tqueue(struct snd_timer_user *tu,
struct snd_timer_tread *tread)
{
if (tu->qused >= tu->queue_size) {
tu->overrun++;
} else {
memcpy(&tu->tqueue[tu->qtail++], tread, sizeof(*tread));
tu->qtail %= tu->queue_size;
tu->qused++;
}
}
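/*
 * When the ring is full the event is dropped and tu->overrun is
 * incremented; userspace can observe the loss through the overrun
 * field returned by SNDRV_TIMER_IOCTL_STATUS.
 */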
static void snd_timer_user_ccallback(struct snd_timer_instance *timeri,
int event,
struct timespec *tstamp,
unsigned long resolution)
{
struct snd_timer_user *tu = timeri->callback_data;
struct snd_timer_tread r1;
unsigned long flags;
if (event >= SNDRV_TIMER_EVENT_START &&
event <= SNDRV_TIMER_EVENT_PAUSE)
tu->tstamp = *tstamp;
if ((tu->filter & (1 << event)) == 0 || !tu->tread)
return;
r1.event = event;
r1.tstamp = *tstamp;
r1.val = resolution;
spin_lock_irqsave(&tu->qlock, flags);
snd_timer_user_append_to_tqueue(tu, &r1);
spin_unlock_irqrestore(&tu->qlock, flags);
kill_fasync(&tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
static void snd_timer_user_disconnect(struct snd_timer_instance *timeri)
{
struct snd_timer_user *tu = timeri->callback_data;
tu->disconnected = true;
wake_up(&tu->qchange_sleep);
}
static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri,
unsigned long resolution,
unsigned long ticks)
{
struct snd_timer_user *tu = timeri->callback_data;
struct snd_timer_tread *r, r1;
struct timespec tstamp;
int prev, append = 0;
memset(&tstamp, 0, sizeof(tstamp));
spin_lock(&tu->qlock);
if ((tu->filter & ((1 << SNDRV_TIMER_EVENT_RESOLUTION) |
(1 << SNDRV_TIMER_EVENT_TICK))) == 0) {
spin_unlock(&tu->qlock);
return;
}
if (tu->last_resolution != resolution || ticks > 0) {
if (timer_tstamp_monotonic)
ktime_get_ts(&tstamp);
else
getnstimeofday(&tstamp);
}
if ((tu->filter & (1 << SNDRV_TIMER_EVENT_RESOLUTION)) &&
tu->last_resolution != resolution) {
r1.event = SNDRV_TIMER_EVENT_RESOLUTION;
r1.tstamp = tstamp;
r1.val = resolution;
snd_timer_user_append_to_tqueue(tu, &r1);
tu->last_resolution = resolution;
append++;
}
if ((tu->filter & (1 << SNDRV_TIMER_EVENT_TICK)) == 0)
goto __wake;
if (ticks == 0)
goto __wake;
if (tu->qused > 0) {
prev = tu->qtail == 0 ? tu->queue_size - 1 : tu->qtail - 1;
r = &tu->tqueue[prev];
if (r->event == SNDRV_TIMER_EVENT_TICK) {
r->tstamp = tstamp;
r->val += ticks;
append++;
goto __wake;
}
}
r1.event = SNDRV_TIMER_EVENT_TICK;
r1.tstamp = tstamp;
r1.val = ticks;
snd_timer_user_append_to_tqueue(tu, &r1);
append++;
__wake:
spin_unlock(&tu->qlock);
if (append == 0)
return;
kill_fasync(&tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
static int snd_timer_user_open(struct inode *inode, struct file *file)
{
struct snd_timer_user *tu;
int err;
err = nonseekable_open(inode, file);
if (err < 0)
return err;
tu = kzalloc(sizeof(*tu), GFP_KERNEL);
if (tu == NULL)
return -ENOMEM;
spin_lock_init(&tu->qlock);
init_waitqueue_head(&tu->qchange_sleep);
mutex_init(&tu->ioctl_lock);
tu->ticks = 1;
tu->queue_size = 128;
tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
GFP_KERNEL);
if (tu->queue == NULL) {
kfree(tu);
return -ENOMEM;
}
file->private_data = tu;
return 0;
}
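/*
 * Each opened timer device file starts with a 128-entry
 * snd_timer_read queue.  Switching to the extended tread format must
 * happen via SNDRV_TIMER_IOCTL_TREAD before a timer is selected,
 * because snd_timer_user_tselect() allocates either queue or tqueue
 * depending on tu->tread.
 */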
static int snd_timer_user_release(struct inode *inode, struct file *file)
{
struct snd_timer_user *tu;
if (file->private_data) {
tu = file->private_data;
file->private_data = NULL;
mutex_lock(&tu->ioctl_lock);
if (tu->timeri)
snd_timer_close(tu->timeri);
mutex_unlock(&tu->ioctl_lock);
kfree(tu->queue);
kfree(tu->tqueue);
kfree(tu);
}
return 0;
}
static void snd_timer_user_zero_id(struct snd_timer_id *id)
{
id->dev_class = SNDRV_TIMER_CLASS_NONE;
id->dev_sclass = SNDRV_TIMER_SCLASS_NONE;
id->card = -1;
id->device = -1;
id->subdevice = -1;
}
static void snd_timer_user_copy_id(struct snd_timer_id *id, struct snd_timer *timer)
{
id->dev_class = timer->tmr_class;
id->dev_sclass = SNDRV_TIMER_SCLASS_NONE;
id->card = timer->card ? timer->card->number : -1;
id->device = timer->tmr_device;
id->subdevice = timer->tmr_subdevice;
}
static int snd_timer_user_next_device(struct snd_timer_id __user *_tid)
{
struct snd_timer_id id;
struct snd_timer *timer;
struct list_head *p;
if (copy_from_user(&id, _tid, sizeof(id)))
return -EFAULT;
mutex_lock(&register_mutex);
if (id.dev_class < 0) { /* first item */
if (list_empty(&snd_timer_list))
snd_timer_user_zero_id(&id);
else {
timer = list_entry(snd_timer_list.next,
struct snd_timer, device_list);
snd_timer_user_copy_id(&id, timer);
}
} else {
switch (id.dev_class) {
case SNDRV_TIMER_CLASS_GLOBAL:
id.device = id.device < 0 ? 0 : id.device + 1;
list_for_each(p, &snd_timer_list) {
timer = list_entry(p, struct snd_timer, device_list);
if (timer->tmr_class > SNDRV_TIMER_CLASS_GLOBAL) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_device >= id.device) {
snd_timer_user_copy_id(&id, timer);
break;
}
}
if (p == &snd_timer_list)
snd_timer_user_zero_id(&id);
break;
case SNDRV_TIMER_CLASS_CARD:
case SNDRV_TIMER_CLASS_PCM:
if (id.card < 0) {
id.card = 0;
} else if (id.device < 0) {
id.device = 0;
} else if (id.subdevice < 0) {
id.subdevice = 0;
} else {
id.subdevice++;
}
list_for_each(p, &snd_timer_list) {
timer = list_entry(p, struct snd_timer, device_list);
if (timer->tmr_class > id.dev_class) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_class < id.dev_class)
continue;
if (timer->card->number > id.card) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->card->number < id.card)
continue;
if (timer->tmr_device > id.device) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_device < id.device)
continue;
if (timer->tmr_subdevice > id.subdevice) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_subdevice < id.subdevice)
continue;
snd_timer_user_copy_id(&id, timer);
break;
}
if (p == &snd_timer_list)
snd_timer_user_zero_id(&id);
break;
default:
snd_timer_user_zero_id(&id);
}
}
mutex_unlock(&register_mutex);
if (copy_to_user(_tid, &id, sizeof(*_tid)))
return -EFAULT;
return 0;
}
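/*
 * Enumeration protocol: userspace starts with id.dev_class < 0 to get
 * the first timer and feeds each returned id back in to get the next
 * one; when the list is exhausted, a zeroed id (dev_class ==
 * SNDRV_TIMER_CLASS_NONE) is returned.
 */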
static int snd_timer_user_ginfo(struct file *file,
struct snd_timer_ginfo __user *_ginfo)
{
struct snd_timer_ginfo *ginfo;
struct snd_timer_id tid;
struct snd_timer *t;
struct list_head *p;
int err = 0;
ginfo = memdup_user(_ginfo, sizeof(*ginfo));
if (IS_ERR(ginfo))
return PTR_ERR(ginfo);
tid = ginfo->tid;
memset(ginfo, 0, sizeof(*ginfo));
ginfo->tid = tid;
mutex_lock(&register_mutex);
t = snd_timer_find(&tid);
if (t != NULL) {
ginfo->card = t->card ? t->card->number : -1;
if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
ginfo->flags |= SNDRV_TIMER_FLG_SLAVE;
strlcpy(ginfo->id, t->id, sizeof(ginfo->id));
strlcpy(ginfo->name, t->name, sizeof(ginfo->name));
ginfo->resolution = t->hw.resolution;
if (t->hw.resolution_min > 0) {
ginfo->resolution_min = t->hw.resolution_min;
ginfo->resolution_max = t->hw.resolution_max;
}
list_for_each(p, &t->open_list_head) {
ginfo->clients++;
}
} else {
err = -ENODEV;
}
mutex_unlock(&register_mutex);
if (err >= 0 && copy_to_user(_ginfo, ginfo, sizeof(*ginfo)))
err = -EFAULT;
kfree(ginfo);
return err;
}
static int timer_set_gparams(struct snd_timer_gparams *gparams)
{
struct snd_timer *t;
int err;
mutex_lock(&register_mutex);
t = snd_timer_find(&gparams->tid);
if (!t) {
err = -ENODEV;
goto _error;
}
if (!list_empty(&t->open_list_head)) {
err = -EBUSY;
goto _error;
}
if (!t->hw.set_period) {
err = -ENOSYS;
goto _error;
}
err = t->hw.set_period(t, gparams->period_num, gparams->period_den);
_error:
mutex_unlock(&register_mutex);
return err;
}
static int snd_timer_user_gparams(struct file *file,
struct snd_timer_gparams __user *_gparams)
{
struct snd_timer_gparams gparams;
if (copy_from_user(&gparams, _gparams, sizeof(gparams)))
return -EFAULT;
return timer_set_gparams(&gparams);
}
static int snd_timer_user_gstatus(struct file *file,
struct snd_timer_gstatus __user *_gstatus)
{
struct snd_timer_gstatus gstatus;
struct snd_timer_id tid;
struct snd_timer *t;
int err = 0;
if (copy_from_user(&gstatus, _gstatus, sizeof(gstatus)))
return -EFAULT;
tid = gstatus.tid;
memset(&gstatus, 0, sizeof(gstatus));
gstatus.tid = tid;
mutex_lock(&register_mutex);
t = snd_timer_find(&tid);
if (t != NULL) {
if (t->hw.c_resolution)
gstatus.resolution = t->hw.c_resolution(t);
else
gstatus.resolution = t->hw.resolution;
if (t->hw.precise_resolution) {
t->hw.precise_resolution(t, &gstatus.resolution_num,
&gstatus.resolution_den);
} else {
gstatus.resolution_num = gstatus.resolution;
gstatus.resolution_den = 1000000000uL;
}
} else {
err = -ENODEV;
}
mutex_unlock(&register_mutex);
if (err >= 0 && copy_to_user(_gstatus, &gstatus, sizeof(gstatus)))
err = -EFAULT;
return err;
}
static int snd_timer_user_tselect(struct file *file,
struct snd_timer_select __user *_tselect)
{
struct snd_timer_user *tu;
struct snd_timer_select tselect;
char str[32];
int err = 0;
tu = file->private_data;
if (tu->timeri) {
snd_timer_close(tu->timeri);
tu->timeri = NULL;
}
if (copy_from_user(&tselect, _tselect, sizeof(tselect))) {
err = -EFAULT;
goto __err;
}
sprintf(str, "application %i", current->pid);
if (tselect.id.dev_class != SNDRV_TIMER_CLASS_SLAVE)
tselect.id.dev_sclass = SNDRV_TIMER_SCLASS_APPLICATION;
err = snd_timer_open(&tu->timeri, str, &tselect.id, current->pid);
if (err < 0)
goto __err;
kfree(tu->queue);
tu->queue = NULL;
kfree(tu->tqueue);
tu->tqueue = NULL;
if (tu->tread) {
tu->tqueue = kmalloc(tu->queue_size * sizeof(struct snd_timer_tread),
GFP_KERNEL);
if (tu->tqueue == NULL)
err = -ENOMEM;
} else {
tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
GFP_KERNEL);
if (tu->queue == NULL)
err = -ENOMEM;
}
if (err < 0) {
snd_timer_close(tu->timeri);
tu->timeri = NULL;
} else {
tu->timeri->flags |= SNDRV_TIMER_IFLG_FAST;
tu->timeri->callback = tu->tread
? snd_timer_user_tinterrupt : snd_timer_user_interrupt;
tu->timeri->ccallback = snd_timer_user_ccallback;
tu->timeri->callback_data = (void *)tu;
tu->timeri->disconnect = snd_timer_user_disconnect;
}
__err:
return err;
}
static int snd_timer_user_info(struct file *file,
struct snd_timer_info __user *_info)
{
struct snd_timer_user *tu;
struct snd_timer_info *info;
struct snd_timer *t;
int err = 0;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
t = tu->timeri->timer;
if (!t)
return -EBADFD;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (! info)
return -ENOMEM;
info->card = t->card ? t->card->number : -1;
if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
info->flags |= SNDRV_TIMER_FLG_SLAVE;
strlcpy(info->id, t->id, sizeof(info->id));
strlcpy(info->name, t->name, sizeof(info->name));
info->resolution = t->hw.resolution;
if (copy_to_user(_info, info, sizeof(*_info)))
err = -EFAULT;
kfree(info);
return err;
}
static int snd_timer_user_params(struct file *file,
struct snd_timer_params __user *_params)
{
struct snd_timer_user *tu;
struct snd_timer_params params;
struct snd_timer *t;
struct snd_timer_read *tr;
struct snd_timer_tread *ttr;
int err;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
t = tu->timeri->timer;
if (!t)
return -EBADFD;
if (copy_from_user(&params, _params, sizeof(params)))
return -EFAULT;
if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE) && params.ticks < 1) {
err = -EINVAL;
goto _end;
}
if (params.queue_size > 0 &&
(params.queue_size < 32 || params.queue_size > 1024)) {
err = -EINVAL;
goto _end;
}
if (params.filter & ~((1<<SNDRV_TIMER_EVENT_RESOLUTION)|
(1<<SNDRV_TIMER_EVENT_TICK)|
(1<<SNDRV_TIMER_EVENT_START)|
(1<<SNDRV_TIMER_EVENT_STOP)|
(1<<SNDRV_TIMER_EVENT_CONTINUE)|
(1<<SNDRV_TIMER_EVENT_PAUSE)|
(1<<SNDRV_TIMER_EVENT_SUSPEND)|
(1<<SNDRV_TIMER_EVENT_RESUME)|
(1<<SNDRV_TIMER_EVENT_MSTART)|
(1<<SNDRV_TIMER_EVENT_MSTOP)|
(1<<SNDRV_TIMER_EVENT_MCONTINUE)|
(1<<SNDRV_TIMER_EVENT_MPAUSE)|
(1<<SNDRV_TIMER_EVENT_MSUSPEND)|
(1<<SNDRV_TIMER_EVENT_MRESUME))) {
err = -EINVAL;
goto _end;
}
snd_timer_stop(tu->timeri);
spin_lock_irq(&t->lock);
tu->timeri->flags &= ~(SNDRV_TIMER_IFLG_AUTO|
SNDRV_TIMER_IFLG_EXCLUSIVE|
SNDRV_TIMER_IFLG_EARLY_EVENT);
if (params.flags & SNDRV_TIMER_PSFLG_AUTO)
tu->timeri->flags |= SNDRV_TIMER_IFLG_AUTO;
if (params.flags & SNDRV_TIMER_PSFLG_EXCLUSIVE)
tu->timeri->flags |= SNDRV_TIMER_IFLG_EXCLUSIVE;
if (params.flags & SNDRV_TIMER_PSFLG_EARLY_EVENT)
tu->timeri->flags |= SNDRV_TIMER_IFLG_EARLY_EVENT;
spin_unlock_irq(&t->lock);
if (params.queue_size > 0 &&
(unsigned int)tu->queue_size != params.queue_size) {
if (tu->tread) {
ttr = kmalloc(params.queue_size * sizeof(*ttr),
GFP_KERNEL);
if (ttr) {
kfree(tu->tqueue);
tu->queue_size = params.queue_size;
tu->tqueue = ttr;
}
} else {
tr = kmalloc(params.queue_size * sizeof(*tr),
GFP_KERNEL);
if (tr) {
kfree(tu->queue);
tu->queue_size = params.queue_size;
tu->queue = tr;
}
}
}
tu->qhead = tu->qtail = tu->qused = 0;
if (tu->timeri->flags & SNDRV_TIMER_IFLG_EARLY_EVENT) {
if (tu->tread) {
struct snd_timer_tread tread;
memset(&tread, 0, sizeof(tread));
tread.event = SNDRV_TIMER_EVENT_EARLY;
tread.tstamp.tv_sec = 0;
tread.tstamp.tv_nsec = 0;
tread.val = 0;
snd_timer_user_append_to_tqueue(tu, &tread);
} else {
struct snd_timer_read *r = &tu->queue[0];
r->resolution = 0;
r->ticks = 0;
tu->qused++;
tu->qtail++;
}
}
tu->filter = params.filter;
tu->ticks = params.ticks;
err = 0;
_end:
if (copy_to_user(_params, &params, sizeof(params)))
return -EFAULT;
return err;
}
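/*
 * queue_size policy above: 0 keeps the current size, any other value
 * must lie within [32, 1024] and triggers reallocation of the
 * matching queue; the ring indices are reset afterwards, so pending
 * unread events are dropped on a parameter change.
 */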
static int snd_timer_user_status(struct file *file,
struct snd_timer_status __user *_status)
{
struct snd_timer_user *tu;
struct snd_timer_status status;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
memset(&status, 0, sizeof(status));
status.tstamp = tu->tstamp;
status.resolution = snd_timer_resolution(tu->timeri);
status.lost = tu->timeri->lost;
status.overrun = tu->overrun;
spin_lock_irq(&tu->qlock);
status.queue = tu->qused;
spin_unlock_irq(&tu->qlock);
if (copy_to_user(_status, &status, sizeof(status)))
return -EFAULT;
return 0;
}
static int snd_timer_user_start(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
snd_timer_stop(tu->timeri);
tu->timeri->lost = 0;
tu->last_resolution = 0;
return (err = snd_timer_start(tu->timeri, tu->ticks)) < 0 ? err : 0;
}
static int snd_timer_user_stop(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
return (err = snd_timer_stop(tu->timeri)) < 0 ? err : 0;
}
static int snd_timer_user_continue(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
tu->timeri->lost = 0;
return (err = snd_timer_continue(tu->timeri)) < 0 ? err : 0;
}
static int snd_timer_user_pause(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
return (err = snd_timer_pause(tu->timeri)) < 0 ? err : 0;
}
enum {
SNDRV_TIMER_IOCTL_START_OLD = _IO('T', 0x20),
SNDRV_TIMER_IOCTL_STOP_OLD = _IO('T', 0x21),
SNDRV_TIMER_IOCTL_CONTINUE_OLD = _IO('T', 0x22),
SNDRV_TIMER_IOCTL_PAUSE_OLD = _IO('T', 0x23),
};
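/*
 * The *_OLD values above are, presumably, legacy ioctl numbers kept
 * for binary compatibility; the dispatcher below routes them to the
 * same handlers as the current encodings.
 */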
static long __snd_timer_user_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct snd_timer_user *tu;
void __user *argp = (void __user *)arg;
int __user *p = argp;
tu = file->private_data;
switch (cmd) {
case SNDRV_TIMER_IOCTL_PVERSION:
return put_user(SNDRV_TIMER_VERSION, p) ? -EFAULT : 0;
case SNDRV_TIMER_IOCTL_NEXT_DEVICE:
return snd_timer_user_next_device(argp);
case SNDRV_TIMER_IOCTL_TREAD:
{
int xarg;
if (tu->timeri) /* too late */
return -EBUSY;
if (get_user(xarg, p))
return -EFAULT;
tu->tread = xarg ? 1 : 0;
return 0;
}
case SNDRV_TIMER_IOCTL_GINFO:
return snd_timer_user_ginfo(file, argp);
case SNDRV_TIMER_IOCTL_GPARAMS:
return snd_timer_user_gparams(file, argp);
case SNDRV_TIMER_IOCTL_GSTATUS:
return snd_timer_user_gstatus(file, argp);
case SNDRV_TIMER_IOCTL_SELECT:
return snd_timer_user_tselect(file, argp);
case SNDRV_TIMER_IOCTL_INFO:
return snd_timer_user_info(file, argp);
case SNDRV_TIMER_IOCTL_PARAMS:
return snd_timer_user_params(file, argp);
case SNDRV_TIMER_IOCTL_STATUS:
return snd_timer_user_status(file, argp);
case SNDRV_TIMER_IOCTL_START:
case SNDRV_TIMER_IOCTL_START_OLD:
return snd_timer_user_start(file);
case SNDRV_TIMER_IOCTL_STOP:
case SNDRV_TIMER_IOCTL_STOP_OLD:
return snd_timer_user_stop(file);
case SNDRV_TIMER_IOCTL_CONTINUE:
case SNDRV_TIMER_IOCTL_CONTINUE_OLD:
return snd_timer_user_continue(file);
case SNDRV_TIMER_IOCTL_PAUSE:
case SNDRV_TIMER_IOCTL_PAUSE_OLD:
return snd_timer_user_pause(file);
}
return -ENOTTY;
}
static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct snd_timer_user *tu = file->private_data;
long ret;
mutex_lock(&tu->ioctl_lock);
ret = __snd_timer_user_ioctl(file, cmd, arg);
mutex_unlock(&tu->ioctl_lock);
return ret;
}
static int snd_timer_user_fasync(int fd, struct file * file, int on)
{
struct snd_timer_user *tu;
tu = file->private_data;
return fasync_helper(fd, file, on, &tu->fasync);
}
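/*
 * Editor's note (summary of the read path below, added for clarity):
 * events are consumed from a ring buffer of tu->queue_size entries
 * guarded by tu->qlock. Each read() drains whole events of size
 * 'unit' (snd_timer_tread in tread mode, snd_timer_read otherwise);
 * when the queue is empty, O_NONBLOCK is clear and nothing has been
 * copied yet, the caller sleeps on tu->qchange_sleep until the timer
 * interrupt queues another event or the device is disconnected.
 */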
static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
size_t count, loff_t *offset)
{
struct snd_timer_user *tu;
long result = 0, unit;
int qhead;
int err = 0;
tu = file->private_data;
unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read);
spin_lock_irq(&tu->qlock);
while ((long)count - result >= unit) {
while (!tu->qused) {
wait_queue_t wait;
if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
err = -EAGAIN;
goto _error;
}
set_current_state(TASK_INTERRUPTIBLE);
init_waitqueue_entry(&wait, current);
add_wait_queue(&tu->qchange_sleep, &wait);
spin_unlock_irq(&tu->qlock);
schedule();
spin_lock_irq(&tu->qlock);
remove_wait_queue(&tu->qchange_sleep, &wait);
if (tu->disconnected) {
err = -ENODEV;
goto _error;
}
if (signal_pending(current)) {
err = -ERESTARTSYS;
goto _error;
}
}
qhead = tu->qhead++;
tu->qhead %= tu->queue_size;
spin_unlock_irq(&tu->qlock);
if (tu->tread) {
if (copy_to_user(buffer, &tu->tqueue[qhead],
sizeof(struct snd_timer_tread)))
err = -EFAULT;
} else {
if (copy_to_user(buffer, &tu->queue[qhead],
sizeof(struct snd_timer_read)))
err = -EFAULT;
}
spin_lock_irq(&tu->qlock);
tu->qused--;
if (err < 0)
goto _error;
result += unit;
buffer += unit;
}
_error:
spin_unlock_irq(&tu->qlock);
return result > 0 ? result : err;
}
static unsigned int snd_timer_user_poll(struct file *file, poll_table * wait)
{
unsigned int mask;
struct snd_timer_user *tu;
tu = file->private_data;
poll_wait(file, &tu->qchange_sleep, wait);
mask = 0;
if (tu->qused)
mask |= POLLIN | POLLRDNORM;
if (tu->disconnected)
mask |= POLLERR;
return mask;
}
#ifdef CONFIG_COMPAT
#include "timer_compat.c"
#else
#define snd_timer_user_ioctl_compat NULL
#endif
static const struct file_operations snd_timer_f_ops =
{
.owner = THIS_MODULE,
.read = snd_timer_user_read,
.open = snd_timer_user_open,
.release = snd_timer_user_release,
.llseek = no_llseek,
.poll = snd_timer_user_poll,
.unlocked_ioctl = snd_timer_user_ioctl,
.compat_ioctl = snd_timer_user_ioctl_compat,
.fasync = snd_timer_user_fasync,
};
/* unregister the system timer */
static void snd_timer_free_all(void)
{
struct snd_timer *timer, *n;
list_for_each_entry_safe(timer, n, &snd_timer_list, device_list)
snd_timer_free(timer);
}
static struct device timer_dev;
/*
* ENTRY functions
*/
static int __init alsa_timer_init(void)
{
int err;
snd_device_initialize(&timer_dev, NULL);
dev_set_name(&timer_dev, "timer");
#ifdef SNDRV_OSS_INFO_DEV_TIMERS
snd_oss_info_register(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1,
"system timer");
#endif
err = snd_timer_register_system();
if (err < 0) {
pr_err("ALSA: unable to register system timer (%i)\n", err);
put_device(&timer_dev);
return err;
}
err = snd_register_device(SNDRV_DEVICE_TYPE_TIMER, NULL, 0,
&snd_timer_f_ops, NULL, &timer_dev);
if (err < 0) {
pr_err("ALSA: unable to register timer device (%i)\n", err);
snd_timer_free_all();
put_device(&timer_dev);
return err;
}
snd_timer_proc_init();
return 0;
}
static void __exit alsa_timer_exit(void)
{
snd_unregister_device(&timer_dev);
snd_timer_free_all();
put_device(&timer_dev);
snd_timer_proc_done();
#ifdef SNDRV_OSS_INFO_DEV_TIMERS
snd_oss_info_unregister(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1);
#endif
}
module_init(alsa_timer_init)
module_exit(alsa_timer_exit)
EXPORT_SYMBOL(snd_timer_open);
EXPORT_SYMBOL(snd_timer_close);
EXPORT_SYMBOL(snd_timer_resolution);
EXPORT_SYMBOL(snd_timer_start);
EXPORT_SYMBOL(snd_timer_stop);
EXPORT_SYMBOL(snd_timer_continue);
EXPORT_SYMBOL(snd_timer_pause);
EXPORT_SYMBOL(snd_timer_new);
EXPORT_SYMBOL(snd_timer_notify);
EXPORT_SYMBOL(snd_timer_global_new);
EXPORT_SYMBOL(snd_timer_global_free);
EXPORT_SYMBOL(snd_timer_global_register);
EXPORT_SYMBOL(snd_timer_interrupt);
/* ==== end of CrossVul sample good_5057_0 (CWE-200, ALSA timer driver) ==== */

/* ==== CrossVul sample bad_3769_2 (CWE-200, linux/fs/exec.c) ==== */
/*
* linux/fs/exec.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
/*
* #!-checking implemented by tytso.
*/
/*
* Demand-loading implemented 01.12.91 - no need to read anything but
* the header into memory. The inode of the executable is put into
* "current->executable", and page faults do the actual loading. Clean.
*
* Once more I can proudly say that linux stood up to being changed: it
* was less than 2 hours work to get demand-loading completely implemented.
*
* Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
* current->executable is only used by the procfs. This allows a dispatch
* table to check for several different types of binary formats. We keep
* trying until we recognize the file or we run out of supported binary
* formats.
*/
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <trace/events/task.h>
#include "internal.h"
#include "coredump.h"
#include <trace/events/sched.h>
int suid_dumpable = 0;
static LIST_HEAD(formats);
static DEFINE_RWLOCK(binfmt_lock);
void __register_binfmt(struct linux_binfmt * fmt, int insert)
{
BUG_ON(!fmt);
write_lock(&binfmt_lock);
insert ? list_add(&fmt->lh, &formats) :
list_add_tail(&fmt->lh, &formats);
write_unlock(&binfmt_lock);
}
EXPORT_SYMBOL(__register_binfmt);
void unregister_binfmt(struct linux_binfmt * fmt)
{
write_lock(&binfmt_lock);
list_del(&fmt->lh);
write_unlock(&binfmt_lock);
}
EXPORT_SYMBOL(unregister_binfmt);
static inline void put_binfmt(struct linux_binfmt * fmt)
{
module_put(fmt->module);
}
/*
 * Note that a shared library must be both readable and executable for
 * security reasons.
 *
 * Also note that we take the address to load from the file itself.
*/
SYSCALL_DEFINE1(uselib, const char __user *, library)
{
struct file *file;
struct filename *tmp = getname(library);
int error = PTR_ERR(tmp);
static const struct open_flags uselib_flags = {
.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
.acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
.intent = LOOKUP_OPEN
};
if (IS_ERR(tmp))
goto out;
file = do_filp_open(AT_FDCWD, tmp, &uselib_flags, LOOKUP_FOLLOW);
putname(tmp);
error = PTR_ERR(file);
if (IS_ERR(file))
goto out;
error = -EINVAL;
if (!S_ISREG(file->f_path.dentry->d_inode->i_mode))
goto exit;
error = -EACCES;
if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
goto exit;
fsnotify_open(file);
error = -ENOEXEC;
if(file->f_op) {
struct linux_binfmt * fmt;
read_lock(&binfmt_lock);
list_for_each_entry(fmt, &formats, lh) {
if (!fmt->load_shlib)
continue;
if (!try_module_get(fmt->module))
continue;
read_unlock(&binfmt_lock);
error = fmt->load_shlib(file);
read_lock(&binfmt_lock);
put_binfmt(fmt);
if (error != -ENOEXEC)
break;
}
read_unlock(&binfmt_lock);
}
exit:
fput(file);
out:
return error;
}
#ifdef CONFIG_MMU
/*
* The nascent bprm->mm is not visible until exec_mmap() but it can
 * use a lot of memory, so account these pages in current->mm temporarily
* for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we
* change the counter back via acct_arg_size(0).
*/
static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
struct mm_struct *mm = current->mm;
long diff = (long)(pages - bprm->vma_pages);
if (!mm || !diff)
return;
bprm->vma_pages = pages;
add_mm_counter(mm, MM_ANONPAGES, diff);
}
static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
int write)
{
struct page *page;
int ret;
#ifdef CONFIG_STACK_GROWSUP
if (write) {
ret = expand_downwards(bprm->vma, pos);
if (ret < 0)
return NULL;
}
#endif
ret = get_user_pages(current, bprm->mm, pos,
1, write, 1, &page, NULL);
if (ret <= 0)
return NULL;
if (write) {
unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
struct rlimit *rlim;
acct_arg_size(bprm, size / PAGE_SIZE);
/*
* We've historically supported up to 32 pages (ARG_MAX)
* of argument strings even with small stacks
*/
if (size <= ARG_MAX)
return page;
/*
* Limit to 1/4-th the stack size for the argv+env strings.
* This ensures that:
* - the remaining binfmt code will not run out of stack space,
* - the program will have a reasonable amount of stack left
* to work from.
*/
rlim = current->signal->rlim;
if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
put_page(page);
return NULL;
}
}
return page;
}
static void put_arg_page(struct page *page)
{
put_page(page);
}
static void free_arg_page(struct linux_binprm *bprm, int i)
{
}
static void free_arg_pages(struct linux_binprm *bprm)
{
}
static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
struct page *page)
{
flush_cache_page(bprm->vma, pos, page_to_pfn(page));
}
static int __bprm_mm_init(struct linux_binprm *bprm)
{
int err;
struct vm_area_struct *vma = NULL;
struct mm_struct *mm = bprm->mm;
bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (!vma)
return -ENOMEM;
down_write(&mm->mmap_sem);
vma->vm_mm = mm;
/*
* Place the stack at the largest stack address the architecture
* supports. Later, we'll move this to an appropriate place. We don't
* use STACK_TOP because that can depend on attributes which aren't
* configured yet.
*/
BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
vma->vm_end = STACK_TOP_MAX;
vma->vm_start = vma->vm_end - PAGE_SIZE;
vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
INIT_LIST_HEAD(&vma->anon_vma_chain);
err = insert_vm_struct(mm, vma);
if (err)
goto err;
mm->stack_vm = mm->total_vm = 1;
up_write(&mm->mmap_sem);
bprm->p = vma->vm_end - sizeof(void *);
return 0;
err:
up_write(&mm->mmap_sem);
bprm->vma = NULL;
kmem_cache_free(vm_area_cachep, vma);
return err;
}
static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
return len <= MAX_ARG_STRLEN;
}
#else
static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
}
static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
int write)
{
struct page *page;
page = bprm->page[pos / PAGE_SIZE];
if (!page && write) {
page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
if (!page)
return NULL;
bprm->page[pos / PAGE_SIZE] = page;
}
return page;
}
static void put_arg_page(struct page *page)
{
}
static void free_arg_page(struct linux_binprm *bprm, int i)
{
if (bprm->page[i]) {
__free_page(bprm->page[i]);
bprm->page[i] = NULL;
}
}
static void free_arg_pages(struct linux_binprm *bprm)
{
int i;
for (i = 0; i < MAX_ARG_PAGES; i++)
free_arg_page(bprm, i);
}
static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
struct page *page)
{
}
static int __bprm_mm_init(struct linux_binprm *bprm)
{
bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
return 0;
}
static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
return len <= bprm->p;
}
#endif /* CONFIG_MMU */
/*
* Create a new mm_struct and populate it with a temporary stack
* vm_area_struct. We don't have enough context at this point to set the stack
* flags, permissions, and offset, so we use temporary values. We'll update
* them later in setup_arg_pages().
*/
int bprm_mm_init(struct linux_binprm *bprm)
{
int err;
struct mm_struct *mm = NULL;
bprm->mm = mm = mm_alloc();
err = -ENOMEM;
if (!mm)
goto err;
err = init_new_context(current, mm);
if (err)
goto err;
err = __bprm_mm_init(bprm);
if (err)
goto err;
return 0;
err:
if (mm) {
bprm->mm = NULL;
mmdrop(mm);
}
return err;
}
struct user_arg_ptr {
#ifdef CONFIG_COMPAT
bool is_compat;
#endif
union {
const char __user *const __user *native;
#ifdef CONFIG_COMPAT
const compat_uptr_t __user *compat;
#endif
} ptr;
};
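/*
 * Editor's note (illustration): for a native execve() the entries of
 * argv are full-width user pointers walked via .ptr.native; for a
 * 32-bit task on a 64-bit kernel (CONFIG_COMPAT) they are 4-byte
 * compat_uptr_t values, which get_user_arg_ptr() below widens with
 * compat_ptr() before use.
 */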
static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
{
const char __user *native;
#ifdef CONFIG_COMPAT
if (unlikely(argv.is_compat)) {
compat_uptr_t compat;
if (get_user(compat, argv.ptr.compat + nr))
return ERR_PTR(-EFAULT);
return compat_ptr(compat);
}
#endif
if (get_user(native, argv.ptr.native + nr))
return ERR_PTR(-EFAULT);
return native;
}
/*
* count() counts the number of strings in array ARGV.
*/
static int count(struct user_arg_ptr argv, int max)
{
int i = 0;
if (argv.ptr.native != NULL) {
for (;;) {
const char __user *p = get_user_arg_ptr(argv, i);
if (!p)
break;
if (IS_ERR(p))
return -EFAULT;
if (i++ >= max)
return -E2BIG;
if (fatal_signal_pending(current))
return -ERESTARTNOHAND;
cond_resched();
}
}
return i;
}
/*
* 'copy_strings()' copies argument/environment strings from the old
 * process's memory to the new process's stack. The call to get_user_pages()
* ensures the destination page is created and not swapped out.
*/
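/*
 * Editor's sketch (illustration only, not from the original source):
 * strings are copied starting from the last argv entry down to the
 * first, and each string is itself copied tail-first in page-sized
 * chunks, so bprm->p always points at the lowest byte written so far.
 * For argv = {"ls", "-l"} the area below the old bprm->p ends up as:
 *
 *     new bprm->p -> 'l' 's' '\0' '-' 'l' '\0' <- old bprm->p
 */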
static int copy_strings(int argc, struct user_arg_ptr argv,
struct linux_binprm *bprm)
{
struct page *kmapped_page = NULL;
char *kaddr = NULL;
unsigned long kpos = 0;
int ret;
while (argc-- > 0) {
const char __user *str;
int len;
unsigned long pos;
ret = -EFAULT;
str = get_user_arg_ptr(argv, argc);
if (IS_ERR(str))
goto out;
len = strnlen_user(str, MAX_ARG_STRLEN);
if (!len)
goto out;
ret = -E2BIG;
if (!valid_arg_len(bprm, len))
goto out;
/* We're going to work our way backwards. */
pos = bprm->p;
str += len;
bprm->p -= len;
while (len > 0) {
int offset, bytes_to_copy;
if (fatal_signal_pending(current)) {
ret = -ERESTARTNOHAND;
goto out;
}
cond_resched();
offset = pos % PAGE_SIZE;
if (offset == 0)
offset = PAGE_SIZE;
bytes_to_copy = offset;
if (bytes_to_copy > len)
bytes_to_copy = len;
offset -= bytes_to_copy;
pos -= bytes_to_copy;
str -= bytes_to_copy;
len -= bytes_to_copy;
if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
struct page *page;
page = get_arg_page(bprm, pos, 1);
if (!page) {
ret = -E2BIG;
goto out;
}
if (kmapped_page) {
flush_kernel_dcache_page(kmapped_page);
kunmap(kmapped_page);
put_arg_page(kmapped_page);
}
kmapped_page = page;
kaddr = kmap(kmapped_page);
kpos = pos & PAGE_MASK;
flush_arg_page(bprm, kpos, kmapped_page);
}
if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
ret = -EFAULT;
goto out;
}
}
}
ret = 0;
out:
if (kmapped_page) {
flush_kernel_dcache_page(kmapped_page);
kunmap(kmapped_page);
put_arg_page(kmapped_page);
}
return ret;
}
/*
* Like copy_strings, but get argv and its values from kernel memory.
*/
int copy_strings_kernel(int argc, const char *const *__argv,
struct linux_binprm *bprm)
{
int r;
mm_segment_t oldfs = get_fs();
struct user_arg_ptr argv = {
.ptr.native = (const char __user *const __user *)__argv,
};
set_fs(KERNEL_DS);
r = copy_strings(argc, argv, bprm);
set_fs(oldfs);
return r;
}
EXPORT_SYMBOL(copy_strings_kernel);
#ifdef CONFIG_MMU
/*
* During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX. Once
* the binfmt code determines where the new stack should reside, we shift it to
* its final location. The process proceeds as follows:
*
* 1) Use shift to calculate the new vma endpoints.
* 2) Extend vma to cover both the old and new ranges. This ensures the
* arguments passed to subsequent functions are consistent.
* 3) Move vma's page tables to the new range.
* 4) Free up any cleared pgd range.
* 5) Shrink the vma to cover only the new range.
*/
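/*
 * Editor's sketch of the ranges involved (illustration only, drawn for
 * the overlapping case, i.e. shift < length):
 *
 *   new_start       old_start        new_end         old_end
 *       |               |               |               |
 *       |<====== vma after step 5 =====>|
 *                       |<======= original vma ========>|
 *       |<=========== vma during steps 2-4 ============>|
 *
 * where new_start = old_start - shift and new_end = old_end - shift.
 */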
static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long old_start = vma->vm_start;
unsigned long old_end = vma->vm_end;
unsigned long length = old_end - old_start;
unsigned long new_start = old_start - shift;
unsigned long new_end = old_end - shift;
struct mmu_gather tlb;
BUG_ON(new_start > new_end);
/*
* ensure there are no vmas between where we want to go
* and where we are
*/
if (vma != find_vma(mm, new_start))
return -EFAULT;
/*
* cover the whole range: [new_start, old_end)
*/
if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
return -ENOMEM;
/*
* move the page tables downwards, on failure we rely on
* process cleanup to remove whatever mess we made.
*/
if (length != move_page_tables(vma, old_start,
vma, new_start, length, false))
return -ENOMEM;
lru_add_drain();
tlb_gather_mmu(&tlb, mm, 0);
if (new_end > old_start) {
/*
* when the old and new regions overlap clear from new_end.
*/
free_pgd_range(&tlb, new_end, old_end, new_end,
vma->vm_next ? vma->vm_next->vm_start : 0);
} else {
/*
 * otherwise, clean from old_start; this is done so as not to touch
 * the address space in [new_end, old_start), since some architectures
 * have constraints on va-space that make this illegal (IA64) -
 * for the others it's just a little faster.
*/
free_pgd_range(&tlb, old_start, old_end, new_end,
vma->vm_next ? vma->vm_next->vm_start : 0);
}
tlb_finish_mmu(&tlb, new_end, old_end);
/*
* Shrink the vma to just the new range. Always succeeds.
*/
vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);
return 0;
}
/*
* Finalizes the stack vm_area_struct. The flags and permissions are updated,
* the stack is optionally relocated, and some extra space is added.
*/
int setup_arg_pages(struct linux_binprm *bprm,
unsigned long stack_top,
int executable_stack)
{
unsigned long ret;
unsigned long stack_shift;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma = bprm->vma;
struct vm_area_struct *prev = NULL;
unsigned long vm_flags;
unsigned long stack_base;
unsigned long stack_size;
unsigned long stack_expand;
unsigned long rlim_stack;
#ifdef CONFIG_STACK_GROWSUP
/* Limit stack size to 1GB */
stack_base = rlimit_max(RLIMIT_STACK);
if (stack_base > (1 << 30))
stack_base = 1 << 30;
/* Make sure we didn't let the argument array grow too large. */
if (vma->vm_end - vma->vm_start > stack_base)
return -ENOMEM;
stack_base = PAGE_ALIGN(stack_top - stack_base);
stack_shift = vma->vm_start - stack_base;
mm->arg_start = bprm->p - stack_shift;
bprm->p = vma->vm_end - stack_shift;
#else
stack_top = arch_align_stack(stack_top);
stack_top = PAGE_ALIGN(stack_top);
if (unlikely(stack_top < mmap_min_addr) ||
unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
return -ENOMEM;
stack_shift = vma->vm_end - stack_top;
bprm->p -= stack_shift;
mm->arg_start = bprm->p;
#endif
if (bprm->loader)
bprm->loader -= stack_shift;
bprm->exec -= stack_shift;
down_write(&mm->mmap_sem);
vm_flags = VM_STACK_FLAGS;
/*
* Adjust stack execute permissions; explicitly enable for
* EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
* (arch default) otherwise.
*/
if (unlikely(executable_stack == EXSTACK_ENABLE_X))
vm_flags |= VM_EXEC;
else if (executable_stack == EXSTACK_DISABLE_X)
vm_flags &= ~VM_EXEC;
vm_flags |= mm->def_flags;
vm_flags |= VM_STACK_INCOMPLETE_SETUP;
ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
vm_flags);
if (ret)
goto out_unlock;
BUG_ON(prev != vma);
/* Move stack pages down in memory. */
if (stack_shift) {
ret = shift_arg_pages(vma, stack_shift);
if (ret)
goto out_unlock;
}
/* mprotect_fixup is overkill to remove the temporary stack flags */
vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
stack_size = vma->vm_end - vma->vm_start;
/*
* Align this down to a page boundary as expand_stack
* will align it up.
*/
rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
#ifdef CONFIG_STACK_GROWSUP
if (stack_size + stack_expand > rlim_stack)
stack_base = vma->vm_start + rlim_stack;
else
stack_base = vma->vm_end + stack_expand;
#else
if (stack_size + stack_expand > rlim_stack)
stack_base = vma->vm_end - rlim_stack;
else
stack_base = vma->vm_start - stack_expand;
#endif
current->mm->start_stack = bprm->p;
ret = expand_stack(vma, stack_base);
if (ret)
ret = -EFAULT;
out_unlock:
up_write(&mm->mmap_sem);
return ret;
}
EXPORT_SYMBOL(setup_arg_pages);
#endif /* CONFIG_MMU */
struct file *open_exec(const char *name)
{
struct file *file;
int err;
struct filename tmp = { .name = name };
static const struct open_flags open_exec_flags = {
.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
.acc_mode = MAY_EXEC | MAY_OPEN,
.intent = LOOKUP_OPEN
};
file = do_filp_open(AT_FDCWD, &tmp, &open_exec_flags, LOOKUP_FOLLOW);
if (IS_ERR(file))
goto out;
err = -EACCES;
if (!S_ISREG(file->f_path.dentry->d_inode->i_mode))
goto exit;
if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
goto exit;
fsnotify_open(file);
err = deny_write_access(file);
if (err)
goto exit;
out:
return file;
exit:
fput(file);
return ERR_PTR(err);
}
EXPORT_SYMBOL(open_exec);
int kernel_read(struct file *file, loff_t offset,
char *addr, unsigned long count)
{
mm_segment_t old_fs;
loff_t pos = offset;
int result;
old_fs = get_fs();
set_fs(get_ds());
/* The cast to a user pointer is valid due to the set_fs() */
result = vfs_read(file, (void __user *)addr, count, &pos);
set_fs(old_fs);
return result;
}
EXPORT_SYMBOL(kernel_read);
static int exec_mmap(struct mm_struct *mm)
{
struct task_struct *tsk;
struct mm_struct * old_mm, *active_mm;
/* Notify parent that we're no longer interested in the old VM */
tsk = current;
old_mm = current->mm;
mm_release(tsk, old_mm);
if (old_mm) {
sync_mm_rss(old_mm);
/*
* Make sure that if there is a core dump in progress
* for the old mm, we get out and die instead of going
* through with the exec. We must hold mmap_sem around
* checking core_state and changing tsk->mm.
*/
down_read(&old_mm->mmap_sem);
if (unlikely(old_mm->core_state)) {
up_read(&old_mm->mmap_sem);
return -EINTR;
}
}
task_lock(tsk);
active_mm = tsk->active_mm;
tsk->mm = mm;
tsk->active_mm = mm;
activate_mm(active_mm, mm);
task_unlock(tsk);
arch_pick_mmap_layout(mm);
if (old_mm) {
up_read(&old_mm->mmap_sem);
BUG_ON(active_mm != old_mm);
setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
mm_update_next_owner(old_mm);
mmput(old_mm);
return 0;
}
mmdrop(active_mm);
return 0;
}
/*
* This function makes sure the current process has its own signal table,
* so that flush_signal_handlers can later reset the handlers without
* disturbing other processes. (Other processes might share the signal
* table via the CLONE_SIGHAND option to clone().)
*/
static int de_thread(struct task_struct *tsk)
{
struct signal_struct *sig = tsk->signal;
struct sighand_struct *oldsighand = tsk->sighand;
spinlock_t *lock = &oldsighand->siglock;
if (thread_group_empty(tsk))
goto no_thread_group;
/*
* Kill all other threads in the thread group.
*/
spin_lock_irq(lock);
if (signal_group_exit(sig)) {
/*
* Another group action in progress, just
* return so that the signal is processed.
*/
spin_unlock_irq(lock);
return -EAGAIN;
}
sig->group_exit_task = tsk;
sig->notify_count = zap_other_threads(tsk);
if (!thread_group_leader(tsk))
sig->notify_count--;
while (sig->notify_count) {
__set_current_state(TASK_KILLABLE);
spin_unlock_irq(lock);
schedule();
if (unlikely(__fatal_signal_pending(tsk)))
goto killed;
spin_lock_irq(lock);
}
spin_unlock_irq(lock);
/*
* At this point all other threads have exited, all we have to
* do is to wait for the thread group leader to become inactive,
* and to assume its PID:
*/
if (!thread_group_leader(tsk)) {
struct task_struct *leader = tsk->group_leader;
sig->notify_count = -1; /* for exit_notify() */
for (;;) {
write_lock_irq(&tasklist_lock);
if (likely(leader->exit_state))
break;
__set_current_state(TASK_KILLABLE);
write_unlock_irq(&tasklist_lock);
schedule();
if (unlikely(__fatal_signal_pending(tsk)))
goto killed;
}
/*
* The only record we have of the real-time age of a
* process, regardless of execs it's done, is start_time.
* All the past CPU time is accumulated in signal_struct
* from sister threads now dead. But in this non-leader
* exec, nothing survives from the original leader thread,
* whose birth marks the true age of this process now.
* When we take on its identity by switching to its PID, we
* also take its birthdate (always earlier than our own).
*/
tsk->start_time = leader->start_time;
BUG_ON(!same_thread_group(leader, tsk));
BUG_ON(has_group_leader_pid(tsk));
/*
* An exec() starts a new thread group with the
* TGID of the previous thread group. Rehash the
* two threads with a switched PID, and release
* the former thread group leader:
*/
/* Become a process group leader with the old leader's pid.
 * The old leader becomes a thread of this thread group.
* Note: The old leader also uses this pid until release_task
* is called. Odd but simple and correct.
*/
detach_pid(tsk, PIDTYPE_PID);
tsk->pid = leader->pid;
attach_pid(tsk, PIDTYPE_PID, task_pid(leader));
transfer_pid(leader, tsk, PIDTYPE_PGID);
transfer_pid(leader, tsk, PIDTYPE_SID);
list_replace_rcu(&leader->tasks, &tsk->tasks);
list_replace_init(&leader->sibling, &tsk->sibling);
tsk->group_leader = tsk;
leader->group_leader = tsk;
tsk->exit_signal = SIGCHLD;
leader->exit_signal = -1;
BUG_ON(leader->exit_state != EXIT_ZOMBIE);
leader->exit_state = EXIT_DEAD;
/*
* We are going to release_task()->ptrace_unlink() silently,
* the tracer can sleep in do_wait(). EXIT_DEAD guarantees
 * the tracer won't block again waiting for this thread.
*/
if (unlikely(leader->ptrace))
__wake_up_parent(leader, leader->parent);
write_unlock_irq(&tasklist_lock);
release_task(leader);
}
sig->group_exit_task = NULL;
sig->notify_count = 0;
no_thread_group:
/* we have changed execution domain */
tsk->exit_signal = SIGCHLD;
exit_itimers(sig);
flush_itimer_signals();
if (atomic_read(&oldsighand->count) != 1) {
struct sighand_struct *newsighand;
/*
 * This ->sighand is shared with a CLONE_SIGHAND but not
 * CLONE_THREAD task; switch to the new one.
*/
newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
if (!newsighand)
return -ENOMEM;
atomic_set(&newsighand->count, 1);
memcpy(newsighand->action, oldsighand->action,
sizeof(newsighand->action));
write_lock_irq(&tasklist_lock);
spin_lock(&oldsighand->siglock);
rcu_assign_pointer(tsk->sighand, newsighand);
spin_unlock(&oldsighand->siglock);
write_unlock_irq(&tasklist_lock);
__cleanup_sighand(oldsighand);
}
BUG_ON(!thread_group_leader(tsk));
return 0;
killed:
/* protects against exit_notify() and __exit_signal() */
read_lock(&tasklist_lock);
sig->group_exit_task = NULL;
sig->notify_count = 0;
read_unlock(&tasklist_lock);
return -EAGAIN;
}
char *get_task_comm(char *buf, struct task_struct *tsk)
{
/* buf must be at least sizeof(tsk->comm) in size */
task_lock(tsk);
strncpy(buf, tsk->comm, sizeof(tsk->comm));
task_unlock(tsk);
return buf;
}
EXPORT_SYMBOL_GPL(get_task_comm);
/*
 * These functions flush out all traces of the currently running executable
* so that a new one can be started
*/
void set_task_comm(struct task_struct *tsk, char *buf)
{
task_lock(tsk);
trace_task_rename(tsk, buf);
/*
* Threads may access current->comm without holding
* the task lock, so write the string carefully.
* Readers without a lock may see incomplete new
* names but are safe from non-terminating string reads.
*/
memset(tsk->comm, 0, TASK_COMM_LEN);
wmb();
strlcpy(tsk->comm, buf, sizeof(tsk->comm));
task_unlock(tsk);
perf_event_comm(tsk);
}
static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
{
int i, ch;
/* Copies the binary name from after last slash */
for (i = 0; (ch = *(fn++)) != '\0';) {
if (ch == '/')
i = 0; /* overwrite what we wrote */
else
if (i < len - 1)
tcomm[i++] = ch;
}
tcomm[i] = '\0';
}
int flush_old_exec(struct linux_binprm * bprm)
{
int retval;
/*
* Make sure we have a private signal table and that
* we are unassociated from the previous thread group.
*/
retval = de_thread(current);
if (retval)
goto out;
set_mm_exe_file(bprm->mm, bprm->file);
filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
/*
* Release all of the old mmap stuff
*/
acct_arg_size(bprm, 0);
retval = exec_mmap(bprm->mm);
if (retval)
goto out;
bprm->mm = NULL; /* We're using it now */
set_fs(USER_DS);
current->flags &=
~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD | PF_NOFREEZE);
flush_thread();
current->personality &= ~bprm->per_clear;
return 0;
out:
return retval;
}
EXPORT_SYMBOL(flush_old_exec);
void would_dump(struct linux_binprm *bprm, struct file *file)
{
if (inode_permission(file->f_path.dentry->d_inode, MAY_READ) < 0)
bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
}
EXPORT_SYMBOL(would_dump);
void setup_new_exec(struct linux_binprm * bprm)
{
arch_pick_mmap_layout(current->mm);
/* This is the point of no return */
current->sas_ss_sp = current->sas_ss_size = 0;
if (uid_eq(current_euid(), current_uid()) && gid_eq(current_egid(), current_gid()))
set_dumpable(current->mm, SUID_DUMPABLE_ENABLED);
else
set_dumpable(current->mm, suid_dumpable);
set_task_comm(current, bprm->tcomm);
/* Set the new mm task size. We have to do that late because it may
* depend on TIF_32BIT which is only updated in flush_thread() on
* some architectures like powerpc
*/
current->mm->task_size = TASK_SIZE;
/* install the new credentials */
if (!uid_eq(bprm->cred->uid, current_euid()) ||
!gid_eq(bprm->cred->gid, current_egid())) {
current->pdeath_signal = 0;
} else {
would_dump(bprm, bprm->file);
if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)
set_dumpable(current->mm, suid_dumpable);
}
/*
* Flush performance counters when crossing a
* security domain:
*/
if (!get_dumpable(current->mm))
perf_event_exit_task(current);
/* An exec changes our domain. We are no longer part of the thread
group */
current->self_exec_id++;
flush_signal_handlers(current, 0);
do_close_on_exec(current->files);
}
EXPORT_SYMBOL(setup_new_exec);
/*
* Prepare credentials and lock ->cred_guard_mutex.
* install_exec_creds() commits the new creds and drops the lock.
 * Or, if exec fails before, free_bprm() should release ->cred
 * and unlock.
*/
int prepare_bprm_creds(struct linux_binprm *bprm)
{
if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
return -ERESTARTNOINTR;
bprm->cred = prepare_exec_creds();
if (likely(bprm->cred))
return 0;
mutex_unlock(&current->signal->cred_guard_mutex);
return -ENOMEM;
}
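/*
 * Editor's note - the intended pairing, as described above
 * (illustration only):
 *
 *	prepare_bprm_creds(bprm);	<- takes cred_guard_mutex
 *	...
 *	install_exec_creds(bprm);	<- success: commit creds + unlock
 *	free_bprm(bprm);		<- failure path: abort_creds() + unlock
 */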
void free_bprm(struct linux_binprm *bprm)
{
free_arg_pages(bprm);
if (bprm->cred) {
mutex_unlock(&current->signal->cred_guard_mutex);
abort_creds(bprm->cred);
}
kfree(bprm);
}
/*
* install the new credentials for this executable
*/
void install_exec_creds(struct linux_binprm *bprm)
{
security_bprm_committing_creds(bprm);
commit_creds(bprm->cred);
bprm->cred = NULL;
/*
* cred_guard_mutex must be held at least to this point to prevent
* ptrace_attach() from altering our determination of the task's
* credentials; any time after this it may be unlocked.
*/
security_bprm_committed_creds(bprm);
mutex_unlock(&current->signal->cred_guard_mutex);
}
EXPORT_SYMBOL(install_exec_creds);
/*
* determine how safe it is to execute the proposed program
* - the caller must hold ->cred_guard_mutex to protect against
* PTRACE_ATTACH
*/
static int check_unsafe_exec(struct linux_binprm *bprm)
{
struct task_struct *p = current, *t;
unsigned n_fs;
int res = 0;
if (p->ptrace) {
if (p->ptrace & PT_PTRACE_CAP)
bprm->unsafe |= LSM_UNSAFE_PTRACE_CAP;
else
bprm->unsafe |= LSM_UNSAFE_PTRACE;
}
/*
* This isn't strictly necessary, but it makes it harder for LSMs to
* mess up.
*/
if (current->no_new_privs)
bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;
n_fs = 1;
spin_lock(&p->fs->lock);
rcu_read_lock();
for (t = next_thread(p); t != p; t = next_thread(t)) {
if (t->fs == p->fs)
n_fs++;
}
rcu_read_unlock();
if (p->fs->users > n_fs) {
bprm->unsafe |= LSM_UNSAFE_SHARE;
} else {
res = -EAGAIN;
if (!p->fs->in_exec) {
p->fs->in_exec = 1;
res = 1;
}
}
spin_unlock(&p->fs->lock);
return res;
}
/*
* Fill the binprm structure from the inode.
* Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
*
* This may be called multiple times for binary chains (scripts for example).
*/
int prepare_binprm(struct linux_binprm *bprm)
{
umode_t mode;
struct inode * inode = bprm->file->f_path.dentry->d_inode;
int retval;
mode = inode->i_mode;
if (bprm->file->f_op == NULL)
return -EACCES;
/* clear any previous set[ug]id data from a previous binary */
bprm->cred->euid = current_euid();
bprm->cred->egid = current_egid();
if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) &&
!current->no_new_privs &&
kuid_has_mapping(bprm->cred->user_ns, inode->i_uid) &&
kgid_has_mapping(bprm->cred->user_ns, inode->i_gid)) {
/* Set-uid? */
if (mode & S_ISUID) {
bprm->per_clear |= PER_CLEAR_ON_SETID;
bprm->cred->euid = inode->i_uid;
}
/* Set-gid? */
/*
* If setgid is set but no group execute bit then this
* is a candidate for mandatory locking, not a setgid
* executable.
*/
if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
bprm->per_clear |= PER_CLEAR_ON_SETID;
bprm->cred->egid = inode->i_gid;
}
}
/* fill in binprm security blob */
retval = security_bprm_set_creds(bprm);
if (retval)
return retval;
bprm->cred_prepared = 1;
memset(bprm->buf, 0, BINPRM_BUF_SIZE);
return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}
EXPORT_SYMBOL(prepare_binprm);
/*
* Arguments are '\0' separated strings found at the location bprm->p
 * points to; chop off the first by relocating bprm->p to right after
* the first '\0' encountered.
*/
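/*
 * Editor's example (illustration only): with "ls\0-l\0" at bprm->p and
 * argc == 2, remove_arg_zero() leaves bprm->p pointing at "-l\0" and
 * argc == 1; a binfmt handler (e.g. binfmt_script) can then push a new
 * argv[0] in its place.
 */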
int remove_arg_zero(struct linux_binprm *bprm)
{
int ret = 0;
unsigned long offset;
char *kaddr;
struct page *page;
if (!bprm->argc)
return 0;
do {
offset = bprm->p & ~PAGE_MASK;
page = get_arg_page(bprm, bprm->p, 0);
if (!page) {
ret = -EFAULT;
goto out;
}
kaddr = kmap_atomic(page);
for (; offset < PAGE_SIZE && kaddr[offset];
offset++, bprm->p++)
;
kunmap_atomic(kaddr);
put_arg_page(page);
if (offset == PAGE_SIZE)
free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
} while (offset == PAGE_SIZE);
bprm->p++;
bprm->argc--;
ret = 0;
out:
return ret;
}
EXPORT_SYMBOL(remove_arg_zero);
/*
 * cycle through the list of binary format handlers until one recognizes the image
*/
int search_binary_handler(struct linux_binprm *bprm)
{
unsigned int depth = bprm->recursion_depth;
int try,retval;
struct linux_binfmt *fmt;
pid_t old_pid, old_vpid;
/* This allows 4 levels of binfmt rewrites before failing hard. */
if (depth > 5)
return -ELOOP;
retval = security_bprm_check(bprm);
if (retval)
return retval;
retval = audit_bprm(bprm);
if (retval)
return retval;
/* Need to fetch pid before load_binary changes it */
old_pid = current->pid;
rcu_read_lock();
old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
rcu_read_unlock();
retval = -ENOENT;
for (try=0; try<2; try++) {
read_lock(&binfmt_lock);
list_for_each_entry(fmt, &formats, lh) {
int (*fn)(struct linux_binprm *) = fmt->load_binary;
if (!fn)
continue;
if (!try_module_get(fmt->module))
continue;
read_unlock(&binfmt_lock);
bprm->recursion_depth = depth + 1;
retval = fn(bprm);
bprm->recursion_depth = depth;
if (retval >= 0) {
if (depth == 0) {
trace_sched_process_exec(current, old_pid, bprm);
ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
}
put_binfmt(fmt);
allow_write_access(bprm->file);
if (bprm->file)
fput(bprm->file);
bprm->file = NULL;
current->did_exec = 1;
proc_exec_connector(current);
return retval;
}
read_lock(&binfmt_lock);
put_binfmt(fmt);
if (retval != -ENOEXEC || bprm->mm == NULL)
break;
if (!bprm->file) {
read_unlock(&binfmt_lock);
return retval;
}
}
read_unlock(&binfmt_lock);
#ifdef CONFIG_MODULES
if (retval != -ENOEXEC || bprm->mm == NULL) {
break;
} else {
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
if (printable(bprm->buf[0]) &&
printable(bprm->buf[1]) &&
printable(bprm->buf[2]) &&
printable(bprm->buf[3]))
break; /* -ENOEXEC */
if (try)
break; /* -ENOEXEC */
request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
}
#else
break;
#endif
}
return retval;
}
EXPORT_SYMBOL(search_binary_handler);
/*
* sys_execve() executes a new program.
*/
static int do_execve_common(const char *filename,
struct user_arg_ptr argv,
struct user_arg_ptr envp)
{
struct linux_binprm *bprm;
struct file *file;
struct files_struct *displaced;
bool clear_in_exec;
int retval;
const struct cred *cred = current_cred();
/*
* We move the actual failure in case of RLIMIT_NPROC excess from
* set*uid() to execve() because too many poorly written programs
* don't check setuid() return code. Here we additionally recheck
* whether NPROC limit is still exceeded.
*/
if ((current->flags & PF_NPROC_EXCEEDED) &&
atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) {
retval = -EAGAIN;
goto out_ret;
}
/* We're below the limit (still or again), so we don't want to make
* further execve() calls fail. */
current->flags &= ~PF_NPROC_EXCEEDED;
retval = unshare_files(&displaced);
if (retval)
goto out_ret;
retval = -ENOMEM;
bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
if (!bprm)
goto out_files;
retval = prepare_bprm_creds(bprm);
if (retval)
goto out_free;
retval = check_unsafe_exec(bprm);
if (retval < 0)
goto out_free;
clear_in_exec = retval;
current->in_execve = 1;
file = open_exec(filename);
retval = PTR_ERR(file);
if (IS_ERR(file))
goto out_unmark;
sched_exec();
bprm->file = file;
bprm->filename = filename;
bprm->interp = filename;
retval = bprm_mm_init(bprm);
if (retval)
goto out_file;
bprm->argc = count(argv, MAX_ARG_STRINGS);
if ((retval = bprm->argc) < 0)
goto out;
bprm->envc = count(envp, MAX_ARG_STRINGS);
if ((retval = bprm->envc) < 0)
goto out;
retval = prepare_binprm(bprm);
if (retval < 0)
goto out;
retval = copy_strings_kernel(1, &bprm->filename, bprm);
if (retval < 0)
goto out;
bprm->exec = bprm->p;
retval = copy_strings(bprm->envc, envp, bprm);
if (retval < 0)
goto out;
retval = copy_strings(bprm->argc, argv, bprm);
if (retval < 0)
goto out;
retval = search_binary_handler(bprm);
if (retval < 0)
goto out;
/* execve succeeded */
current->fs->in_exec = 0;
current->in_execve = 0;
acct_update_integrals(current);
free_bprm(bprm);
if (displaced)
put_files_struct(displaced);
return retval;
out:
if (bprm->mm) {
acct_arg_size(bprm, 0);
mmput(bprm->mm);
}
out_file:
if (bprm->file) {
allow_write_access(bprm->file);
fput(bprm->file);
}
out_unmark:
if (clear_in_exec)
current->fs->in_exec = 0;
current->in_execve = 0;
out_free:
free_bprm(bprm);
out_files:
if (displaced)
reset_files_struct(displaced);
out_ret:
return retval;
}
int do_execve(const char *filename,
const char __user *const __user *__argv,
const char __user *const __user *__envp)
{
struct user_arg_ptr argv = { .ptr.native = __argv };
struct user_arg_ptr envp = { .ptr.native = __envp };
return do_execve_common(filename, argv, envp);
}
#ifdef CONFIG_COMPAT
static int compat_do_execve(const char *filename,
const compat_uptr_t __user *__argv,
const compat_uptr_t __user *__envp)
{
struct user_arg_ptr argv = {
.is_compat = true,
.ptr.compat = __argv,
};
struct user_arg_ptr envp = {
.is_compat = true,
.ptr.compat = __envp,
};
return do_execve_common(filename, argv, envp);
}
#endif
void set_binfmt(struct linux_binfmt *new)
{
struct mm_struct *mm = current->mm;
if (mm->binfmt)
module_put(mm->binfmt->module);
mm->binfmt = new;
if (new)
__module_get(new->module);
}
EXPORT_SYMBOL(set_binfmt);
/*
 * set_dumpable converts the traditional three-value dumpable to two flags and
 * stores them into mm->flags. It modifies the lower two bits of mm->flags, but
 * these bits are not changed atomically, so get_dumpable can observe an
 * intermediate state. To avoid unexpected behavior, get_dumpable returns
 * either the old dumpable value or the new one by paying attention to the
 * order in which the bits are modified.
*
* dumpable | mm->flags (binary)
* old new | initial interim final
* ---------+-----------------------
* 0 1 | 00 01 01
* 0 2 | 00 10(*) 11
* 1 0 | 01 00 00
* 1 2 | 01 11 11
* 2 0 | 11 10(*) 00
* 2 1 | 11 11 01
*
* (*) get_dumpable regards interim value of 10 as 11.
*/
void set_dumpable(struct mm_struct *mm, int value)
{
switch (value) {
case SUID_DUMPABLE_DISABLED:
clear_bit(MMF_DUMPABLE, &mm->flags);
smp_wmb();
clear_bit(MMF_DUMP_SECURELY, &mm->flags);
break;
case SUID_DUMPABLE_ENABLED:
set_bit(MMF_DUMPABLE, &mm->flags);
smp_wmb();
clear_bit(MMF_DUMP_SECURELY, &mm->flags);
break;
case SUID_DUMPABLE_SAFE:
set_bit(MMF_DUMP_SECURELY, &mm->flags);
smp_wmb();
set_bit(MMF_DUMPABLE, &mm->flags);
break;
}
}
int __get_dumpable(unsigned long mm_flags)
{
int ret;
ret = mm_flags & MMF_DUMPABLE_MASK;
return (ret > SUID_DUMPABLE_ENABLED) ? SUID_DUMPABLE_SAFE : ret;
}
int get_dumpable(struct mm_struct *mm)
{
return __get_dumpable(mm->flags);
}
#ifdef __ARCH_WANT_SYS_EXECVE
SYSCALL_DEFINE3(execve,
const char __user *, filename,
const char __user *const __user *, argv,
const char __user *const __user *, envp)
{
struct filename *path = getname(filename);
int error = PTR_ERR(path);
if (!IS_ERR(path)) {
error = do_execve(path->name, argv, envp);
putname(path);
}
return error;
}
#ifdef CONFIG_COMPAT
asmlinkage long compat_sys_execve(const char __user * filename,
const compat_uptr_t __user * argv,
const compat_uptr_t __user * envp)
{
struct filename *path = getname(filename);
int error = PTR_ERR(path);
if (!IS_ERR(path)) {
error = compat_do_execve(path->name, argv, envp);
putname(path);
}
return error;
}
#endif
#endif
#ifdef __ARCH_WANT_KERNEL_EXECVE
int kernel_execve(const char *filename,
const char *const argv[],
const char *const envp[])
{
int ret = do_execve(filename,
(const char __user *const __user *)argv,
(const char __user *const __user *)envp);
if (ret < 0)
return ret;
/*
* We were successful. We won't be returning to our caller, but
* instead to user space by manipulating the kernel stack.
*/
ret_from_kernel_execve(current_pt_regs());
}
#endif
/* ==== end of CrossVul sample bad_3769_2 (CWE-200, linux/fs/exec.c) ==== */

/* ==== CrossVul sample bad_2828_0 (CWE-200, SCSI generic (sg) driver) ==== */
/*
* History:
* Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
* to allow user process control of SCSI devices.
* Development Sponsored by Killy Corp. NY NY
*
* Original driver (sg.c):
* Copyright (C) 1992 Lawrence Foard
* Version 2 and 3 extensions to driver:
* Copyright (C) 1998 - 2014 Douglas Gilbert
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
*/
static int sg_version_num = 30536; /* 2 digits for each component */
#define SG_VERSION_STR "3.5.36"
/*
* D. P. Gilbert (dgilbert@interlog.com), notes:
* - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
* the kernel/module needs to be built with CONFIG_SCSI_LOGGING
* (otherwise the macros compile to empty statements).
*
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/mtio.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/ratelimit.h>
#include <linux/uio.h>
#include "scsi.h"
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/sg.h>
#include "scsi_logging.h"
#ifdef CONFIG_SCSI_PROC_FS
#include <linux/proc_fs.h>
static char *sg_version_date = "20140603";
static int sg_proc_init(void);
static void sg_proc_cleanup(void);
#endif
#define SG_ALLOW_DIO_DEF 0
#define SG_MAX_DEVS 32768
/* SG_MAX_CDB_SIZE should be 260 (spc4r37 section 3.1.30); however, the type
 * of sg_io_hdr::cmd_len can only represent 255. All SCSI commands greater
 * than 16 bytes are "variable length", whose length is a multiple of 4.
*/
#define SG_MAX_CDB_SIZE 252
#define SG_DEFAULT_TIMEOUT mult_frac(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
int sg_big_buff = SG_DEF_RESERVED_SIZE;
/* N.B. This variable is readable and writeable via
/proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
of this size (or less if there is not enough memory) will be reserved
for use by this file descriptor. [Deprecated usage: this variable is also
readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
the kernel (i.e. it is not a module).] */
static int def_reserved_size = -1; /* picks up init parameter */
static int sg_allow_dio = SG_ALLOW_DIO_DEF;
static int scatter_elem_sz = SG_SCATTER_SZ;
static int scatter_elem_sz_prev = SG_SCATTER_SZ;
#define SG_SECTOR_SZ 512
static int sg_add_device(struct device *, struct class_interface *);
static void sg_remove_device(struct device *, struct class_interface *);
static DEFINE_IDR(sg_index_idr);
static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock
file descriptor list for device */
static struct class_interface sg_interface = {
.add_dev = sg_add_device,
.remove_dev = sg_remove_device,
};
typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */
unsigned bufflen; /* Size of (aggregate) data buffer */
struct page **pages;
int page_order;
char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */
unsigned char cmd_opcode; /* first byte of command */
} Sg_scatter_hold;
struct sg_device; /* forward declarations */
struct sg_fd;
typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
struct list_head entry; /* list entry */
struct sg_fd *parentfp; /* NULL -> not in use */
Sg_scatter_hold data; /* hold buffer, perhaps scatter list */
sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */
unsigned char sense_b[SCSI_SENSE_BUFFERSIZE];
char res_used; /* 1 -> using reserve buffer, 0 -> not ... */
char orphan; /* 1 -> drop on sight, 0 -> normal */
char sg_io_owned; /* 1 -> packet belongs to SG_IO */
/* done protected by rq_list_lock */
char done; /* 0->before bh, 1->before read, 2->read */
struct request *rq;
struct bio *bio;
struct execute_work ew;
} Sg_request;
typedef struct sg_fd { /* holds the state of a file descriptor */
struct list_head sfd_siblings; /* protected by device's sfd_lock */
struct sg_device *parentdp; /* owning device */
wait_queue_head_t read_wait; /* queue read until command done */
rwlock_t rq_list_lock; /* protect access to list in req_arr */
struct mutex f_mutex; /* protect against changes in this fd */
int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */
Sg_scatter_hold reserve; /* buffer held for this file descriptor */
struct list_head rq_list; /* head of request list */
struct fasync_struct *async_qp; /* used by asynchronous notification */
Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */
char cmd_q; /* 1 -> allow command queuing, 0 -> don't */
unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */
char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */
char mmap_called; /* 0 -> mmap() never called on this fd */
char res_in_use; /* 1 -> 'reserve' array in use */
struct kref f_ref;
struct execute_work ew;
} Sg_fd;
typedef struct sg_device { /* holds the state of each scsi generic device */
struct scsi_device *device;
wait_queue_head_t open_wait; /* queue open() when O_EXCL present */
struct mutex open_rel_lock; /* held when in open() or release() */
int sg_tablesize; /* adapter's max scatter-gather table size */
u32 index; /* device index number */
struct list_head sfds;
rwlock_t sfd_lock; /* protect access to sfd list */
atomic_t detaching; /* 0->device usable, 1->device detaching */
bool exclude; /* 1->open(O_EXCL) succeeded and is active */
int open_cnt; /* count of opens (perhaps < num(sfds) ) */
char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
struct gendisk *disk;
struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */
struct kref d_ref;
} Sg_device;
/* tasklet or soft irq callback */
static void sg_rq_end_io(struct request *rq, blk_status_t status);
static int sg_start_req(Sg_request *srp, unsigned char *cmd);
static int sg_finish_rem_req(Sg_request * srp);
static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
Sg_request * srp);
static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
const char __user *buf, size_t count, int blocking,
int read_only, int sg_io_owned, Sg_request **o_srp);
static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
unsigned char *cmnd, int timeout, int blocking);
static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
static void sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp);
static void sg_build_reserve(Sg_fd * sfp, int req_size);
static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
static Sg_fd *sg_add_sfp(Sg_device * sdp);
static void sg_remove_sfp(struct kref *);
static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
static Sg_device *sg_get_dev(int dev);
static void sg_device_destroy(struct kref *kref);
#define SZ_SG_HEADER sizeof(struct sg_header)
#define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
#define SZ_SG_IOVEC sizeof(sg_iovec_t)
#define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
#define sg_printk(prefix, sdp, fmt, a...) \
sdev_prefix_printk(prefix, (sdp)->device, \
(sdp)->disk->disk_name, fmt, ##a)
static int sg_allow_access(struct file *filp, unsigned char *cmd)
{
struct sg_fd *sfp = filp->private_data;
if (sfp->parentdp->device->type == TYPE_SCANNER)
return 0;
return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
}
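/*
 * Editor's note (summary of open_wait() below, added for clarity):
 * an O_EXCL opener sleeps until every existing open is released
 * (open_cnt drops to 0), while a plain opener only sleeps while an
 * exclusive open is active (sdp->exclude). Both re-check for device
 * detach after each wakeup, and open_rel_lock is dropped around the
 * sleep so other opens and releases can make progress.
 */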
static int
open_wait(Sg_device *sdp, int flags)
{
int retval = 0;
if (flags & O_EXCL) {
while (sdp->open_cnt > 0) {
mutex_unlock(&sdp->open_rel_lock);
retval = wait_event_interruptible(sdp->open_wait,
(atomic_read(&sdp->detaching) ||
!sdp->open_cnt));
mutex_lock(&sdp->open_rel_lock);
if (retval) /* -ERESTARTSYS */
return retval;
if (atomic_read(&sdp->detaching))
return -ENODEV;
}
} else {
while (sdp->exclude) {
mutex_unlock(&sdp->open_rel_lock);
retval = wait_event_interruptible(sdp->open_wait,
(atomic_read(&sdp->detaching) ||
!sdp->exclude));
mutex_lock(&sdp->open_rel_lock);
if (retval) /* -ERESTARTSYS */
return retval;
if (atomic_read(&sdp->detaching))
return -ENODEV;
}
}
return retval;
}
/* Returns 0 on success, else a negated errno value */
static int
sg_open(struct inode *inode, struct file *filp)
{
int dev = iminor(inode);
int flags = filp->f_flags;
struct request_queue *q;
Sg_device *sdp;
Sg_fd *sfp;
int retval;
nonseekable_open(inode, filp);
if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE)))
return -EPERM; /* Can't lock it with read only access */
sdp = sg_get_dev(dev);
if (IS_ERR(sdp))
return PTR_ERR(sdp);
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_open: flags=0x%x\n", flags));
/* This driver's module count bumped by fops_get in <linux/fs.h> */
/* Prevent the device driver from vanishing while we sleep */
retval = scsi_device_get(sdp->device);
if (retval)
goto sg_put;
retval = scsi_autopm_get_device(sdp->device);
if (retval)
goto sdp_put;
/* scsi_block_when_processing_errors() may block so bypass
* check if O_NONBLOCK. Permits SCSI commands to be issued
* during error recovery. Tread carefully. */
if (!((flags & O_NONBLOCK) ||
scsi_block_when_processing_errors(sdp->device))) {
retval = -ENXIO;
/* we are in error recovery for this device */
goto error_out;
}
mutex_lock(&sdp->open_rel_lock);
if (flags & O_NONBLOCK) {
if (flags & O_EXCL) {
if (sdp->open_cnt > 0) {
retval = -EBUSY;
goto error_mutex_locked;
}
} else {
if (sdp->exclude) {
retval = -EBUSY;
goto error_mutex_locked;
}
}
} else {
retval = open_wait(sdp, flags);
if (retval) /* -ERESTARTSYS or -ENODEV */
goto error_mutex_locked;
}
/* N.B. at this point we are holding the open_rel_lock */
if (flags & O_EXCL)
sdp->exclude = true;
if (sdp->open_cnt < 1) { /* no existing opens */
sdp->sgdebug = 0;
q = sdp->device->request_queue;
sdp->sg_tablesize = queue_max_segments(q);
}
sfp = sg_add_sfp(sdp);
if (IS_ERR(sfp)) {
retval = PTR_ERR(sfp);
goto out_undo;
}
filp->private_data = sfp;
sdp->open_cnt++;
mutex_unlock(&sdp->open_rel_lock);
retval = 0;
sg_put:
kref_put(&sdp->d_ref, sg_device_destroy);
return retval;
out_undo:
if (flags & O_EXCL) {
sdp->exclude = false; /* undo if error */
wake_up_interruptible(&sdp->open_wait);
}
error_mutex_locked:
mutex_unlock(&sdp->open_rel_lock);
error_out:
scsi_autopm_put_device(sdp->device);
sdp_put:
scsi_device_put(sdp->device);
goto sg_put;
}
/* Release resources associated with a successful sg_open()
* Returns 0 on success, else a negated errno value */
static int
sg_release(struct inode *inode, struct file *filp)
{
Sg_device *sdp;
Sg_fd *sfp;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_release\n"));
mutex_lock(&sdp->open_rel_lock);
scsi_autopm_put_device(sdp->device);
kref_put(&sfp->f_ref, sg_remove_sfp);
sdp->open_cnt--;
/* possibly many open()s waiting on exclude clearing, start many;
* only open(O_EXCL)s wait on 0==open_cnt so only start one */
if (sdp->exclude) {
sdp->exclude = false;
wake_up_interruptible_all(&sdp->open_wait);
} else if (0 == sdp->open_cnt) {
wake_up_interruptible(&sdp->open_wait);
}
mutex_unlock(&sdp->open_rel_lock);
return 0;
}
static ssize_t
sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
{
Sg_device *sdp;
Sg_fd *sfp;
Sg_request *srp;
int req_pack_id = -1;
sg_io_hdr_t *hp;
struct sg_header *old_hdr = NULL;
int retval = 0;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_read: count=%d\n", (int) count));
if (!access_ok(VERIFY_WRITE, buf, count))
return -EFAULT;
if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
if (!old_hdr)
return -ENOMEM;
if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) {
retval = -EFAULT;
goto free_old_hdr;
}
if (old_hdr->reply_len < 0) {
if (count >= SZ_SG_IO_HDR) {
sg_io_hdr_t *new_hdr;
new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
if (!new_hdr) {
retval = -ENOMEM;
goto free_old_hdr;
}
retval =__copy_from_user
(new_hdr, buf, SZ_SG_IO_HDR);
req_pack_id = new_hdr->pack_id;
kfree(new_hdr);
if (retval) {
retval = -EFAULT;
goto free_old_hdr;
}
}
} else
req_pack_id = old_hdr->pack_id;
}
srp = sg_get_rq_mark(sfp, req_pack_id);
if (!srp) { /* now wait on packet to arrive */
if (atomic_read(&sdp->detaching)) {
retval = -ENODEV;
goto free_old_hdr;
}
if (filp->f_flags & O_NONBLOCK) {
retval = -EAGAIN;
goto free_old_hdr;
}
retval = wait_event_interruptible(sfp->read_wait,
(atomic_read(&sdp->detaching) ||
(srp = sg_get_rq_mark(sfp, req_pack_id))));
if (atomic_read(&sdp->detaching)) {
retval = -ENODEV;
goto free_old_hdr;
}
if (retval) {
/* -ERESTARTSYS as signal hit process */
goto free_old_hdr;
}
}
if (srp->header.interface_id != '\0') {
retval = sg_new_read(sfp, buf, count, srp);
goto free_old_hdr;
}
hp = &srp->header;
if (old_hdr == NULL) {
old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
if (! old_hdr) {
retval = -ENOMEM;
goto free_old_hdr;
}
}
memset(old_hdr, 0, SZ_SG_HEADER);
old_hdr->reply_len = (int) hp->timeout;
old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */
old_hdr->pack_id = hp->pack_id;
old_hdr->twelve_byte =
((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0;
old_hdr->target_status = hp->masked_status;
old_hdr->host_status = hp->host_status;
old_hdr->driver_status = hp->driver_status;
if ((CHECK_CONDITION & hp->masked_status) ||
(DRIVER_SENSE & hp->driver_status))
memcpy(old_hdr->sense_buffer, srp->sense_b,
sizeof (old_hdr->sense_buffer));
switch (hp->host_status) {
/* This setup of 'result' is for backward compatibility and is best
ignored by the user who should use target, host + driver status */
case DID_OK:
case DID_PASSTHROUGH:
case DID_SOFT_ERROR:
old_hdr->result = 0;
break;
case DID_NO_CONNECT:
case DID_BUS_BUSY:
case DID_TIME_OUT:
old_hdr->result = EBUSY;
break;
case DID_BAD_TARGET:
case DID_ABORT:
case DID_PARITY:
case DID_RESET:
case DID_BAD_INTR:
old_hdr->result = EIO;
break;
case DID_ERROR:
old_hdr->result = (srp->sense_b[0] == 0 &&
hp->masked_status == GOOD) ? 0 : EIO;
break;
default:
old_hdr->result = EIO;
break;
}
/* Now copy the result back to the user buffer. */
if (count >= SZ_SG_HEADER) {
if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
retval = -EFAULT;
goto free_old_hdr;
}
buf += SZ_SG_HEADER;
if (count > old_hdr->reply_len)
count = old_hdr->reply_len;
if (count > SZ_SG_HEADER) {
if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) {
retval = -EFAULT;
goto free_old_hdr;
}
}
} else
count = (old_hdr->result == 0) ? 0 : -EIO;
sg_finish_rem_req(srp);
sg_remove_request(sfp, srp);
retval = count;
free_old_hdr:
kfree(old_hdr);
return retval;
}
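/*
* Complete a v3 (sg_io_hdr_t) read: copy any sense data produced by the
* command into the user-supplied sense buffer, copy the updated header
* back to user space, then tear the request down.
*/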
static ssize_t
sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
{
sg_io_hdr_t *hp = &srp->header;
int err = 0, err2;
int len;
if (count < SZ_SG_IO_HDR) {
err = -EINVAL;
goto err_out;
}
hp->sb_len_wr = 0;
if ((hp->mx_sb_len > 0) && hp->sbp) {
if ((CHECK_CONDITION & hp->masked_status) ||
(DRIVER_SENSE & hp->driver_status)) {
int sb_len = SCSI_SENSE_BUFFERSIZE;
sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len;
len = 8 + (int) srp->sense_b[7]; /* Additional sense length field */
len = (len > sb_len) ? sb_len : len;
if (copy_to_user(hp->sbp, srp->sense_b, len)) {
err = -EFAULT;
goto err_out;
}
hp->sb_len_wr = len;
}
}
if (hp->masked_status || hp->host_status || hp->driver_status)
hp->info |= SG_INFO_CHECK;
if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) {
err = -EFAULT;
goto err_out;
}
err_out:
err2 = sg_finish_rem_req(srp);
sg_remove_request(sfp, srp);
return err ? : err2 ? : count;
}
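/*
* write(2) entry point for the legacy interface. A leading struct
* sg_header with reply_len < 0 marks a tunnelled v3 request, which is
* redirected to sg_new_write(); otherwise the SCSI command bytes that
* follow the header are parsed and queued here.
*/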
static ssize_t
sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
{
int mxsize, cmd_size, k;
int input_size, blocking;
unsigned char opcode;
Sg_device *sdp;
Sg_fd *sfp;
Sg_request *srp;
struct sg_header old_hdr;
sg_io_hdr_t *hp;
unsigned char cmnd[SG_MAX_CDB_SIZE];
if (unlikely(uaccess_kernel()))
return -EINVAL;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_write: count=%d\n", (int) count));
if (atomic_read(&sdp->detaching))
return -ENODEV;
if (!((filp->f_flags & O_NONBLOCK) ||
scsi_block_when_processing_errors(sdp->device)))
return -ENXIO;
if (!access_ok(VERIFY_READ, buf, count))
return -EFAULT; /* protects following copy_from_user()s + get_user()s */
if (count < SZ_SG_HEADER)
return -EIO;
if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
return -EFAULT;
blocking = !(filp->f_flags & O_NONBLOCK);
if (old_hdr.reply_len < 0)
return sg_new_write(sfp, filp, buf, count,
blocking, 0, 0, NULL);
if (count < (SZ_SG_HEADER + 6))
return -EIO; /* The minimum scsi command length is 6 bytes. */
if (!(srp = sg_add_request(sfp))) {
SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sdp,
"sg_write: queue full\n"));
return -EDOM;
}
buf += SZ_SG_HEADER;
__get_user(opcode, buf);
mutex_lock(&sfp->f_mutex);
if (sfp->next_cmd_len > 0) {
cmd_size = sfp->next_cmd_len;
sfp->next_cmd_len = 0; /* reset so only this write() affected */
} else {
cmd_size = COMMAND_SIZE(opcode); /* based on SCSI command group */
if ((opcode >= 0xc0) && old_hdr.twelve_byte)
cmd_size = 12;
}
mutex_unlock(&sfp->f_mutex);
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
"sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
/* Determine buffer size. */
input_size = count - cmd_size;
mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len;
mxsize -= SZ_SG_HEADER;
input_size -= SZ_SG_HEADER;
if (input_size < 0) {
sg_remove_request(sfp, srp);
return -EIO; /* User did not pass enough bytes for this command. */
}
hp = &srp->header;
hp->interface_id = '\0'; /* indicator of old interface tunnelled */
hp->cmd_len = (unsigned char) cmd_size;
hp->iovec_count = 0;
hp->mx_sb_len = 0;
if (input_size > 0)
hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV;
else
hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
hp->dxfer_len = mxsize;
if ((hp->dxfer_direction == SG_DXFER_TO_DEV) ||
(hp->dxfer_direction == SG_DXFER_TO_FROM_DEV))
hp->dxferp = (char __user *)buf + cmd_size;
else
hp->dxferp = NULL;
hp->sbp = NULL;
hp->timeout = old_hdr.reply_len; /* structure abuse ... */
hp->flags = input_size; /* structure abuse ... */
hp->pack_id = old_hdr.pack_id;
hp->usr_ptr = NULL;
if (__copy_from_user(cmnd, buf, cmd_size)) {
sg_remove_request(sfp, srp);
return -EFAULT;
}
/*
* SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
* but it is possible that the app intended SG_DXFER_TO_DEV, because there
* is a non-zero input_size, so emit a warning.
*/
if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) {
printk_ratelimited(KERN_WARNING
"sg_write: data in/out %d/%d bytes "
"for SCSI command 0x%x-- guessing "
"data in;\n program %s not setting "
"count and/or reply_len properly\n",
old_hdr.reply_len - (int)SZ_SG_HEADER,
input_size, (unsigned int) cmnd[0],
current->comm);
}
k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
return (k < 0) ? k : count;
}
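/*
* Queue a request described by a v3 sg_io_hdr_t, arriving either as a
* tunnelled write(2) or from the SG_IO ioctl (sg_io_owned set). The
* header, flag combinations and CDB are validated before the request is
* passed on to sg_common_write().
*/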
static ssize_t
sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
size_t count, int blocking, int read_only, int sg_io_owned,
Sg_request **o_srp)
{
int k;
Sg_request *srp;
sg_io_hdr_t *hp;
unsigned char cmnd[SG_MAX_CDB_SIZE];
int timeout;
unsigned long ul_timeout;
if (count < SZ_SG_IO_HDR)
return -EINVAL;
if (!access_ok(VERIFY_READ, buf, count))
return -EFAULT; /* protects following copy_from_user()s + get_user()s */
sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */
if (!(srp = sg_add_request(sfp))) {
SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
"sg_new_write: queue full\n"));
return -EDOM;
}
srp->sg_io_owned = sg_io_owned;
hp = &srp->header;
if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
sg_remove_request(sfp, srp);
return -EFAULT;
}
if (hp->interface_id != 'S') {
sg_remove_request(sfp, srp);
return -ENOSYS;
}
if (hp->flags & SG_FLAG_MMAP_IO) {
if (hp->dxfer_len > sfp->reserve.bufflen) {
sg_remove_request(sfp, srp);
return -ENOMEM; /* MMAP_IO size must fit in reserve buffer */
}
if (hp->flags & SG_FLAG_DIRECT_IO) {
sg_remove_request(sfp, srp);
return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */
}
if (sfp->res_in_use) {
sg_remove_request(sfp, srp);
return -EBUSY; /* reserve buffer already being used */
}
}
ul_timeout = msecs_to_jiffies(srp->header.timeout);
timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX;
if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) {
sg_remove_request(sfp, srp);
return -EMSGSIZE;
}
if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) {
sg_remove_request(sfp, srp);
return -EFAULT; /* protects following copy_from_user()s + get_user()s */
}
if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
sg_remove_request(sfp, srp);
return -EFAULT;
}
if (read_only && sg_allow_access(file, cmnd)) {
sg_remove_request(sfp, srp);
return -EPERM;
}
k = sg_common_write(sfp, srp, cmnd, timeout, blocking);
if (k < 0)
return k;
if (o_srp)
*o_srp = srp;
return count;
}
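/*
* Common back end for both write paths: reset the status fields of the
* request header, build the block layer request via sg_start_req() and
* issue it asynchronously; sg_rq_end_io() runs on completion.
*/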
static int
sg_common_write(Sg_fd * sfp, Sg_request * srp,
unsigned char *cmnd, int timeout, int blocking)
{
int k, at_head;
Sg_device *sdp = sfp->parentdp;
sg_io_hdr_t *hp = &srp->header;
srp->data.cmd_opcode = cmnd[0]; /* hold opcode of command */
hp->status = 0;
hp->masked_status = 0;
hp->msg_status = 0;
hp->info = 0;
hp->host_status = 0;
hp->driver_status = 0;
hp->resid = 0;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
(int) cmnd[0], (int) hp->cmd_len));
if (hp->dxfer_len >= SZ_256M)
return -EINVAL;
k = sg_start_req(srp, cmnd);
if (k) {
SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
"sg_common_write: start_req err=%d\n", k));
sg_finish_rem_req(srp);
sg_remove_request(sfp, srp);
return k; /* probably out of space --> ENOMEM */
}
if (atomic_read(&sdp->detaching)) {
if (srp->bio) {
scsi_req_free_cmd(scsi_req(srp->rq));
blk_end_request_all(srp->rq, BLK_STS_IOERR);
srp->rq = NULL;
}
sg_finish_rem_req(srp);
sg_remove_request(sfp, srp);
return -ENODEV;
}
hp->duration = jiffies_to_msecs(jiffies);
if (hp->interface_id != '\0' && /* v3 (or later) interface */
(SG_FLAG_Q_AT_TAIL & hp->flags))
at_head = 0;
else
at_head = 1;
srp->rq->timeout = timeout;
kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
srp->rq, at_head, sg_rq_end_io);
return 0;
}
static int srp_done(Sg_fd *sfp, Sg_request *srp)
{
unsigned long flags;
int ret;
read_lock_irqsave(&sfp->rq_list_lock, flags);
ret = srp->done;
read_unlock_irqrestore(&sfp->rq_list_lock, flags);
return ret;
}
static int max_sectors_bytes(struct request_queue *q)
{
unsigned int max_sectors = queue_max_sectors(q);
max_sectors = min_t(unsigned int, max_sectors, INT_MAX >> 9);
return max_sectors << 9;
}
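/* Snapshot the per-fd request list into the caller-supplied rinfo array
* (at most SG_MAX_QUEUE entries) for SG_GET_REQUEST_TABLE. The caller
* must hold rq_list_lock. */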
static void
sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
{
Sg_request *srp;
int val;
unsigned int ms;
val = 0;
list_for_each_entry(srp, &sfp->rq_list, entry) {
if (val >= SG_MAX_QUEUE)
break;
memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
rinfo[val].req_state = srp->done + 1;
rinfo[val].problem =
srp->header.masked_status &
srp->header.host_status &
srp->header.driver_status;
if (srp->done)
rinfo[val].duration =
srp->header.duration;
else {
ms = jiffies_to_msecs(jiffies);
rinfo[val].duration =
(ms > srp->header.duration) ?
(ms - srp->header.duration) : 0;
}
rinfo[val].orphan = srp->orphan;
rinfo[val].sg_io_owned = srp->sg_io_owned;
rinfo[val].pack_id = srp->header.pack_id;
rinfo[val].usr_ptr = srp->header.usr_ptr;
val++;
}
}
static long
sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
void __user *p = (void __user *)arg;
int __user *ip = p;
int result, val, read_only;
Sg_device *sdp;
Sg_fd *sfp;
Sg_request *srp;
unsigned long iflags;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_ioctl: cmd=0x%x\n", (int) cmd_in));
read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));
switch (cmd_in) {
case SG_IO:
if (atomic_read(&sdp->detaching))
return -ENODEV;
if (!scsi_block_when_processing_errors(sdp->device))
return -ENXIO;
if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
return -EFAULT;
result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
1, read_only, 1, &srp);
if (result < 0)
return result;
result = wait_event_interruptible(sfp->read_wait,
(srp_done(sfp, srp) || atomic_read(&sdp->detaching)));
if (atomic_read(&sdp->detaching))
return -ENODEV;
write_lock_irq(&sfp->rq_list_lock);
if (srp->done) {
srp->done = 2;
write_unlock_irq(&sfp->rq_list_lock);
result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
return (result < 0) ? result : 0;
}
srp->orphan = 1;
write_unlock_irq(&sfp->rq_list_lock);
return result; /* -ERESTARTSYS because signal hit process */
case SG_SET_TIMEOUT:
result = get_user(val, ip);
if (result)
return result;
if (val < 0)
return -EIO;
if (val >= mult_frac((s64)INT_MAX, USER_HZ, HZ))
val = min_t(s64, mult_frac((s64)INT_MAX, USER_HZ, HZ),
INT_MAX);
sfp->timeout_user = val;
sfp->timeout = mult_frac(val, HZ, USER_HZ);
return 0;
case SG_GET_TIMEOUT: /* N.B. User receives timeout as return value */
/* strange ..., for backward compatibility */
return sfp->timeout_user;
case SG_SET_FORCE_LOW_DMA:
/*
* N.B. This ioctl never worked properly, but failed to
* return an error value. So returning '0' to keep compatibility
* with legacy applications.
*/
return 0;
case SG_GET_LOW_DMA:
return put_user((int) sdp->device->host->unchecked_isa_dma, ip);
case SG_GET_SCSI_ID:
if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
return -EFAULT;
else {
sg_scsi_id_t __user *sg_idp = p;
if (atomic_read(&sdp->detaching))
return -ENODEV;
__put_user((int) sdp->device->host->host_no,
&sg_idp->host_no);
__put_user((int) sdp->device->channel,
&sg_idp->channel);
__put_user((int) sdp->device->id, &sg_idp->scsi_id);
__put_user((int) sdp->device->lun, &sg_idp->lun);
__put_user((int) sdp->device->type, &sg_idp->scsi_type);
__put_user((short) sdp->device->host->cmd_per_lun,
&sg_idp->h_cmd_per_lun);
__put_user((short) sdp->device->queue_depth,
&sg_idp->d_queue_depth);
__put_user(0, &sg_idp->unused[0]);
__put_user(0, &sg_idp->unused[1]);
return 0;
}
case SG_SET_FORCE_PACK_ID:
result = get_user(val, ip);
if (result)
return result;
sfp->force_packid = val ? 1 : 0;
return 0;
case SG_GET_PACK_ID:
if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
return -EFAULT;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
list_for_each_entry(srp, &sfp->rq_list, entry) {
if ((1 == srp->done) && (!srp->sg_io_owned)) {
read_unlock_irqrestore(&sfp->rq_list_lock,
iflags);
__put_user(srp->header.pack_id, ip);
return 0;
}
}
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
__put_user(-1, ip);
return 0;
case SG_GET_NUM_WAITING:
read_lock_irqsave(&sfp->rq_list_lock, iflags);
val = 0;
list_for_each_entry(srp, &sfp->rq_list, entry) {
if ((1 == srp->done) && (!srp->sg_io_owned))
++val;
}
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return put_user(val, ip);
case SG_GET_SG_TABLESIZE:
return put_user(sdp->sg_tablesize, ip);
case SG_SET_RESERVED_SIZE:
result = get_user(val, ip);
if (result)
return result;
if (val < 0)
return -EINVAL;
val = min_t(int, val,
max_sectors_bytes(sdp->device->request_queue));
mutex_lock(&sfp->f_mutex);
if (val != sfp->reserve.bufflen) {
if (sfp->mmap_called ||
sfp->res_in_use) {
mutex_unlock(&sfp->f_mutex);
return -EBUSY;
}
sg_remove_scat(sfp, &sfp->reserve);
sg_build_reserve(sfp, val);
}
mutex_unlock(&sfp->f_mutex);
return 0;
case SG_GET_RESERVED_SIZE:
val = min_t(int, sfp->reserve.bufflen,
max_sectors_bytes(sdp->device->request_queue));
return put_user(val, ip);
case SG_SET_COMMAND_Q:
result = get_user(val, ip);
if (result)
return result;
sfp->cmd_q = val ? 1 : 0;
return 0;
case SG_GET_COMMAND_Q:
return put_user((int) sfp->cmd_q, ip);
case SG_SET_KEEP_ORPHAN:
result = get_user(val, ip);
if (result)
return result;
sfp->keep_orphan = val;
return 0;
case SG_GET_KEEP_ORPHAN:
return put_user((int) sfp->keep_orphan, ip);
case SG_NEXT_CMD_LEN:
result = get_user(val, ip);
if (result)
return result;
if (val > SG_MAX_CDB_SIZE)
return -ENOMEM;
sfp->next_cmd_len = (val > 0) ? val : 0;
return 0;
case SG_GET_VERSION_NUM:
return put_user(sg_version_num, ip);
case SG_GET_ACCESS_COUNT:
/* faked - we don't have a real access count anymore */
val = (sdp->device ? 1 : 0);
return put_user(val, ip);
case SG_GET_REQUEST_TABLE:
if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
return -EFAULT;
else {
sg_req_info_t *rinfo;
rinfo = kcalloc(SG_MAX_QUEUE, SZ_SG_REQ_INFO,
GFP_KERNEL);
if (!rinfo)
return -ENOMEM;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
sg_fill_request_table(sfp, rinfo);
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
result = __copy_to_user(p, rinfo,
SZ_SG_REQ_INFO * SG_MAX_QUEUE);
result = result ? -EFAULT : 0;
kfree(rinfo);
return result;
}
case SG_EMULATED_HOST:
if (atomic_read(&sdp->detaching))
return -ENODEV;
return put_user(sdp->device->host->hostt->emulated, ip);
case SCSI_IOCTL_SEND_COMMAND:
if (atomic_read(&sdp->detaching))
return -ENODEV;
if (read_only) {
unsigned char opcode = WRITE_6;
Scsi_Ioctl_Command __user *siocp = p;
if (copy_from_user(&opcode, siocp->data, 1))
return -EFAULT;
if (sg_allow_access(filp, &opcode))
return -EPERM;
}
return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p);
case SG_SET_DEBUG:
result = get_user(val, ip);
if (result)
return result;
sdp->sgdebug = (char) val;
return 0;
case BLKSECTGET:
return put_user(max_sectors_bytes(sdp->device->request_queue),
ip);
case BLKTRACESETUP:
return blk_trace_setup(sdp->device->request_queue,
sdp->disk->disk_name,
MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
NULL, p);
case BLKTRACESTART:
return blk_trace_startstop(sdp->device->request_queue, 1);
case BLKTRACESTOP:
return blk_trace_startstop(sdp->device->request_queue, 0);
case BLKTRACETEARDOWN:
return blk_trace_remove(sdp->device->request_queue);
case SCSI_IOCTL_GET_IDLUN:
case SCSI_IOCTL_GET_BUS_NUMBER:
case SCSI_IOCTL_PROBE_HOST:
case SG_GET_TRANSFORM:
case SG_SCSI_RESET:
if (atomic_read(&sdp->detaching))
return -ENODEV;
break;
default:
if (read_only)
return -EPERM; /* don't know so take safe approach */
break;
}
result = scsi_ioctl_block_when_processing_errors(sdp->device,
cmd_in, filp->f_flags & O_NDELAY);
if (result)
return result;
return scsi_ioctl(sdp->device, cmd_in, p);
}
#ifdef CONFIG_COMPAT
static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
Sg_device *sdp;
Sg_fd *sfp;
struct scsi_device *sdev;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
sdev = sdp->device;
if (sdev->host->hostt->compat_ioctl) {
int ret;
ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
return ret;
}
return -ENOIOCTLCMD;
}
#endif
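/*
* poll(2) support: readable once a request has completed and is not
* owned by a blocking SG_IO; writable while the command queue (or the
* single slot, when queuing is off) still has room.
*/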
static unsigned int
sg_poll(struct file *filp, poll_table * wait)
{
unsigned int res = 0;
Sg_device *sdp;
Sg_fd *sfp;
Sg_request *srp;
int count = 0;
unsigned long iflags;
sfp = filp->private_data;
if (!sfp)
return POLLERR;
sdp = sfp->parentdp;
if (!sdp)
return POLLERR;
poll_wait(filp, &sfp->read_wait, wait);
read_lock_irqsave(&sfp->rq_list_lock, iflags);
list_for_each_entry(srp, &sfp->rq_list, entry) {
/* if any read waiting, flag it */
if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
res = POLLIN | POLLRDNORM;
++count;
}
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
if (atomic_read(&sdp->detaching))
res |= POLLHUP;
else if (!sfp->cmd_q) {
if (0 == count)
res |= POLLOUT | POLLWRNORM;
} else if (count < SG_MAX_QUEUE)
res |= POLLOUT | POLLWRNORM;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_poll: res=0x%x\n", (int) res));
return res;
}
static int
sg_fasync(int fd, struct file *filp, int mode)
{
Sg_device *sdp;
Sg_fd *sfp;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_fasync: mode=%d\n", mode));
return fasync_helper(fd, filp, mode, &sfp->async_qp);
}
static int
sg_vma_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
Sg_fd *sfp;
unsigned long offset, len, sa;
Sg_scatter_hold *rsv_schp;
int k, length;
if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
return VM_FAULT_SIGBUS;
rsv_schp = &sfp->reserve;
offset = vmf->pgoff << PAGE_SHIFT;
if (offset >= rsv_schp->bufflen)
return VM_FAULT_SIGBUS;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp,
"sg_vma_fault: offset=%lu, scatg=%d\n",
offset, rsv_schp->k_use_sg));
sa = vma->vm_start;
length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
len = vma->vm_end - sa;
len = (len < length) ? len : length;
if (offset < len) {
struct page *page = nth_page(rsv_schp->pages[k],
offset >> PAGE_SHIFT);
get_page(page); /* increment page count */
vmf->page = page;
return 0; /* success */
}
sa += len;
offset -= len;
}
return VM_FAULT_SIGBUS;
}
static const struct vm_operations_struct sg_mmap_vm_ops = {
.fault = sg_vma_fault,
};
static int
sg_mmap(struct file *filp, struct vm_area_struct *vma)
{
Sg_fd *sfp;
unsigned long req_sz, len, sa;
Sg_scatter_hold *rsv_schp;
int k, length;
int ret = 0;
if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
return -ENXIO;
req_sz = vma->vm_end - vma->vm_start;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp,
"sg_mmap starting, vm_start=%p, len=%d\n",
(void *) vma->vm_start, (int) req_sz));
if (vma->vm_pgoff)
return -EINVAL; /* want no offset */
rsv_schp = &sfp->reserve;
mutex_lock(&sfp->f_mutex);
if (req_sz > rsv_schp->bufflen) {
ret = -ENOMEM; /* cannot map more than reserved buffer */
goto out;
}
sa = vma->vm_start;
length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
len = vma->vm_end - sa;
len = (len < length) ? len : length;
sa += len;
}
sfp->mmap_called = 1;
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = sfp;
vma->vm_ops = &sg_mmap_vm_ops;
out:
mutex_unlock(&sfp->f_mutex);
return ret;
}
static void
sg_rq_end_io_usercontext(struct work_struct *work)
{
struct sg_request *srp = container_of(work, struct sg_request, ew.work);
struct sg_fd *sfp = srp->parentfp;
sg_finish_rem_req(srp);
sg_remove_request(sfp, srp);
kref_put(&sfp->f_ref, sg_remove_sfp);
}
/*
* This function is a "bottom half" handler that is called by the mid
* level when a command is completed (or has failed).
*/
static void
sg_rq_end_io(struct request *rq, blk_status_t status)
{
struct sg_request *srp = rq->end_io_data;
struct scsi_request *req = scsi_req(rq);
Sg_device *sdp;
Sg_fd *sfp;
unsigned long iflags;
unsigned int ms;
char *sense;
int result, resid, done = 1;
if (WARN_ON(srp->done != 0))
return;
sfp = srp->parentfp;
if (WARN_ON(sfp == NULL))
return;
sdp = sfp->parentdp;
if (unlikely(atomic_read(&sdp->detaching)))
pr_info("%s: device detaching\n", __func__);
sense = req->sense;
result = req->result;
resid = req->resid_len;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
"sg_cmd_done: pack_id=%d, res=0x%x\n",
srp->header.pack_id, result));
srp->header.resid = resid;
ms = jiffies_to_msecs(jiffies);
srp->header.duration = (ms > srp->header.duration) ?
(ms - srp->header.duration) : 0;
if (0 != result) {
struct scsi_sense_hdr sshdr;
srp->header.status = 0xff & result;
srp->header.masked_status = status_byte(result);
srp->header.msg_status = msg_byte(result);
srp->header.host_status = host_byte(result);
srp->header.driver_status = driver_byte(result);
if ((sdp->sgdebug > 0) &&
((CHECK_CONDITION == srp->header.masked_status) ||
(COMMAND_TERMINATED == srp->header.masked_status)))
__scsi_print_sense(sdp->device, __func__, sense,
SCSI_SENSE_BUFFERSIZE);
/* Following if statement is a patch supplied by Eric Youngdale */
if (driver_byte(result) != 0
&& scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)
&& !scsi_sense_is_deferred(&sshdr)
&& sshdr.sense_key == UNIT_ATTENTION
&& sdp->device->removable) {
/* Detected possible disc change. Set the bit - this */
/* may be used if there are filesystems using this device */
sdp->device->changed = 1;
}
}
if (req->sense_len)
memcpy(srp->sense_b, req->sense, SCSI_SENSE_BUFFERSIZE);
/* Rely on write phase to clean out srp status values, so no "else" */
/*
* Free the request as soon as it is complete so that its resources
* can be reused without waiting for userspace to read() the
* result. But keep the associated bio (if any) around until
* blk_rq_unmap_user() can be called from user context.
*/
srp->rq = NULL;
scsi_req_free_cmd(scsi_req(rq));
__blk_put_request(rq->q, rq);
write_lock_irqsave(&sfp->rq_list_lock, iflags);
if (unlikely(srp->orphan)) {
if (sfp->keep_orphan)
srp->sg_io_owned = 0;
else
done = 0;
}
srp->done = done;
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
if (likely(done)) {
/* Now wake up any sg_read() that is waiting for this
* packet.
*/
wake_up_interruptible(&sfp->read_wait);
kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
kref_put(&sfp->f_ref, sg_remove_sfp);
} else {
INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext);
schedule_work(&srp->ew.work);
}
}
static const struct file_operations sg_fops = {
.owner = THIS_MODULE,
.read = sg_read,
.write = sg_write,
.poll = sg_poll,
.unlocked_ioctl = sg_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = sg_compat_ioctl,
#endif
.open = sg_open,
.mmap = sg_mmap,
.release = sg_release,
.fasync = sg_fasync,
.llseek = no_llseek,
};
static struct class *sg_sysfs_class;
static int sg_sysfs_valid = 0;
static Sg_device *
sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
{
struct request_queue *q = scsidp->request_queue;
Sg_device *sdp;
unsigned long iflags;
int error;
u32 k;
sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL);
if (!sdp) {
sdev_printk(KERN_WARNING, scsidp, "%s: kmalloc Sg_device "
"failure\n", __func__);
return ERR_PTR(-ENOMEM);
}
idr_preload(GFP_KERNEL);
write_lock_irqsave(&sg_index_lock, iflags);
error = idr_alloc(&sg_index_idr, sdp, 0, SG_MAX_DEVS, GFP_NOWAIT);
if (error < 0) {
if (error == -ENOSPC) {
sdev_printk(KERN_WARNING, scsidp,
"Unable to attach sg device type=%d, minor number exceeds %d\n",
scsidp->type, SG_MAX_DEVS - 1);
error = -ENODEV;
} else {
sdev_printk(KERN_WARNING, scsidp, "%s: idr "
"allocation Sg_device failure: %d\n",
__func__, error);
}
goto out_unlock;
}
k = error;
SCSI_LOG_TIMEOUT(3, sdev_printk(KERN_INFO, scsidp,
"sg_alloc: dev=%d \n", k));
sprintf(disk->disk_name, "sg%d", k);
disk->first_minor = k;
sdp->disk = disk;
sdp->device = scsidp;
mutex_init(&sdp->open_rel_lock);
INIT_LIST_HEAD(&sdp->sfds);
init_waitqueue_head(&sdp->open_wait);
atomic_set(&sdp->detaching, 0);
rwlock_init(&sdp->sfd_lock);
sdp->sg_tablesize = queue_max_segments(q);
sdp->index = k;
kref_init(&sdp->d_ref);
error = 0;
out_unlock:
write_unlock_irqrestore(&sg_index_lock, iflags);
idr_preload_end();
if (error) {
kfree(sdp);
return ERR_PTR(error);
}
return sdp;
}
static int
sg_add_device(struct device *cl_dev, struct class_interface *cl_intf)
{
struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
struct gendisk *disk;
Sg_device *sdp = NULL;
struct cdev * cdev = NULL;
int error;
unsigned long iflags;
disk = alloc_disk(1);
if (!disk) {
pr_warn("%s: alloc_disk failed\n", __func__);
return -ENOMEM;
}
disk->major = SCSI_GENERIC_MAJOR;
error = -ENOMEM;
cdev = cdev_alloc();
if (!cdev) {
pr_warn("%s: cdev_alloc failed\n", __func__);
goto out;
}
cdev->owner = THIS_MODULE;
cdev->ops = &sg_fops;
sdp = sg_alloc(disk, scsidp);
if (IS_ERR(sdp)) {
pr_warn("%s: sg_alloc failed\n", __func__);
error = PTR_ERR(sdp);
goto out;
}
error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1);
if (error)
goto cdev_add_err;
sdp->cdev = cdev;
if (sg_sysfs_valid) {
struct device *sg_class_member;
sg_class_member = device_create(sg_sysfs_class, cl_dev->parent,
MKDEV(SCSI_GENERIC_MAJOR,
sdp->index),
sdp, "%s", disk->disk_name);
if (IS_ERR(sg_class_member)) {
pr_err("%s: device_create failed\n", __func__);
error = PTR_ERR(sg_class_member);
goto cdev_add_err;
}
error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
&sg_class_member->kobj, "generic");
if (error)
pr_err("%s: unable to make symlink 'generic' back "
"to sg%d\n", __func__, sdp->index);
} else
pr_warn("%s: sg_sys Invalid\n", __func__);
sdev_printk(KERN_NOTICE, scsidp, "Attached scsi generic sg%d "
"type %d\n", sdp->index, scsidp->type);
dev_set_drvdata(cl_dev, sdp);
return 0;
cdev_add_err:
write_lock_irqsave(&sg_index_lock, iflags);
idr_remove(&sg_index_idr, sdp->index);
write_unlock_irqrestore(&sg_index_lock, iflags);
kfree(sdp);
out:
put_disk(disk);
if (cdev)
cdev_del(cdev);
return error;
}
static void
sg_device_destroy(struct kref *kref)
{
struct sg_device *sdp = container_of(kref, struct sg_device, d_ref);
unsigned long flags;
/* CAUTION! Note that the device can still be found via idr_find()
* even though the refcount is 0. Therefore, do idr_remove() BEFORE
* any other cleanup.
*/
write_lock_irqsave(&sg_index_lock, flags);
idr_remove(&sg_index_idr, sdp->index);
write_unlock_irqrestore(&sg_index_lock, flags);
SCSI_LOG_TIMEOUT(3,
sg_printk(KERN_INFO, sdp, "sg_device_destroy\n"));
put_disk(sdp->disk);
kfree(sdp);
}
static void
sg_remove_device(struct device *cl_dev, struct class_interface *cl_intf)
{
struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
Sg_device *sdp = dev_get_drvdata(cl_dev);
unsigned long iflags;
Sg_fd *sfp;
int val;
if (!sdp)
return;
/* want sdp->detaching non-zero as soon as possible */
val = atomic_inc_return(&sdp->detaching);
if (val > 1)
return; /* only want to do following once per device */
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"%s\n", __func__));
read_lock_irqsave(&sdp->sfd_lock, iflags);
list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
wake_up_interruptible_all(&sfp->read_wait);
kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
}
wake_up_interruptible_all(&sdp->open_wait);
read_unlock_irqrestore(&sdp->sfd_lock, iflags);
sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
cdev_del(sdp->cdev);
sdp->cdev = NULL;
kref_put(&sdp->d_ref, sg_device_destroy);
}
module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR);
module_param_named(def_reserved_size, def_reserved_size, int,
S_IRUGO | S_IWUSR);
module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR);
MODULE_AUTHOR("Douglas Gilbert");
MODULE_DESCRIPTION("SCSI generic (sg) driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SG_VERSION_STR);
MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR);
MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element "
"size (default: max(SG_SCATTER_SZ, PAGE_SIZE))");
MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
static int __init
init_sg(void)
{
int rc;
if (scatter_elem_sz < PAGE_SIZE) {
scatter_elem_sz = PAGE_SIZE;
scatter_elem_sz_prev = scatter_elem_sz;
}
if (def_reserved_size >= 0)
sg_big_buff = def_reserved_size;
else
def_reserved_size = sg_big_buff;
rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
SG_MAX_DEVS, "sg");
if (rc)
return rc;
sg_sysfs_class = class_create(THIS_MODULE, "scsi_generic");
if (IS_ERR(sg_sysfs_class)) {
rc = PTR_ERR(sg_sysfs_class);
goto err_out;
}
sg_sysfs_valid = 1;
rc = scsi_register_interface(&sg_interface);
if (0 == rc) {
#ifdef CONFIG_SCSI_PROC_FS
sg_proc_init();
#endif /* CONFIG_SCSI_PROC_FS */
return 0;
}
class_destroy(sg_sysfs_class);
err_out:
unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
return rc;
}
static void __exit
exit_sg(void)
{
#ifdef CONFIG_SCSI_PROC_FS
sg_proc_cleanup();
#endif /* CONFIG_SCSI_PROC_FS */
scsi_unregister_interface(&sg_interface);
class_destroy(sg_sysfs_class);
sg_sysfs_valid = 0;
unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
SG_MAX_DEVS);
idr_destroy(&sg_index_idr);
}
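/*
* Translate an sg request into a block layer request: allocate the
* struct request, attach the (possibly long) CDB and map the user data,
* either directly for SG_FLAG_DIRECT_IO, through the per-fd reserve
* buffer, or through a freshly built indirect scatter list.
*/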
static int
sg_start_req(Sg_request *srp, unsigned char *cmd)
{
int res;
struct request *rq;
struct scsi_request *req;
Sg_fd *sfp = srp->parentfp;
sg_io_hdr_t *hp = &srp->header;
int dxfer_len = (int) hp->dxfer_len;
int dxfer_dir = hp->dxfer_direction;
unsigned int iov_count = hp->iovec_count;
Sg_scatter_hold *req_schp = &srp->data;
Sg_scatter_hold *rsv_schp = &sfp->reserve;
struct request_queue *q = sfp->parentdp->device->request_queue;
struct rq_map_data *md, map_data;
int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;
unsigned char *long_cmdp = NULL;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_start_req: dxfer_len=%d\n",
dxfer_len));
if (hp->cmd_len > BLK_MAX_CDB) {
long_cmdp = kzalloc(hp->cmd_len, GFP_KERNEL);
if (!long_cmdp)
return -ENOMEM;
}
/*
* NOTE
*
* With scsi-mq enabled, there are a fixed number of preallocated
* requests equal in number to shost->can_queue. If all of the
* preallocated requests are already in use, then using GFP_ATOMIC with
* blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL
* will cause blk_get_request() to sleep until an active command
* completes, freeing up a request. Neither option is ideal, but
* GFP_KERNEL is the better choice to prevent userspace from getting an
* unexpected EWOULDBLOCK.
*
* With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
* does not sleep except under memory pressure.
*/
rq = blk_get_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, GFP_KERNEL);
if (IS_ERR(rq)) {
kfree(long_cmdp);
return PTR_ERR(rq);
}
req = scsi_req(rq);
if (hp->cmd_len > BLK_MAX_CDB)
req->cmd = long_cmdp;
memcpy(req->cmd, cmd, hp->cmd_len);
req->cmd_len = hp->cmd_len;
srp->rq = rq;
rq->end_io_data = srp;
req->retries = SG_DEFAULT_RETRIES;
if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
return 0;
if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO &&
dxfer_dir != SG_DXFER_UNKNOWN && !iov_count &&
!sfp->parentdp->device->host->unchecked_isa_dma &&
blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len))
md = NULL;
else
md = &map_data;
if (md) {
mutex_lock(&sfp->f_mutex);
if (dxfer_len <= rsv_schp->bufflen &&
!sfp->res_in_use) {
sfp->res_in_use = 1;
sg_link_reserve(sfp, srp, dxfer_len);
} else if (hp->flags & SG_FLAG_MMAP_IO) {
res = -EBUSY; /* sfp->res_in_use == 1 */
if (dxfer_len > rsv_schp->bufflen)
res = -ENOMEM;
mutex_unlock(&sfp->f_mutex);
return res;
} else {
res = sg_build_indirect(req_schp, sfp, dxfer_len);
if (res) {
mutex_unlock(&sfp->f_mutex);
return res;
}
}
mutex_unlock(&sfp->f_mutex);
md->pages = req_schp->pages;
md->page_order = req_schp->page_order;
md->nr_entries = req_schp->k_use_sg;
md->offset = 0;
md->null_mapped = hp->dxferp ? 0 : 1;
if (dxfer_dir == SG_DXFER_TO_FROM_DEV)
md->from_user = 1;
else
md->from_user = 0;
}
if (iov_count) {
struct iovec *iov = NULL;
struct iov_iter i;
res = import_iovec(rw, hp->dxferp, iov_count, 0, &iov, &i);
if (res < 0)
return res;
iov_iter_truncate(&i, hp->dxfer_len);
if (!iov_iter_count(&i)) {
kfree(iov);
return -EINVAL;
}
res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC);
kfree(iov);
} else
res = blk_rq_map_user(q, rq, md, hp->dxferp,
hp->dxfer_len, GFP_ATOMIC);
if (!res) {
srp->bio = rq->bio;
if (!md) {
req_schp->dio_in_use = 1;
hp->info |= SG_INFO_DIRECT_IO;
}
}
return res;
}
static int
sg_finish_rem_req(Sg_request *srp)
{
int ret = 0;
Sg_fd *sfp = srp->parentfp;
Sg_scatter_hold *req_schp = &srp->data;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_finish_rem_req: res_used=%d\n",
(int) srp->res_used));
if (srp->bio)
ret = blk_rq_unmap_user(srp->bio);
if (srp->rq) {
scsi_req_free_cmd(scsi_req(srp->rq));
blk_put_request(srp->rq);
}
if (srp->res_used)
sg_unlink_reserve(sfp, srp);
else
sg_remove_scat(sfp, req_schp);
return ret;
}
static int
sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
{
int sg_bufflen = tablesize * sizeof(struct page *);
gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
schp->pages = kzalloc(sg_bufflen, gfp_flags);
if (!schp->pages)
return -ENOMEM;
schp->sglist_len = sg_bufflen;
return tablesize; /* number of scat_gath elements allocated */
}
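/*
* Build an indirect scatter list of up to sg_tablesize page groups
* covering buff_size bytes. Allocation starts at the order implied by
* scatter_elem_sz; if a higher-order allocation fails, everything is
* freed and the whole list is retried one order lower.
*/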
static int
sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
{
int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
int sg_tablesize = sfp->parentdp->sg_tablesize;
int blk_size = buff_size, order;
gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
struct sg_device *sdp = sfp->parentdp;
if (blk_size < 0)
return -EFAULT;
if (0 == blk_size)
++blk_size; /* don't know why */
/* round request up to next highest SG_SECTOR_SZ byte boundary */
blk_size = ALIGN(blk_size, SG_SECTOR_SZ);
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_build_indirect: buff_size=%d, blk_size=%d\n",
buff_size, blk_size));
/* N.B. ret_sz carried into this block ... */
mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
if (mx_sc_elems < 0)
return mx_sc_elems; /* most likely -ENOMEM */
num = scatter_elem_sz;
if (unlikely(num != scatter_elem_sz_prev)) {
if (num < PAGE_SIZE) {
scatter_elem_sz = PAGE_SIZE;
scatter_elem_sz_prev = PAGE_SIZE;
} else
scatter_elem_sz_prev = num;
}
if (sdp->device->host->unchecked_isa_dma)
gfp_mask |= GFP_DMA;
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
gfp_mask |= __GFP_ZERO;
order = get_order(num);
retry:
ret_sz = 1 << (PAGE_SHIFT + order);
for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems;
k++, rem_sz -= ret_sz) {
num = (rem_sz > scatter_elem_sz_prev) ?
scatter_elem_sz_prev : rem_sz;
schp->pages[k] = alloc_pages(gfp_mask, order);
if (!schp->pages[k])
goto out;
if (num == scatter_elem_sz_prev) {
if (unlikely(ret_sz > scatter_elem_sz_prev)) {
scatter_elem_sz = ret_sz;
scatter_elem_sz_prev = ret_sz;
}
}
SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp,
"sg_build_indirect: k=%d, num=%d, ret_sz=%d\n",
k, num, ret_sz));
} /* end of for loop */
schp->page_order = order;
schp->k_use_sg = k;
SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp,
"sg_build_indirect: k_use_sg=%d, rem_sz=%d\n",
k, rem_sz));
schp->bufflen = blk_size;
if (rem_sz > 0) /* must have failed */
return -ENOMEM;
return 0;
out:
for (i = 0; i < k; i++)
__free_pages(schp->pages[i], order);
if (--order >= 0)
goto retry;
return -ENOMEM;
}
static void
sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp)
{
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
if (schp->pages && schp->sglist_len > 0) {
if (!schp->dio_in_use) {
int k;
for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
SCSI_LOG_TIMEOUT(5,
sg_printk(KERN_INFO, sfp->parentdp,
"sg_remove_scat: k=%d, pg=0x%p\n",
k, schp->pages[k]));
__free_pages(schp->pages[k], schp->page_order);
}
kfree(schp->pages);
}
}
memset(schp, 0, sizeof (*schp));
}
static int
sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
{
Sg_scatter_hold *schp = &srp->data;
int k, num;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp,
"sg_read_oxfer: num_read_xfer=%d\n",
num_read_xfer));
if ((!outp) || (num_read_xfer <= 0))
return 0;
num = 1 << (PAGE_SHIFT + schp->page_order);
for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
if (num > num_read_xfer) {
if (__copy_to_user(outp, page_address(schp->pages[k]),
num_read_xfer))
return -EFAULT;
break;
} else {
if (__copy_to_user(outp, page_address(schp->pages[k]),
num))
return -EFAULT;
num_read_xfer -= num;
if (num_read_xfer <= 0)
break;
outp += num;
}
}
return 0;
}
static void
sg_build_reserve(Sg_fd * sfp, int req_size)
{
Sg_scatter_hold *schp = &sfp->reserve;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_build_reserve: req_size=%d\n", req_size));
do {
if (req_size < PAGE_SIZE)
req_size = PAGE_SIZE;
if (0 == sg_build_indirect(schp, sfp, req_size))
return;
else
sg_remove_scat(sfp, schp);
req_size >>= 1; /* divide by 2 */
} while (req_size > (PAGE_SIZE / 2));
}
static void
sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
{
Sg_scatter_hold *req_schp = &srp->data;
Sg_scatter_hold *rsv_schp = &sfp->reserve;
int k, num, rem;
srp->res_used = 1;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_link_reserve: size=%d\n", size));
rem = size;
num = 1 << (PAGE_SHIFT + rsv_schp->page_order);
for (k = 0; k < rsv_schp->k_use_sg; k++) {
if (rem <= num) {
req_schp->k_use_sg = k + 1;
req_schp->sglist_len = rsv_schp->sglist_len;
req_schp->pages = rsv_schp->pages;
req_schp->bufflen = size;
req_schp->page_order = rsv_schp->page_order;
break;
} else
rem -= num;
}
if (k >= rsv_schp->k_use_sg)
SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
"sg_link_reserve: BAD size\n"));
}
static void
sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
{
Sg_scatter_hold *req_schp = &srp->data;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp,
"sg_unlink_reserve: req->k_use_sg=%d\n",
(int) req_schp->k_use_sg));
req_schp->k_use_sg = 0;
req_schp->bufflen = 0;
req_schp->pages = NULL;
req_schp->page_order = 0;
req_schp->sglist_len = 0;
srp->res_used = 0;
/* Called without mutex lock to avoid deadlock */
sfp->res_in_use = 0;
}
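/* Find a completed request, optionally matching pack_id (-1 matches
* any), that is not owned by a blocking SG_IO; mark it done=2 so no
* other reader can claim it. Returns NULL if nothing is ready. */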
static Sg_request *
sg_get_rq_mark(Sg_fd * sfp, int pack_id)
{
Sg_request *resp;
unsigned long iflags;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
list_for_each_entry(resp, &sfp->rq_list, entry) {
/* look for requests that are ready + not SG_IO owned */
if ((1 == resp->done) && (!resp->sg_io_owned) &&
((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
resp->done = 2; /* guard against other readers */
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return resp;
}
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return NULL;
}
/* always adds to end of list */
static Sg_request *
sg_add_request(Sg_fd * sfp)
{
int k;
unsigned long iflags;
Sg_request *rp = sfp->req_arr;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
if (!list_empty(&sfp->rq_list)) {
if (!sfp->cmd_q)
goto out_unlock;
for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
if (!rp->parentfp)
break;
}
if (k >= SG_MAX_QUEUE)
goto out_unlock;
}
memset(rp, 0, sizeof (Sg_request));
rp->parentfp = sfp;
rp->header.duration = jiffies_to_msecs(jiffies);
list_add_tail(&rp->entry, &sfp->rq_list);
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return rp;
out_unlock:
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return NULL;
}
/* Return of 1 for found; 0 for not found */
static int
sg_remove_request(Sg_fd * sfp, Sg_request * srp)
{
unsigned long iflags;
int res = 0;
if (!sfp || !srp || list_empty(&sfp->rq_list))
return res;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
if (!list_empty(&srp->entry)) {
list_del(&srp->entry);
srp->parentfp = NULL;
res = 1;
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return res;
}
static Sg_fd *
sg_add_sfp(Sg_device * sdp)
{
Sg_fd *sfp;
unsigned long iflags;
int bufflen;
sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
if (!sfp)
return ERR_PTR(-ENOMEM);
init_waitqueue_head(&sfp->read_wait);
rwlock_init(&sfp->rq_list_lock);
INIT_LIST_HEAD(&sfp->rq_list);
kref_init(&sfp->f_ref);
mutex_init(&sfp->f_mutex);
sfp->timeout = SG_DEFAULT_TIMEOUT;
sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
sfp->force_packid = SG_DEF_FORCE_PACK_ID;
sfp->cmd_q = SG_DEF_COMMAND_Q;
sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
sfp->parentdp = sdp;
write_lock_irqsave(&sdp->sfd_lock, iflags);
if (atomic_read(&sdp->detaching)) {
write_unlock_irqrestore(&sdp->sfd_lock, iflags);
return ERR_PTR(-ENODEV);
}
list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
write_unlock_irqrestore(&sdp->sfd_lock, iflags);
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_add_sfp: sfp=0x%p\n", sfp));
if (unlikely(sg_big_buff != def_reserved_size))
sg_big_buff = def_reserved_size;
bufflen = min_t(int, sg_big_buff,
max_sectors_bytes(sdp->device->request_queue));
sg_build_reserve(sfp, bufflen);
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
sfp->reserve.bufflen,
sfp->reserve.k_use_sg));
kref_get(&sdp->d_ref);
__module_get(THIS_MODULE);
return sfp;
}
static void
sg_remove_sfp_usercontext(struct work_struct *work)
{
struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
struct sg_device *sdp = sfp->parentdp;
Sg_request *srp;
unsigned long iflags;
/* Cleanup any responses which were never read(). */
write_lock_irqsave(&sfp->rq_list_lock, iflags);
while (!list_empty(&sfp->rq_list)) {
srp = list_first_entry(&sfp->rq_list, Sg_request, entry);
sg_finish_rem_req(srp);
list_del(&srp->entry);
srp->parentfp = NULL;
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
if (sfp->reserve.bufflen > 0) {
SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
"sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
(int) sfp->reserve.bufflen,
(int) sfp->reserve.k_use_sg));
sg_remove_scat(sfp, &sfp->reserve);
}
SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
"sg_remove_sfp: sfp=0x%p\n", sfp));
kfree(sfp);
scsi_device_put(sdp->device);
kref_put(&sdp->d_ref, sg_device_destroy);
module_put(THIS_MODULE);
}
static void
sg_remove_sfp(struct kref *kref)
{
struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref);
struct sg_device *sdp = sfp->parentdp;
unsigned long iflags;
write_lock_irqsave(&sdp->sfd_lock, iflags);
list_del(&sfp->sfd_siblings);
write_unlock_irqrestore(&sdp->sfd_lock, iflags);
INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
schedule_work(&sfp->ew.work);
}
#ifdef CONFIG_SCSI_PROC_FS
static int
sg_idr_max_id(int id, void *p, void *data)
{
int *k = data;
if (*k < id)
*k = id;
return 0;
}
static int
sg_last_dev(void)
{
int k = -1;
unsigned long iflags;
read_lock_irqsave(&sg_index_lock, iflags);
idr_for_each(&sg_index_idr, sg_idr_max_id, &k);
read_unlock_irqrestore(&sg_index_lock, iflags);
return k + 1; /* origin 1 */
}
#endif
/* must be called with sg_index_lock held */
static Sg_device *sg_lookup_dev(int dev)
{
return idr_find(&sg_index_idr, dev);
}
static Sg_device *
sg_get_dev(int dev)
{
struct sg_device *sdp;
unsigned long flags;
read_lock_irqsave(&sg_index_lock, flags);
sdp = sg_lookup_dev(dev);
if (!sdp)
sdp = ERR_PTR(-ENXIO);
else if (atomic_read(&sdp->detaching)) {
/* If sdp->detaching, then the refcount may already be 0, in
* which case it would be a bug to do kref_get().
*/
sdp = ERR_PTR(-ENODEV);
} else
kref_get(&sdp->d_ref);
read_unlock_irqrestore(&sg_index_lock, flags);
return sdp;
}
#ifdef CONFIG_SCSI_PROC_FS
static struct proc_dir_entry *sg_proc_sgp = NULL;
static char sg_proc_sg_dirname[] = "scsi/sg";
static int sg_proc_seq_show_int(struct seq_file *s, void *v);
static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
size_t count, loff_t *off);
static const struct file_operations adio_fops = {
.owner = THIS_MODULE,
.open = sg_proc_single_open_adio,
.read = seq_read,
.llseek = seq_lseek,
.write = sg_proc_write_adio,
.release = single_release,
};
static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_dressz(struct file *filp,
const char __user *buffer, size_t count, loff_t *off);
static const struct file_operations dressz_fops = {
.owner = THIS_MODULE,
.open = sg_proc_single_open_dressz,
.read = seq_read,
.llseek = seq_lseek,
.write = sg_proc_write_dressz,
.release = single_release,
};
static int sg_proc_seq_show_version(struct seq_file *s, void *v);
static int sg_proc_single_open_version(struct inode *inode, struct file *file);
static const struct file_operations version_fops = {
.owner = THIS_MODULE,
.open = sg_proc_single_open_version,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
static const struct file_operations devhdr_fops = {
.owner = THIS_MODULE,
.open = sg_proc_single_open_devhdr,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int sg_proc_seq_show_dev(struct seq_file *s, void *v);
static int sg_proc_open_dev(struct inode *inode, struct file *file);
static void * dev_seq_start(struct seq_file *s, loff_t *pos);
static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
static void dev_seq_stop(struct seq_file *s, void *v);
static const struct file_operations dev_fops = {
.owner = THIS_MODULE,
.open = sg_proc_open_dev,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static const struct seq_operations dev_seq_ops = {
.start = dev_seq_start,
.next = dev_seq_next,
.stop = dev_seq_stop,
.show = sg_proc_seq_show_dev,
};
static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
static const struct file_operations devstrs_fops = {
.owner = THIS_MODULE,
.open = sg_proc_open_devstrs,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static const struct seq_operations devstrs_seq_ops = {
.start = dev_seq_start,
.next = dev_seq_next,
.stop = dev_seq_stop,
.show = sg_proc_seq_show_devstrs,
};
static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
static int sg_proc_open_debug(struct inode *inode, struct file *file);
static const struct file_operations debug_fops = {
.owner = THIS_MODULE,
.open = sg_proc_open_debug,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static const struct seq_operations debug_seq_ops = {
.start = dev_seq_start,
.next = dev_seq_next,
.stop = dev_seq_stop,
.show = sg_proc_seq_show_debug,
};
struct sg_proc_leaf {
const char * name;
const struct file_operations * fops;
};
static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
{"allow_dio", &adio_fops},
{"debug", &debug_fops},
{"def_reserved_size", &dressz_fops},
{"device_hdr", &devhdr_fops},
{"devices", &dev_fops},
{"device_strs", &devstrs_fops},
{"version", &version_fops}
};
static int
sg_proc_init(void)
{
int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
int k;
sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
if (!sg_proc_sgp)
return 1;
for (k = 0; k < num_leaves; ++k) {
const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
}
return 0;
}
static void
sg_proc_cleanup(void)
{
int k;
int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
if (!sg_proc_sgp)
return;
for (k = 0; k < num_leaves; ++k)
remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp);
remove_proc_entry(sg_proc_sg_dirname, NULL);
}
static int sg_proc_seq_show_int(struct seq_file *s, void *v)
{
seq_printf(s, "%d\n", *((int *)s->private));
return 0;
}
static int sg_proc_single_open_adio(struct inode *inode, struct file *file)
{
return single_open(file, sg_proc_seq_show_int, &sg_allow_dio);
}
static ssize_t
sg_proc_write_adio(struct file *filp, const char __user *buffer,
size_t count, loff_t *off)
{
int err;
unsigned long num;
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
err = kstrtoul_from_user(buffer, count, 0, &num);
if (err)
return err;
sg_allow_dio = num ? 1 : 0;
return count;
}
static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
{
return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
}
static ssize_t
sg_proc_write_dressz(struct file *filp, const char __user *buffer,
size_t count, loff_t *off)
{
int err;
unsigned long k = ULONG_MAX;
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
err = kstrtoul_from_user(buffer, count, 0, &k);
if (err)
return err;
if (k <= 1048576) { /* limit "big buff" to 1 MB */
sg_big_buff = k;
return count;
}
return -ERANGE;
}
static int sg_proc_seq_show_version(struct seq_file *s, void *v)
{
seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR,
sg_version_date);
return 0;
}
static int sg_proc_single_open_version(struct inode *inode, struct file *file)
{
return single_open(file, sg_proc_seq_show_version, NULL);
}
static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v)
{
seq_puts(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\tonline\n");
return 0;
}
static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file)
{
return single_open(file, sg_proc_seq_show_devhdr, NULL);
}
struct sg_proc_deviter {
loff_t index;
size_t max;
};
static void * dev_seq_start(struct seq_file *s, loff_t *pos)
{
struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);
s->private = it;
if (!it)
return NULL;
it->index = *pos;
it->max = sg_last_dev();
if (it->index >= it->max)
return NULL;
return it;
}
static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
struct sg_proc_deviter * it = s->private;
*pos = ++it->index;
return (it->index < it->max) ? it : NULL;
}
static void dev_seq_stop(struct seq_file *s, void *v)
{
kfree(s->private);
}
static int sg_proc_open_dev(struct inode *inode, struct file *file)
{
return seq_open(file, &dev_seq_ops);
}
static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
{
struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
Sg_device *sdp;
struct scsi_device *scsidp;
unsigned long iflags;
read_lock_irqsave(&sg_index_lock, iflags);
sdp = it ? sg_lookup_dev(it->index) : NULL;
if ((NULL == sdp) || (NULL == sdp->device) ||
(atomic_read(&sdp->detaching)))
seq_puts(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
else {
scsidp = sdp->device;
seq_printf(s, "%d\t%d\t%d\t%llu\t%d\t%d\t%d\t%d\t%d\n",
scsidp->host->host_no, scsidp->channel,
scsidp->id, scsidp->lun, (int) scsidp->type,
1,
(int) scsidp->queue_depth,
(int) atomic_read(&scsidp->device_busy),
(int) scsi_device_online(scsidp));
}
read_unlock_irqrestore(&sg_index_lock, iflags);
return 0;
}
static int sg_proc_open_devstrs(struct inode *inode, struct file *file)
{
return seq_open(file, &devstrs_seq_ops);
}
static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
{
struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
Sg_device *sdp;
struct scsi_device *scsidp;
unsigned long iflags;
read_lock_irqsave(&sg_index_lock, iflags);
sdp = it ? sg_lookup_dev(it->index) : NULL;
scsidp = sdp ? sdp->device : NULL;
if (sdp && scsidp && (!atomic_read(&sdp->detaching)))
seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
scsidp->vendor, scsidp->model, scsidp->rev);
else
seq_puts(s, "<no active device>\n");
read_unlock_irqrestore(&sg_index_lock, iflags);
return 0;
}
/* must be called while holding sg_index_lock */
static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
{
int k, new_interface, blen, usg;
Sg_request *srp;
Sg_fd *fp;
const sg_io_hdr_t *hp;
const char * cp;
unsigned int ms;
k = 0;
list_for_each_entry(fp, &sdp->sfds, sfd_siblings) {
k++;
read_lock(&fp->rq_list_lock); /* irqs already disabled */
seq_printf(s, " FD(%d): timeout=%dms bufflen=%d "
"(res)sgat=%d low_dma=%d\n", k,
jiffies_to_msecs(fp->timeout),
fp->reserve.bufflen,
(int) fp->reserve.k_use_sg,
(int) sdp->device->host->unchecked_isa_dma);
seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
(int) fp->cmd_q, (int) fp->force_packid,
(int) fp->keep_orphan);
list_for_each_entry(srp, &fp->rq_list, entry) {
hp = &srp->header;
new_interface = (hp->interface_id == '\0') ? 0 : 1;
if (srp->res_used) {
if (new_interface &&
(SG_FLAG_MMAP_IO & hp->flags))
cp = " mmap>> ";
else
cp = " rb>> ";
} else {
if (SG_INFO_DIRECT_IO_MASK & hp->info)
cp = " dio>> ";
else
cp = " ";
}
seq_puts(s, cp);
blen = srp->data.bufflen;
usg = srp->data.k_use_sg;
seq_puts(s, srp->done ?
((1 == srp->done) ? "rcv:" : "fin:")
: "act:");
seq_printf(s, " id=%d blen=%d",
srp->header.pack_id, blen);
if (srp->done)
seq_printf(s, " dur=%d", hp->duration);
else {
ms = jiffies_to_msecs(jiffies);
seq_printf(s, " t_o/elap=%d/%d",
(new_interface ? hp->timeout :
jiffies_to_msecs(fp->timeout)),
(ms > hp->duration ? ms - hp->duration : 0));
}
seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
(int) srp->data.cmd_opcode);
}
if (list_empty(&fp->rq_list))
seq_puts(s, " No requests active\n");
read_unlock(&fp->rq_list_lock);
}
}
static int sg_proc_open_debug(struct inode *inode, struct file *file)
{
return seq_open(file, &debug_seq_ops);
}
static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
{
struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
Sg_device *sdp;
unsigned long iflags;
if (it && (0 == it->index))
seq_printf(s, "max_active_device=%d def_reserved_size=%d\n",
(int)it->max, sg_big_buff);
read_lock_irqsave(&sg_index_lock, iflags);
sdp = it ? sg_lookup_dev(it->index) : NULL;
if (NULL == sdp)
goto skip;
read_lock(&sdp->sfd_lock);
if (!list_empty(&sdp->sfds)) {
seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
if (atomic_read(&sdp->detaching))
seq_puts(s, "detaching pending close ");
else if (sdp->device) {
struct scsi_device *scsidp = sdp->device;
seq_printf(s, "%d:%d:%d:%llu em=%d",
scsidp->host->host_no,
scsidp->channel, scsidp->id,
scsidp->lun,
scsidp->host->hostt->emulated);
}
seq_printf(s, " sg_tablesize=%d excl=%d open_cnt=%d\n",
sdp->sg_tablesize, sdp->exclude, sdp->open_cnt);
sg_proc_debug_helper(s, sdp);
}
read_unlock(&sdp->sfd_lock);
skip:
read_unlock_irqrestore(&sg_index_lock, iflags);
return 0;
}
#endif /* CONFIG_SCSI_PROC_FS */
module_init(init_sg);
module_exit(exit_sg);
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_2828_0 |
crossvul-cpp_data_good_5258_0 | /*
+----------------------------------------------------------------------+
| PHP Version 5 |
+----------------------------------------------------------------------+
| Copyright (c) 1997-2016 The PHP Group |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Authors: Rasmus Lerdorf <rasmus@php.net> |
| Marcus Boerger <helly@php.net> |
+----------------------------------------------------------------------+
*/
/* $Id$ */
/* ToDos
*
* See if example images from http://www.exif.org have illegal
* thumbnail sizes or if the code is corrupt.
* Create/Update exif headers.
* Create/Remove/Update image thumbnails.
*/
/* Security
*
* At the current time I do not see any security problems, but a potential
* attacker could generate an image with recursive IFD pointers... (Marcus)
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "php.h"
#include "ext/standard/file.h"
#if HAVE_EXIF
/* When EXIF_DEBUG is defined the module generates a lot of debug messages
* that help in understanding what is going on. This can and should be used
* while extending the module, as it shows whether you are at the right position.
* You are assumed to have a copy of the TIFF 6.0 and EXIF 2.10 standards.
*/
#undef EXIF_DEBUG
#ifdef EXIF_DEBUG
#define EXIFERR_DC , const char *_file, size_t _line TSRMLS_DC
#define EXIFERR_CC , __FILE__, __LINE__ TSRMLS_CC
#else
#define EXIFERR_DC TSRMLS_DC
#define EXIFERR_CC TSRMLS_CC
#endif
#undef EXIF_JPEG2000
#include "php_exif.h"
#include <math.h>
#include "php_ini.h"
#include "ext/standard/php_string.h"
#include "ext/standard/php_image.h"
#include "ext/standard/info.h"
/* needed for ssize_t definition */
#include <sys/types.h>
typedef unsigned char uchar;
#ifndef safe_emalloc
# define safe_emalloc(a,b,c) emalloc((a)*(b)+(c))
#endif
#ifndef safe_erealloc
# define safe_erealloc(p,a,b,c) erealloc(p, (a)*(b)+(c))
#endif
#ifndef TRUE
# define TRUE 1
# define FALSE 0
#endif
#ifndef max
# define max(a,b) ((a)>(b) ? (a) : (b))
#endif
#define EFREE_IF(ptr) if (ptr) efree(ptr)
#define MAX_IFD_NESTING_LEVEL 100
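/* Note (explanatory, not upstream): MAX_IFD_NESTING_LEVEL bounds the IFD
 * recursion depth tracked in ImageInfo->ifd_nesting_level, which guards the
 * parser against the recursive IFD pointers mentioned in the Security note
 * above. */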
/* {{{ arginfo */
ZEND_BEGIN_ARG_INFO(arginfo_exif_tagname, 0)
ZEND_ARG_INFO(0, index)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_exif_read_data, 0, 0, 1)
ZEND_ARG_INFO(0, filename)
ZEND_ARG_INFO(0, sections_needed)
ZEND_ARG_INFO(0, sub_arrays)
ZEND_ARG_INFO(0, read_thumbnail)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_exif_thumbnail, 0, 0, 1)
ZEND_ARG_INFO(0, filename)
ZEND_ARG_INFO(1, width)
ZEND_ARG_INFO(1, height)
ZEND_ARG_INFO(1, imagetype)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_exif_imagetype, 0)
ZEND_ARG_INFO(0, imagefile)
ZEND_END_ARG_INFO()
/* }}} */
/* {{{ exif_functions[]
*/
const zend_function_entry exif_functions[] = {
PHP_FE(exif_read_data, arginfo_exif_read_data)
PHP_FALIAS(read_exif_data, exif_read_data, arginfo_exif_read_data)
PHP_FE(exif_tagname, arginfo_exif_tagname)
PHP_FE(exif_thumbnail, arginfo_exif_thumbnail)
PHP_FE(exif_imagetype, arginfo_exif_imagetype)
PHP_FE_END
};
/* }}} */
#define EXIF_VERSION "1.4 $Id$"
/* {{{ PHP_MINFO_FUNCTION
*/
PHP_MINFO_FUNCTION(exif)
{
php_info_print_table_start();
php_info_print_table_row(2, "EXIF Support", "enabled");
php_info_print_table_row(2, "EXIF Version", EXIF_VERSION);
php_info_print_table_row(2, "Supported EXIF Version", "0220");
php_info_print_table_row(2, "Supported filetypes", "JPEG,TIFF");
php_info_print_table_end();
DISPLAY_INI_ENTRIES();
}
/* }}} */
ZEND_BEGIN_MODULE_GLOBALS(exif)
char * encode_unicode;
char * decode_unicode_be;
char * decode_unicode_le;
char * encode_jis;
char * decode_jis_be;
char * decode_jis_le;
ZEND_END_MODULE_GLOBALS(exif)
ZEND_DECLARE_MODULE_GLOBALS(exif)
#ifdef ZTS
#define EXIF_G(v) TSRMG(exif_globals_id, zend_exif_globals *, v)
#else
#define EXIF_G(v) (exif_globals.v)
#endif
/* {{{ PHP_INI
*/
ZEND_INI_MH(OnUpdateEncode)
{
if (new_value && new_value_length) {
const zend_encoding **return_list;
size_t return_size;
if (FAILURE == zend_multibyte_parse_encoding_list(new_value, new_value_length,
&return_list, &return_size, 0 TSRMLS_CC)) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Illegal encoding ignored: '%s'", new_value);
return FAILURE;
}
efree(return_list);
}
return OnUpdateString(entry, new_value, new_value_length, mh_arg1, mh_arg2, mh_arg3, stage TSRMLS_CC);
}
ZEND_INI_MH(OnUpdateDecode)
{
if (new_value) {
const zend_encoding **return_list;
size_t return_size;
if (FAILURE == zend_multibyte_parse_encoding_list(new_value, new_value_length,
&return_list, &return_size, 0 TSRMLS_CC)) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Illegal encoding ignored: '%s'", new_value);
return FAILURE;
}
efree(return_list);
}
return OnUpdateString(entry, new_value, new_value_length, mh_arg1, mh_arg2, mh_arg3, stage TSRMLS_CC);
}
PHP_INI_BEGIN()
STD_PHP_INI_ENTRY("exif.encode_unicode", "ISO-8859-15", PHP_INI_ALL, OnUpdateEncode, encode_unicode, zend_exif_globals, exif_globals)
STD_PHP_INI_ENTRY("exif.decode_unicode_motorola", "UCS-2BE", PHP_INI_ALL, OnUpdateDecode, decode_unicode_be, zend_exif_globals, exif_globals)
STD_PHP_INI_ENTRY("exif.decode_unicode_intel", "UCS-2LE", PHP_INI_ALL, OnUpdateDecode, decode_unicode_le, zend_exif_globals, exif_globals)
STD_PHP_INI_ENTRY("exif.encode_jis", "", PHP_INI_ALL, OnUpdateEncode, encode_jis, zend_exif_globals, exif_globals)
STD_PHP_INI_ENTRY("exif.decode_jis_motorola", "JIS", PHP_INI_ALL, OnUpdateDecode, decode_jis_be, zend_exif_globals, exif_globals)
STD_PHP_INI_ENTRY("exif.decode_jis_intel", "JIS", PHP_INI_ALL, OnUpdateDecode, decode_jis_le, zend_exif_globals, exif_globals)
PHP_INI_END()
/* }}} */
/* {{{ PHP_GINIT_FUNCTION
*/
static PHP_GINIT_FUNCTION(exif)
{
exif_globals->encode_unicode = NULL;
exif_globals->decode_unicode_be = NULL;
exif_globals->decode_unicode_le = NULL;
exif_globals->encode_jis = NULL;
exif_globals->decode_jis_be = NULL;
exif_globals->decode_jis_le = NULL;
}
/* }}} */
/* {{{ PHP_MINIT_FUNCTION(exif)
   Module initialization: register INI entries and constants */
PHP_MINIT_FUNCTION(exif)
{
REGISTER_INI_ENTRIES();
if (zend_hash_exists(&module_registry, "mbstring", sizeof("mbstring"))) {
REGISTER_LONG_CONSTANT("EXIF_USE_MBSTRING", 1, CONST_CS | CONST_PERSISTENT);
} else {
REGISTER_LONG_CONSTANT("EXIF_USE_MBSTRING", 0, CONST_CS | CONST_PERSISTENT);
}
return SUCCESS;
}
/* }}} */
/* {{{ PHP_MSHUTDOWN_FUNCTION
*/
PHP_MSHUTDOWN_FUNCTION(exif)
{
UNREGISTER_INI_ENTRIES();
return SUCCESS;
}
/* }}} */
/* {{{ exif dependencies */
static const zend_module_dep exif_module_deps[] = {
ZEND_MOD_REQUIRED("standard")
ZEND_MOD_OPTIONAL("mbstring")
ZEND_MOD_END
};
/* }}} */
/* {{{ exif_module_entry
*/
zend_module_entry exif_module_entry = {
STANDARD_MODULE_HEADER_EX, NULL,
exif_module_deps,
"exif",
exif_functions,
PHP_MINIT(exif),
PHP_MSHUTDOWN(exif),
NULL, NULL,
PHP_MINFO(exif),
#if ZEND_MODULE_API_NO >= 20010901
EXIF_VERSION,
#endif
#if ZEND_MODULE_API_NO >= 20060613
PHP_MODULE_GLOBALS(exif),
PHP_GINIT(exif),
NULL,
NULL,
STANDARD_MODULE_PROPERTIES_EX
#else
STANDARD_MODULE_PROPERTIES
#endif
};
/* }}} */
#ifdef COMPILE_DL_EXIF
ZEND_GET_MODULE(exif)
#endif
/* {{{ php_strnlen
 * Get the length of the string if it is shorter than the buffer size, otherwise the buffer size (like strnlen) */
static size_t php_strnlen(char* str, size_t maxlen) {
size_t len = 0;
if (str && maxlen && *str) {
do {
len++;
} while (--maxlen && *(++str));
}
return len;
}
/* }}} */
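/* Illustrative sketch (not part of the module): php_strnlen() behaves like
 * POSIX strnlen() and never reads past maxlen bytes, e.g.
 *
 *   char buf[4] = { 'a', 'b', 'c', 'd' };      // no terminating NUL
 *   size_t a = php_strnlen(buf, sizeof(buf));  // a == 4, stopped by maxlen
 *   size_t b = php_strnlen("ab", 16);          // b == 2, stopped by the NUL
 */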
/* {{{ error messages
*/
static const char * EXIF_ERROR_FILEEOF = "Unexpected end of file reached";
static const char * EXIF_ERROR_CORRUPT = "File structure corrupted";
static const char * EXIF_ERROR_THUMBEOF = "Thumbnail goes beyond IFD boundary or end of file reached";
static const char * EXIF_ERROR_FSREALLOC = "Illegal reallocating of undefined file section";
#define EXIF_ERRLOG_FILEEOF(ImageInfo) exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "%s", EXIF_ERROR_FILEEOF);
#define EXIF_ERRLOG_CORRUPT(ImageInfo) exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "%s", EXIF_ERROR_CORRUPT);
#define EXIF_ERRLOG_THUMBEOF(ImageInfo) exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "%s", EXIF_ERROR_THUMBEOF);
#define EXIF_ERRLOG_FSREALLOC(ImageInfo) exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "%s", EXIF_ERROR_FSREALLOC);
/* }}} */
/* {{{ format description defines
   Byte size of each TIFF data format, indexed by the TAG_FMT_* constants
*/
static int php_tiff_bytes_per_format[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8, 1};
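/* Explanatory note: indexed by TAG_FMT_* (index 0 is unused), e.g.
 * php_tiff_bytes_per_format[TAG_FMT_ULONG] == 4 and
 * php_tiff_bytes_per_format[TAG_FMT_URATIONAL] == 8. */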
#define NUM_FORMATS 13
#define TAG_FMT_BYTE 1
#define TAG_FMT_STRING 2
#define TAG_FMT_USHORT 3
#define TAG_FMT_ULONG 4
#define TAG_FMT_URATIONAL 5
#define TAG_FMT_SBYTE 6
#define TAG_FMT_UNDEFINED 7
#define TAG_FMT_SSHORT 8
#define TAG_FMT_SLONG 9
#define TAG_FMT_SRATIONAL 10
#define TAG_FMT_SINGLE 11
#define TAG_FMT_DOUBLE 12
#define TAG_FMT_IFD 13
#ifdef EXIF_DEBUG
static char *exif_get_tagformat(int format)
{
switch(format) {
case TAG_FMT_BYTE: return "BYTE";
case TAG_FMT_STRING: return "STRING";
case TAG_FMT_USHORT: return "USHORT";
case TAG_FMT_ULONG: return "ULONG";
case TAG_FMT_URATIONAL: return "URATIONAL";
case TAG_FMT_SBYTE: return "SBYTE";
case TAG_FMT_UNDEFINED: return "UNDEFINED";
case TAG_FMT_SSHORT: return "SSHORT";
case TAG_FMT_SLONG: return "SLONG";
case TAG_FMT_SRATIONAL: return "SRATIONAL";
case TAG_FMT_SINGLE: return "SINGLE";
case TAG_FMT_DOUBLE: return "DOUBLE";
case TAG_FMT_IFD: return "IFD";
}
return "*Illegal";
}
#endif
/* Describes tag values */
#define TAG_GPS_VERSION_ID 0x0000
#define TAG_GPS_LATITUDE_REF 0x0001
#define TAG_GPS_LATITUDE 0x0002
#define TAG_GPS_LONGITUDE_REF 0x0003
#define TAG_GPS_LONGITUDE 0x0004
#define TAG_GPS_ALTITUDE_REF 0x0005
#define TAG_GPS_ALTITUDE 0x0006
#define TAG_GPS_TIME_STAMP 0x0007
#define TAG_GPS_SATELLITES 0x0008
#define TAG_GPS_STATUS 0x0009
#define TAG_GPS_MEASURE_MODE 0x000A
#define TAG_GPS_DOP 0x000B
#define TAG_GPS_SPEED_REF 0x000C
#define TAG_GPS_SPEED 0x000D
#define TAG_GPS_TRACK_REF 0x000E
#define TAG_GPS_TRACK 0x000F
#define TAG_GPS_IMG_DIRECTION_REF 0x0010
#define TAG_GPS_IMG_DIRECTION 0x0011
#define TAG_GPS_MAP_DATUM 0x0012
#define TAG_GPS_DEST_LATITUDE_REF 0x0013
#define TAG_GPS_DEST_LATITUDE 0x0014
#define TAG_GPS_DEST_LONGITUDE_REF 0x0015
#define TAG_GPS_DEST_LONGITUDE 0x0016
#define TAG_GPS_DEST_BEARING_REF 0x0017
#define TAG_GPS_DEST_BEARING 0x0018
#define TAG_GPS_DEST_DISTANCE_REF 0x0019
#define TAG_GPS_DEST_DISTANCE 0x001A
#define TAG_GPS_PROCESSING_METHOD 0x001B
#define TAG_GPS_AREA_INFORMATION 0x001C
#define TAG_GPS_DATE_STAMP 0x001D
#define TAG_GPS_DIFFERENTIAL 0x001E
#define TAG_TIFF_COMMENT 0x00FE /* SHOULDN'T HAPPEN */
#define TAG_NEW_SUBFILE 0x00FE /* New version of subfile tag */
#define TAG_SUBFILE_TYPE 0x00FF /* Old version of subfile tag */
#define TAG_IMAGEWIDTH 0x0100
#define TAG_IMAGEHEIGHT 0x0101
#define TAG_BITS_PER_SAMPLE 0x0102
#define TAG_COMPRESSION 0x0103
#define TAG_PHOTOMETRIC_INTERPRETATION 0x0106
#define TAG_TRESHHOLDING 0x0107
#define TAG_CELL_WIDTH 0x0108
#define TAG_CELL_HEIGHT 0x0109
#define TAG_FILL_ORDER 0x010A
#define TAG_DOCUMENT_NAME 0x010D
#define TAG_IMAGE_DESCRIPTION 0x010E
#define TAG_MAKE 0x010F
#define TAG_MODEL 0x0110
#define TAG_STRIP_OFFSETS 0x0111
#define TAG_ORIENTATION 0x0112
#define TAG_SAMPLES_PER_PIXEL 0x0115
#define TAG_ROWS_PER_STRIP 0x0116
#define TAG_STRIP_BYTE_COUNTS 0x0117
#define TAG_MIN_SAMPPLE_VALUE 0x0118
#define TAG_MAX_SAMPLE_VALUE 0x0119
#define TAG_X_RESOLUTION 0x011A
#define TAG_Y_RESOLUTION 0x011B
#define TAG_PLANAR_CONFIGURATION 0x011C
#define TAG_PAGE_NAME 0x011D
#define TAG_X_POSITION 0x011E
#define TAG_Y_POSITION 0x011F
#define TAG_FREE_OFFSETS 0x0120
#define TAG_FREE_BYTE_COUNTS 0x0121
#define TAG_GRAY_RESPONSE_UNIT 0x0122
#define TAG_GRAY_RESPONSE_CURVE 0x0123
#define TAG_RESOLUTION_UNIT 0x0128
#define TAG_PAGE_NUMBER 0x0129
#define TAG_TRANSFER_FUNCTION 0x012D
#define TAG_SOFTWARE 0x0131
#define TAG_DATETIME 0x0132
#define TAG_ARTIST 0x013B
#define TAG_HOST_COMPUTER 0x013C
#define TAG_PREDICTOR 0x013D
#define TAG_WHITE_POINT 0x013E
#define TAG_PRIMARY_CHROMATICITIES 0x013F
#define TAG_COLOR_MAP 0x0140
#define TAG_HALFTONE_HINTS 0x0141
#define TAG_TILE_WIDTH 0x0142
#define TAG_TILE_LENGTH 0x0143
#define TAG_TILE_OFFSETS 0x0144
#define TAG_TILE_BYTE_COUNTS 0x0145
#define TAG_SUB_IFD 0x014A
#define TAG_INK_SETMPUTER 0x014C
#define TAG_INK_NAMES 0x014D
#define TAG_NUMBER_OF_INKS 0x014E
#define TAG_DOT_RANGE 0x0150
#define TAG_TARGET_PRINTER 0x0151
#define TAG_EXTRA_SAMPLE 0x0152
#define TAG_SAMPLE_FORMAT 0x0153
#define TAG_S_MIN_SAMPLE_VALUE 0x0154
#define TAG_S_MAX_SAMPLE_VALUE 0x0155
#define TAG_TRANSFER_RANGE 0x0156
#define TAG_JPEG_TABLES 0x015B
#define TAG_JPEG_PROC 0x0200
#define TAG_JPEG_INTERCHANGE_FORMAT 0x0201
#define TAG_JPEG_INTERCHANGE_FORMAT_LEN 0x0202
#define TAG_JPEG_RESTART_INTERVAL 0x0203
#define TAG_JPEG_LOSSLESS_PREDICTOR 0x0205
#define TAG_JPEG_POINT_TRANSFORMS 0x0206
#define TAG_JPEG_Q_TABLES 0x0207
#define TAG_JPEG_DC_TABLES 0x0208
#define TAG_JPEG_AC_TABLES 0x0209
#define TAG_YCC_COEFFICIENTS 0x0211
#define TAG_YCC_SUB_SAMPLING 0x0212
#define TAG_YCC_POSITIONING 0x0213
#define TAG_REFERENCE_BLACK_WHITE 0x0214
/* 0x0301 - 0x0302 */
/* 0x0320 */
/* 0x0343 */
/* 0x5001 - 0x501B */
/* 0x5021 - 0x503B */
/* 0x5090 - 0x5091 */
/* 0x5100 - 0x5101 */
/* 0x5110 - 0x5113 */
/* 0x80E3 - 0x80E6 */
/* 0x828d - 0x828F */
#define TAG_COPYRIGHT 0x8298
#define TAG_EXPOSURETIME 0x829A
#define TAG_FNUMBER 0x829D
#define TAG_EXIF_IFD_POINTER 0x8769
#define TAG_ICC_PROFILE 0x8773
#define TAG_EXPOSURE_PROGRAM 0x8822
#define TAG_SPECTRAL_SENSITY 0x8824
#define TAG_GPS_IFD_POINTER 0x8825
#define TAG_ISOSPEED 0x8827
#define TAG_OPTOELECTRIC_CONVERSION_F 0x8828
/* 0x8829 - 0x882b */
#define TAG_EXIFVERSION 0x9000
#define TAG_DATE_TIME_ORIGINAL 0x9003
#define TAG_DATE_TIME_DIGITIZED 0x9004
#define TAG_COMPONENT_CONFIG 0x9101
#define TAG_COMPRESSED_BITS_PER_PIXEL 0x9102
#define TAG_SHUTTERSPEED 0x9201
#define TAG_APERTURE 0x9202
#define TAG_BRIGHTNESS_VALUE 0x9203
#define TAG_EXPOSURE_BIAS_VALUE 0x9204
#define TAG_MAX_APERTURE 0x9205
#define TAG_SUBJECT_DISTANCE 0x9206
#define TAG_METRIC_MODULE 0x9207
#define TAG_LIGHT_SOURCE 0x9208
#define TAG_FLASH 0x9209
#define TAG_FOCAL_LENGTH 0x920A
/* 0x920B - 0x920D */
/* 0x9211 - 0x9216 */
#define TAG_SUBJECT_AREA 0x9214
#define TAG_MAKER_NOTE 0x927C
#define TAG_USERCOMMENT 0x9286
#define TAG_SUB_SEC_TIME 0x9290
#define TAG_SUB_SEC_TIME_ORIGINAL 0x9291
#define TAG_SUB_SEC_TIME_DIGITIZED 0x9292
/* 0x923F */
/* 0x935C */
#define TAG_XP_TITLE 0x9C9B
#define TAG_XP_COMMENTS 0x9C9C
#define TAG_XP_AUTHOR 0x9C9D
#define TAG_XP_KEYWORDS 0x9C9E
#define TAG_XP_SUBJECT 0x9C9F
#define TAG_FLASH_PIX_VERSION 0xA000
#define TAG_COLOR_SPACE 0xA001
#define TAG_COMP_IMAGE_WIDTH 0xA002 /* compressed images only */
#define TAG_COMP_IMAGE_HEIGHT 0xA003
#define TAG_RELATED_SOUND_FILE 0xA004
#define TAG_INTEROP_IFD_POINTER 0xA005 /* IFD pointer */
#define TAG_FLASH_ENERGY 0xA20B
#define TAG_SPATIAL_FREQUENCY_RESPONSE 0xA20C
#define TAG_FOCALPLANE_X_RES 0xA20E
#define TAG_FOCALPLANE_Y_RES 0xA20F
#define TAG_FOCALPLANE_RESOLUTION_UNIT 0xA210
#define TAG_SUBJECT_LOCATION 0xA214
#define TAG_EXPOSURE_INDEX 0xA215
#define TAG_SENSING_METHOD 0xA217
#define TAG_FILE_SOURCE 0xA300
#define TAG_SCENE_TYPE 0xA301
#define TAG_CFA_PATTERN 0xA302
#define TAG_CUSTOM_RENDERED 0xA401
#define TAG_EXPOSURE_MODE 0xA402
#define TAG_WHITE_BALANCE 0xA403
#define TAG_DIGITAL_ZOOM_RATIO 0xA404
#define TAG_FOCAL_LENGTH_IN_35_MM_FILM 0xA405
#define TAG_SCENE_CAPTURE_TYPE 0xA406
#define TAG_GAIN_CONTROL 0xA407
#define TAG_CONTRAST 0xA408
#define TAG_SATURATION 0xA409
#define TAG_SHARPNESS 0xA40A
#define TAG_DEVICE_SETTING_DESCRIPTION 0xA40B
#define TAG_SUBJECT_DISTANCE_RANGE 0xA40C
#define TAG_IMAGE_UNIQUE_ID 0xA420
/* Olympus specific tags */
#define TAG_OLYMPUS_SPECIALMODE 0x0200
#define TAG_OLYMPUS_JPEGQUAL 0x0201
#define TAG_OLYMPUS_MACRO 0x0202
#define TAG_OLYMPUS_DIGIZOOM 0x0204
#define TAG_OLYMPUS_SOFTWARERELEASE 0x0207
#define TAG_OLYMPUS_PICTINFO 0x0208
#define TAG_OLYMPUS_CAMERAID 0x0209
/* end Olympus specific tags */
/* Internal */
#define TAG_NONE -1 /* note that -1 <> 0xFFFF */
#define TAG_COMPUTED_VALUE -2
#define TAG_END_OF_LIST 0xFFFD
/* Values for TAG_PHOTOMETRIC_INTERPRETATION */
#define PMI_BLACK_IS_ZERO 0
#define PMI_WHITE_IS_ZERO 1
#define PMI_RGB 2
#define PMI_PALETTE_COLOR 3
#define PMI_TRANSPARENCY_MASK 4
#define PMI_SEPARATED 5
#define PMI_YCBCR 6
#define PMI_CIELAB 8
/* }}} */
/* {{{ TabTable[]
*/
typedef const struct {
unsigned short Tag;
char *Desc;
} tag_info_type;
typedef tag_info_type tag_info_array[];
typedef tag_info_type *tag_table_type;
#define TAG_TABLE_END \
{TAG_NONE, "No tag value"},\
{TAG_COMPUTED_VALUE, "Computed value"},\
{TAG_END_OF_LIST, ""} /* Important for exif_get_tagname() IF value != "" function result is != false */
static tag_info_array tag_table_IFD = {
{ 0x000B, "ACDComment"},
{ 0x00FE, "NewSubFile"}, /* better name it 'ImageType' ? */
{ 0x00FF, "SubFile"},
{ 0x0100, "ImageWidth"},
{ 0x0101, "ImageLength"},
{ 0x0102, "BitsPerSample"},
{ 0x0103, "Compression"},
{ 0x0106, "PhotometricInterpretation"},
{ 0x010A, "FillOrder"},
{ 0x010D, "DocumentName"},
{ 0x010E, "ImageDescription"},
{ 0x010F, "Make"},
{ 0x0110, "Model"},
{ 0x0111, "StripOffsets"},
{ 0x0112, "Orientation"},
{ 0x0115, "SamplesPerPixel"},
{ 0x0116, "RowsPerStrip"},
{ 0x0117, "StripByteCounts"},
{ 0x0118, "MinSampleValue"},
{ 0x0119, "MaxSampleValue"},
{ 0x011A, "XResolution"},
{ 0x011B, "YResolution"},
{ 0x011C, "PlanarConfiguration"},
{ 0x011D, "PageName"},
{ 0x011E, "XPosition"},
{ 0x011F, "YPosition"},
{ 0x0120, "FreeOffsets"},
{ 0x0121, "FreeByteCounts"},
{ 0x0122, "GrayResponseUnit"},
{ 0x0123, "GrayResponseCurve"},
{ 0x0124, "T4Options"},
{ 0x0125, "T6Options"},
{ 0x0128, "ResolutionUnit"},
{ 0x0129, "PageNumber"},
{ 0x012D, "TransferFunction"},
{ 0x0131, "Software"},
{ 0x0132, "DateTime"},
{ 0x013B, "Artist"},
{ 0x013C, "HostComputer"},
{ 0x013D, "Predictor"},
{ 0x013E, "WhitePoint"},
{ 0x013F, "PrimaryChromaticities"},
{ 0x0140, "ColorMap"},
{ 0x0141, "HalfToneHints"},
{ 0x0142, "TileWidth"},
{ 0x0143, "TileLength"},
{ 0x0144, "TileOffsets"},
{ 0x0145, "TileByteCounts"},
{ 0x014A, "SubIFD"},
{ 0x014C, "InkSet"},
{ 0x014D, "InkNames"},
{ 0x014E, "NumberOfInks"},
{ 0x0150, "DotRange"},
{ 0x0151, "TargetPrinter"},
{ 0x0152, "ExtraSample"},
{ 0x0153, "SampleFormat"},
{ 0x0154, "SMinSampleValue"},
{ 0x0155, "SMaxSampleValue"},
{ 0x0156, "TransferRange"},
{ 0x0157, "ClipPath"},
{ 0x0158, "XClipPathUnits"},
{ 0x0159, "YClipPathUnits"},
{ 0x015A, "Indexed"},
{ 0x015B, "JPEGTables"},
{ 0x015F, "OPIProxy"},
{ 0x0200, "JPEGProc"},
{ 0x0201, "JPEGInterchangeFormat"},
{ 0x0202, "JPEGInterchangeFormatLength"},
{ 0x0203, "JPEGRestartInterval"},
{ 0x0205, "JPEGLosslessPredictors"},
{ 0x0206, "JPEGPointTransforms"},
{ 0x0207, "JPEGQTables"},
{ 0x0208, "JPEGDCTables"},
{ 0x0209, "JPEGACTables"},
{ 0x0211, "YCbCrCoefficients"},
{ 0x0212, "YCbCrSubSampling"},
{ 0x0213, "YCbCrPositioning"},
{ 0x0214, "ReferenceBlackWhite"},
{ 0x02BC, "ExtensibleMetadataPlatform"}, /* XAP: Extensible Authoring Publishing, obsoleted by XMP: Extensible Metadata Platform */
{ 0x0301, "Gamma"},
{ 0x0302, "ICCProfileDescriptor"},
{ 0x0303, "SRGBRenderingIntent"},
{ 0x0320, "ImageTitle"},
{ 0x5001, "ResolutionXUnit"},
{ 0x5002, "ResolutionYUnit"},
{ 0x5003, "ResolutionXLengthUnit"},
{ 0x5004, "ResolutionYLengthUnit"},
{ 0x5005, "PrintFlags"},
{ 0x5006, "PrintFlagsVersion"},
{ 0x5007, "PrintFlagsCrop"},
{ 0x5008, "PrintFlagsBleedWidth"},
{ 0x5009, "PrintFlagsBleedWidthScale"},
{ 0x500A, "HalftoneLPI"},
{ 0x500B, "HalftoneLPIUnit"},
{ 0x500C, "HalftoneDegree"},
{ 0x500D, "HalftoneShape"},
{ 0x500E, "HalftoneMisc"},
{ 0x500F, "HalftoneScreen"},
{ 0x5010, "JPEGQuality"},
{ 0x5011, "GridSize"},
{ 0x5012, "ThumbnailFormat"},
{ 0x5013, "ThumbnailWidth"},
{ 0x5014, "ThumbnailHeight"},
{ 0x5015, "ThumbnailColorDepth"},
{ 0x5016, "ThumbnailPlanes"},
{ 0x5017, "ThumbnailRawBytes"},
{ 0x5018, "ThumbnailSize"},
{ 0x5019, "ThumbnailCompressedSize"},
{ 0x501A, "ColorTransferFunction"},
{ 0x501B, "ThumbnailData"},
{ 0x5020, "ThumbnailImageWidth"},
{ 0x5021, "ThumbnailImageHeight"},
{ 0x5022, "ThumbnailBitsPerSample"},
{ 0x5023, "ThumbnailCompression"},
{ 0x5024, "ThumbnailPhotometricInterp"},
{ 0x5025, "ThumbnailImageDescription"},
{ 0x5026, "ThumbnailEquipMake"},
{ 0x5027, "ThumbnailEquipModel"},
{ 0x5028, "ThumbnailStripOffsets"},
{ 0x5029, "ThumbnailOrientation"},
{ 0x502A, "ThumbnailSamplesPerPixel"},
{ 0x502B, "ThumbnailRowsPerStrip"},
{ 0x502C, "ThumbnailStripBytesCount"},
{ 0x502D, "ThumbnailResolutionX"},
{ 0x502E, "ThumbnailResolutionY"},
{ 0x502F, "ThumbnailPlanarConfig"},
{ 0x5030, "ThumbnailResolutionUnit"},
{ 0x5031, "ThumbnailTransferFunction"},
{ 0x5032, "ThumbnailSoftwareUsed"},
{ 0x5033, "ThumbnailDateTime"},
{ 0x5034, "ThumbnailArtist"},
{ 0x5035, "ThumbnailWhitePoint"},
{ 0x5036, "ThumbnailPrimaryChromaticities"},
{ 0x5037, "ThumbnailYCbCrCoefficients"},
{ 0x5038, "ThumbnailYCbCrSubsampling"},
{ 0x5039, "ThumbnailYCbCrPositioning"},
{ 0x503A, "ThumbnailRefBlackWhite"},
{ 0x503B, "ThumbnailCopyRight"},
{ 0x5090, "LuminanceTable"},
{ 0x5091, "ChrominanceTable"},
{ 0x5100, "FrameDelay"},
{ 0x5101, "LoopCount"},
{ 0x5110, "PixelUnit"},
{ 0x5111, "PixelPerUnitX"},
{ 0x5112, "PixelPerUnitY"},
{ 0x5113, "PaletteHistogram"},
{ 0x1000, "RelatedImageFileFormat"},
{ 0x800D, "ImageID"},
{ 0x80E3, "Matteing"}, /* obsoleted by ExtraSamples */
{ 0x80E4, "DataType"}, /* obsoleted by SampleFormat */
{ 0x80E5, "ImageDepth"},
{ 0x80E6, "TileDepth"},
{ 0x828D, "CFARepeatPatternDim"},
{ 0x828E, "CFAPattern"},
{ 0x828F, "BatteryLevel"},
{ 0x8298, "Copyright"},
{ 0x829A, "ExposureTime"},
{ 0x829D, "FNumber"},
{ 0x83BB, "IPTC/NAA"},
{ 0x84E3, "IT8RasterPadding"},
{ 0x84E5, "IT8ColorTable"},
{ 0x8649, "ImageResourceInformation"}, /* PhotoShop */
{ 0x8769, "Exif_IFD_Pointer"},
{ 0x8773, "ICC_Profile"},
{ 0x8822, "ExposureProgram"},
{ 0x8824, "SpectralSensity"},
{ 0x8828, "OECF"},
{ 0x8825, "GPS_IFD_Pointer"},
{ 0x8827, "ISOSpeedRatings"},
{ 0x8828, "OECF"},
{ 0x9000, "ExifVersion"},
{ 0x9003, "DateTimeOriginal"},
{ 0x9004, "DateTimeDigitized"},
{ 0x9101, "ComponentsConfiguration"},
{ 0x9102, "CompressedBitsPerPixel"},
{ 0x9201, "ShutterSpeedValue"},
{ 0x9202, "ApertureValue"},
{ 0x9203, "BrightnessValue"},
{ 0x9204, "ExposureBiasValue"},
{ 0x9205, "MaxApertureValue"},
{ 0x9206, "SubjectDistance"},
{ 0x9207, "MeteringMode"},
{ 0x9208, "LightSource"},
{ 0x9209, "Flash"},
{ 0x920A, "FocalLength"},
{ 0x920B, "FlashEnergy"}, /* 0xA20B in JPEG */
{ 0x920C, "SpatialFrequencyResponse"}, /* 0xA20C - - */
{ 0x920D, "Noise"},
{ 0x920E, "FocalPlaneXResolution"}, /* 0xA20E - - */
{ 0x920F, "FocalPlaneYResolution"}, /* 0xA20F - - */
{ 0x9210, "FocalPlaneResolutionUnit"}, /* 0xA210 - - */
{ 0x9211, "ImageNumber"},
{ 0x9212, "SecurityClassification"},
{ 0x9213, "ImageHistory"},
{ 0x9214, "SubjectLocation"}, /* 0xA214 - - */
{ 0x9215, "ExposureIndex"}, /* 0xA215 - - */
{ 0x9216, "TIFF/EPStandardID"},
{ 0x9217, "SensingMethod"}, /* 0xA217 - - */
{ 0x923F, "StoNits"},
{ 0x927C, "MakerNote"},
{ 0x9286, "UserComment"},
{ 0x9290, "SubSecTime"},
{ 0x9291, "SubSecTimeOriginal"},
{ 0x9292, "SubSecTimeDigitized"},
{ 0x935C, "ImageSourceData"}, /* "Adobe Photoshop Document Data Block": 8BIM... */
{ 0x9c9b, "Title" }, /* Win XP specific, Unicode */
{ 0x9c9c, "Comments" }, /* Win XP specific, Unicode */
{ 0x9c9d, "Author" }, /* Win XP specific, Unicode */
{ 0x9c9e, "Keywords" }, /* Win XP specific, Unicode */
{ 0x9c9f, "Subject" }, /* Win XP specific, Unicode, not to be confused with SubjectDistance and SubjectLocation */
{ 0xA000, "FlashPixVersion"},
{ 0xA001, "ColorSpace"},
{ 0xA002, "ExifImageWidth"},
{ 0xA003, "ExifImageLength"},
{ 0xA004, "RelatedSoundFile"},
{ 0xA005, "InteroperabilityOffset"},
{ 0xA20B, "FlashEnergy"}, /* 0x920B in TIFF/EP */
{ 0xA20C, "SpatialFrequencyResponse"}, /* 0x920C - - */
{ 0xA20D, "Noise"},
{ 0xA20E, "FocalPlaneXResolution"}, /* 0x920E - - */
{ 0xA20F, "FocalPlaneYResolution"}, /* 0x920F - - */
{ 0xA210, "FocalPlaneResolutionUnit"}, /* 0x9210 - - */
{ 0xA211, "ImageNumber"},
{ 0xA212, "SecurityClassification"},
{ 0xA213, "ImageHistory"},
{ 0xA214, "SubjectLocation"}, /* 0x9214 - - */
{ 0xA215, "ExposureIndex"}, /* 0x9215 - - */
{ 0xA216, "TIFF/EPStandardID"},
{ 0xA217, "SensingMethod"}, /* 0x9217 - - */
{ 0xA300, "FileSource"},
{ 0xA301, "SceneType"},
{ 0xA302, "CFAPattern"},
{ 0xA401, "CustomRendered"},
{ 0xA402, "ExposureMode"},
{ 0xA403, "WhiteBalance"},
{ 0xA404, "DigitalZoomRatio"},
{ 0xA405, "FocalLengthIn35mmFilm"},
{ 0xA406, "SceneCaptureType"},
{ 0xA407, "GainControl"},
{ 0xA408, "Contrast"},
{ 0xA409, "Saturation"},
{ 0xA40A, "Sharpness"},
{ 0xA40B, "DeviceSettingDescription"},
{ 0xA40C, "SubjectDistanceRange"},
{ 0xA420, "ImageUniqueID"},
TAG_TABLE_END
} ;
static tag_info_array tag_table_GPS = {
{ 0x0000, "GPSVersion"},
{ 0x0001, "GPSLatitudeRef"},
{ 0x0002, "GPSLatitude"},
{ 0x0003, "GPSLongitudeRef"},
{ 0x0004, "GPSLongitude"},
{ 0x0005, "GPSAltitudeRef"},
{ 0x0006, "GPSAltitude"},
{ 0x0007, "GPSTimeStamp"},
{ 0x0008, "GPSSatellites"},
{ 0x0009, "GPSStatus"},
{ 0x000A, "GPSMeasureMode"},
{ 0x000B, "GPSDOP"},
{ 0x000C, "GPSSpeedRef"},
{ 0x000D, "GPSSpeed"},
{ 0x000E, "GPSTrackRef"},
{ 0x000F, "GPSTrack"},
{ 0x0010, "GPSImgDirectionRef"},
{ 0x0011, "GPSImgDirection"},
{ 0x0012, "GPSMapDatum"},
{ 0x0013, "GPSDestLatitudeRef"},
{ 0x0014, "GPSDestLatitude"},
{ 0x0015, "GPSDestLongitudeRef"},
{ 0x0016, "GPSDestLongitude"},
{ 0x0017, "GPSDestBearingRef"},
{ 0x0018, "GPSDestBearing"},
{ 0x0019, "GPSDestDistanceRef"},
{ 0x001A, "GPSDestDistance"},
{ 0x001B, "GPSProcessingMode"},
{ 0x001C, "GPSAreaInformation"},
{ 0x001D, "GPSDateStamp"},
{ 0x001E, "GPSDifferential"},
TAG_TABLE_END
};
static tag_info_array tag_table_IOP = {
{ 0x0001, "InterOperabilityIndex"}, /* should be 'R98' or 'THM' */
{ 0x0002, "InterOperabilityVersion"},
{ 0x1000, "RelatedFileFormat"},
{ 0x1001, "RelatedImageWidth"},
{ 0x1002, "RelatedImageHeight"},
TAG_TABLE_END
};
static tag_info_array tag_table_VND_CANON = {
{ 0x0001, "ModeArray"}, /* guess */
{ 0x0004, "ImageInfo"}, /* guess */
{ 0x0006, "ImageType"},
{ 0x0007, "FirmwareVersion"},
{ 0x0008, "ImageNumber"},
{ 0x0009, "OwnerName"},
{ 0x000C, "Camera"},
{ 0x000F, "CustomFunctions"},
TAG_TABLE_END
};
static tag_info_array tag_table_VND_CASIO = {
{ 0x0001, "RecordingMode"},
{ 0x0002, "Quality"},
{ 0x0003, "FocusingMode"},
{ 0x0004, "FlashMode"},
{ 0x0005, "FlashIntensity"},
{ 0x0006, "ObjectDistance"},
{ 0x0007, "WhiteBalance"},
{ 0x000A, "DigitalZoom"},
{ 0x000B, "Sharpness"},
{ 0x000C, "Contrast"},
{ 0x000D, "Saturation"},
{ 0x0014, "CCDSensitivity"},
TAG_TABLE_END
};
static tag_info_array tag_table_VND_FUJI = {
{ 0x0000, "Version"},
{ 0x1000, "Quality"},
{ 0x1001, "Sharpness"},
{ 0x1002, "WhiteBalance"},
{ 0x1003, "Color"},
{ 0x1004, "Tone"},
{ 0x1010, "FlashMode"},
{ 0x1011, "FlashStrength"},
{ 0x1020, "Macro"},
{ 0x1021, "FocusMode"},
{ 0x1030, "SlowSync"},
{ 0x1031, "PictureMode"},
{ 0x1100, "ContTake"},
{ 0x1300, "BlurWarning"},
{ 0x1301, "FocusWarning"},
{ 0x1302, "AEWarning "},
TAG_TABLE_END
};
static tag_info_array tag_table_VND_NIKON = {
{ 0x0003, "Quality"},
{ 0x0004, "ColorMode"},
{ 0x0005, "ImageAdjustment"},
{ 0x0006, "CCDSensitivity"},
{ 0x0007, "WhiteBalance"},
{ 0x0008, "Focus"},
{ 0x000a, "DigitalZoom"},
{ 0x000b, "Converter"},
TAG_TABLE_END
};
static tag_info_array tag_table_VND_NIKON_990 = {
{ 0x0001, "Version"},
{ 0x0002, "ISOSetting"},
{ 0x0003, "ColorMode"},
{ 0x0004, "Quality"},
{ 0x0005, "WhiteBalance"},
{ 0x0006, "ImageSharpening"},
{ 0x0007, "FocusMode"},
{ 0x0008, "FlashSetting"},
{ 0x000F, "ISOSelection"},
{ 0x0080, "ImageAdjustment"},
{ 0x0082, "AuxiliaryLens"},
{ 0x0085, "ManualFocusDistance"},
{ 0x0086, "DigitalZoom"},
{ 0x0088, "AFFocusPosition"},
{ 0x0010, "DataDump"},
TAG_TABLE_END
};
static tag_info_array tag_table_VND_OLYMPUS = {
{ 0x0200, "SpecialMode"},
{ 0x0201, "JPEGQuality"},
{ 0x0202, "Macro"},
{ 0x0204, "DigitalZoom"},
{ 0x0207, "SoftwareRelease"},
{ 0x0208, "PictureInfo"},
{ 0x0209, "CameraId"},
{ 0x0F00, "DataDump"},
TAG_TABLE_END
};
typedef enum mn_byte_order_t {
MN_ORDER_INTEL = 0,
MN_ORDER_MOTOROLA = 1,
MN_ORDER_NORMAL
} mn_byte_order_t;
typedef enum mn_offset_mode_t {
MN_OFFSET_NORMAL,
MN_OFFSET_MAKER,
MN_OFFSET_GUESS
} mn_offset_mode_t;
typedef struct {
tag_table_type tag_table;
char * make;
char * model;
char * id_string;
int id_string_len;
int offset;
mn_byte_order_t byte_order;
mn_offset_mode_t offset_mode;
} maker_note_type;
static const maker_note_type maker_note_array[] = {
{ tag_table_VND_CANON, "Canon", NULL, NULL, 0, 0, MN_ORDER_INTEL, MN_OFFSET_GUESS},
/* { tag_table_VND_CANON, "Canon", NULL, NULL, 0, 0, MN_ORDER_NORMAL, MN_OFFSET_NORMAL},*/
{ tag_table_VND_CASIO, "CASIO", NULL, NULL, 0, 0, MN_ORDER_MOTOROLA, MN_OFFSET_NORMAL},
{ tag_table_VND_FUJI, "FUJIFILM", NULL, "FUJIFILM\x0C\x00\x00\x00", 12, 12, MN_ORDER_INTEL, MN_OFFSET_MAKER},
{ tag_table_VND_NIKON, "NIKON", NULL, "Nikon\x00\x01\x00", 8, 8, MN_ORDER_NORMAL, MN_OFFSET_NORMAL},
{ tag_table_VND_NIKON_990, "NIKON", NULL, NULL, 0, 0, MN_ORDER_NORMAL, MN_OFFSET_NORMAL},
{ tag_table_VND_OLYMPUS, "OLYMPUS OPTICAL CO.,LTD", NULL, "OLYMP\x00\x01\x00", 8, 8, MN_ORDER_NORMAL, MN_OFFSET_NORMAL},
};
/* }}} */
/* {{{ exif_get_tagname
    Get the tag name for tag_num; returns "UndefinedTag:0x%04X" (or "") if not defined */
static char * exif_get_tagname(int tag_num, char *ret, int len, tag_table_type tag_table TSRMLS_DC)
{
int i, t;
char tmp[32];
for (i = 0; (t = tag_table[i].Tag) != TAG_END_OF_LIST; i++) {
if (t == tag_num) {
if (ret && len) {
strlcpy(ret, tag_table[i].Desc, abs(len));
if (len < 0) {
memset(ret + strlen(ret), ' ', -len - strlen(ret) - 1);
ret[-len - 1] = '\0';
}
return ret;
}
return tag_table[i].Desc;
}
}
if (ret && len) {
snprintf(tmp, sizeof(tmp), "UndefinedTag:0x%04X", tag_num);
strlcpy(ret, tmp, abs(len));
if (len < 0) {
memset(ret + strlen(ret), ' ', -len - strlen(ret) - 1);
ret[-len - 1] = '\0';
}
return ret;
}
return "";
}
/* }}} */
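/* Illustrative sketch: the sign of 'len' selects the formatting mode. With
 * len > 0 the name is simply copied; with len < 0 the result is padded with
 * spaces to a fixed width of -len-1 characters (handy for aligned debug
 * output). Assuming a local buffer:
 *
 *   char name[16];
 *   exif_get_tagname(0x010F, name, sizeof(name), tag_table_IFD TSRMLS_CC);
 *       // name == "Make"
 *   exif_get_tagname(0x010F, name, -12, tag_table_IFD TSRMLS_CC);
 *       // name == "Make       " (11 characters, space padded)
 */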
/* {{{ exif_char_dump
* Do not use! This is a debug function... */
#ifdef EXIF_DEBUG
static unsigned char* exif_char_dump(unsigned char * addr, int len, int offset)
{
static unsigned char buf[4096+1];
static unsigned char tmp[20];
int c, i, p=0, n = 5+31;
p += slprintf(buf+p, sizeof(buf)-p, "\nDump Len: %08X (%d)", len, len);
if (len) {
for(i=0; i<len+15 && p+n<=sizeof(buf); i++) {
if (i%16==0) {
p += slprintf(buf+p, sizeof(buf)-p, "\n%08X: ", i+offset);
}
if (i<len) {
c = *addr++;
p += slprintf(buf+p, sizeof(buf)-p, "%02X ", c);
tmp[i%16] = c>=32 ? c : '.';
tmp[(i%16)+1] = '\0';
} else {
p += slprintf(buf+p, sizeof(buf)-p, " ");
}
if (i%16==15) {
p += slprintf(buf+p, sizeof(buf)-p, " %s", tmp);
if (i>=len) {
break;
}
}
}
}
buf[sizeof(buf)-1] = '\0';
return buf;
}
#endif
/* }}} */
/* {{{ php_jpg_get16
Get 16 bits motorola order (always) for jpeg header stuff.
*/
static int php_jpg_get16(void *value)
{
return (((uchar *)value)[0] << 8) | ((uchar *)value)[1];
}
/* }}} */
/* {{{ php_ifd_get16u
* Convert a 16 bit unsigned value from file's native byte order */
static int php_ifd_get16u(void *value, int motorola_intel)
{
if (motorola_intel) {
return (((uchar *)value)[0] << 8) | ((uchar *)value)[1];
} else {
return (((uchar *)value)[1] << 8) | ((uchar *)value)[0];
}
}
/* }}} */
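/* Illustrative sketch: motorola_intel selects the byte order. Given the two
 * raw bytes 0x01 0x02:
 *
 *   uchar raw[2] = { 0x01, 0x02 };
 *   php_ifd_get16u(raw, 1);   // Motorola (big-endian)    -> 0x0102
 *   php_ifd_get16u(raw, 0);   // Intel    (little-endian) -> 0x0201
 */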
/* {{{ php_ifd_get16s
* Convert a 16 bit signed value from file's native byte order */
static signed short php_ifd_get16s(void *value, int motorola_intel)
{
return (signed short)php_ifd_get16u(value, motorola_intel);
}
/* }}} */
/* {{{ php_ifd_get32s
* Convert a 32 bit signed value from file's native byte order */
static int php_ifd_get32s(void *value, int motorola_intel)
{
if (motorola_intel) {
return (((char *)value)[0] << 24)
| (((uchar *)value)[1] << 16)
| (((uchar *)value)[2] << 8 )
| (((uchar *)value)[3] );
} else {
return (((char *)value)[3] << 24)
| (((uchar *)value)[2] << 16)
| (((uchar *)value)[1] << 8 )
| (((uchar *)value)[0] );
}
}
/* }}} */
/* {{{ php_ifd_get32u
 * Convert a 32 bit unsigned value from file's native byte order */
static unsigned php_ifd_get32u(void *value, int motorola_intel)
{
return (unsigned)php_ifd_get32s(value, motorola_intel) & 0xffffffff;
}
/* }}} */
/* {{{ php_ifd_set16u
* Write 16 bit unsigned value to data */
static void php_ifd_set16u(char *data, unsigned int value, int motorola_intel)
{
if (motorola_intel) {
data[0] = (value & 0xFF00) >> 8;
data[1] = (value & 0x00FF);
} else {
data[1] = (value & 0xFF00) >> 8;
data[0] = (value & 0x00FF);
}
}
/* }}} */
/* {{{ php_ifd_set32u
 * Write a 32 bit unsigned value to data in the requested byte order */
static void php_ifd_set32u(char *data, size_t value, int motorola_intel)
{
if (motorola_intel) {
data[0] = (value & 0xFF000000) >> 24;
data[1] = (value & 0x00FF0000) >> 16;
data[2] = (value & 0x0000FF00) >> 8;
data[3] = (value & 0x000000FF);
} else {
data[3] = (value & 0xFF000000) >> 24;
data[2] = (value & 0x00FF0000) >> 16;
data[1] = (value & 0x0000FF00) >> 8;
data[0] = (value & 0x000000FF);
}
}
/* }}} */
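/* Illustrative sketch: for a given byte order the set/get helpers are
 * inverses, so a value encoded with php_ifd_set32u() reads back unchanged:
 *
 *   char buf[4];
 *   php_ifd_set32u(buf, 0x11223344, 1);            // big-endian encode
 *   assert(php_ifd_get32u(buf, 1) == 0x11223344);  // round trip (needs <assert.h>)
 */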
#ifdef EXIF_DEBUG
char * exif_dump_data(int *dump_free, int format, int components, int length, int motorola_intel, char *value_ptr TSRMLS_DC) /* {{{ */
{
char *dump;
int len;
*dump_free = 0;
if (format == TAG_FMT_STRING) {
return value_ptr ? value_ptr : "<no data>";
}
if (format == TAG_FMT_UNDEFINED) {
return "<undefined>\n";
}
if (format == TAG_FMT_IFD) {
return "";
}
if (format == TAG_FMT_SINGLE || format == TAG_FMT_DOUBLE) {
return "<not implemented>";
}
*dump_free = 1;
if (components > 1) {
len = spprintf(&dump, 0, "(%d,%d) {", components, length);
} else {
len = spprintf(&dump, 0, "{");
}
while(components > 0) {
switch(format) {
case TAG_FMT_BYTE:
case TAG_FMT_UNDEFINED:
case TAG_FMT_STRING:
case TAG_FMT_SBYTE:
dump = erealloc(dump, len + 4 + 1);
snprintf(dump + len, 4 + 1, "0x%02X", *value_ptr);
len += 4;
value_ptr++;
break;
case TAG_FMT_USHORT:
case TAG_FMT_SSHORT:
dump = erealloc(dump, len + 6 + 1);
snprintf(dump + len, 6 + 1, "0x%04X", php_ifd_get16s(value_ptr, motorola_intel));
len += 6;
value_ptr += 2;
break;
case TAG_FMT_ULONG:
case TAG_FMT_SLONG:
dump = erealloc(dump, len + 6 + 1);
snprintf(dump + len, 6 + 1, "0x%04X", php_ifd_get32s(value_ptr, motorola_intel));
len += 6;
value_ptr += 4;
break;
case TAG_FMT_URATIONAL:
case TAG_FMT_SRATIONAL:
dump = erealloc(dump, len + 13 + 1);
snprintf(dump + len, 13 + 1, "0x%04X/0x%04X", php_ifd_get32s(value_ptr, motorola_intel), php_ifd_get32s(value_ptr+4, motorola_intel));
len += 13;
value_ptr += 8;
break;
}
if (components > 0) {
dump = erealloc(dump, len + 2 + 1);
snprintf(dump + len, 2 + 1, ", ");
len += 2;
components--;
} else{
break;
}
}
dump = erealloc(dump, len + 1 + 1);
snprintf(dump + len, 1 + 1, "}");
return dump;
}
/* }}} */
#endif
/* {{{ exif_convert_any_format
* Evaluate number, be it int, rational, or float from directory. */
static double exif_convert_any_format(void *value, int format, int motorola_intel TSRMLS_DC)
{
int s_den;
unsigned u_den;
switch(format) {
case TAG_FMT_SBYTE: return *(signed char *)value;
case TAG_FMT_BYTE: return *(uchar *)value;
case TAG_FMT_USHORT: return php_ifd_get16u(value, motorola_intel);
case TAG_FMT_ULONG: return php_ifd_get32u(value, motorola_intel);
case TAG_FMT_URATIONAL:
u_den = php_ifd_get32u(4+(char *)value, motorola_intel);
if (u_den == 0) {
return 0;
} else {
return (double)php_ifd_get32u(value, motorola_intel) / u_den;
}
case TAG_FMT_SRATIONAL:
s_den = php_ifd_get32s(4+(char *)value, motorola_intel);
if (s_den == 0) {
return 0;
} else {
return (double)php_ifd_get32s(value, motorola_intel) / s_den;
}
case TAG_FMT_SSHORT: return (signed short)php_ifd_get16u(value, motorola_intel);
case TAG_FMT_SLONG: return php_ifd_get32s(value, motorola_intel);
/* Not sure if this is correct (never seen float used in Exif format) */
case TAG_FMT_SINGLE:
#ifdef EXIF_DEBUG
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Found value of type single");
#endif
return (double)*(float *)value;
case TAG_FMT_DOUBLE:
#ifdef EXIF_DEBUG
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Found value of type double");
#endif
return *(double *)value;
}
return 0;
}
/* }}} */
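/* Illustrative sketch: a (U)RATIONAL is stored as two consecutive 32-bit
 * values, numerator then denominator; a zero denominator yields 0 instead
 * of dividing. Assuming big-endian (Motorola) data:
 *
 *   uchar r[8] = { 0,0,0,1, 0,0,0,4 };                          // 1/4
 *   exif_convert_any_format(r, TAG_FMT_URATIONAL, 1 TSRMLS_CC); // -> 0.25
 */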
/* {{{ exif_convert_any_to_int
* Evaluate number, be it int, rational, or float from directory. */
static size_t exif_convert_any_to_int(void *value, int format, int motorola_intel TSRMLS_DC)
{
int s_den;
unsigned u_den;
switch(format) {
case TAG_FMT_SBYTE: return *(signed char *)value;
case TAG_FMT_BYTE: return *(uchar *)value;
case TAG_FMT_USHORT: return php_ifd_get16u(value, motorola_intel);
case TAG_FMT_ULONG: return php_ifd_get32u(value, motorola_intel);
case TAG_FMT_URATIONAL:
u_den = php_ifd_get32u(4+(char *)value, motorola_intel);
if (u_den == 0) {
return 0;
} else {
return php_ifd_get32u(value, motorola_intel) / u_den;
}
case TAG_FMT_SRATIONAL:
s_den = php_ifd_get32s(4+(char *)value, motorola_intel);
if (s_den == 0) {
return 0;
} else {
return php_ifd_get32s(value, motorola_intel) / s_den;
}
case TAG_FMT_SSHORT: return php_ifd_get16u(value, motorola_intel);
case TAG_FMT_SLONG: return php_ifd_get32s(value, motorola_intel);
/* Not sure if this is correct (never seen float used in Exif format) */
case TAG_FMT_SINGLE:
#ifdef EXIF_DEBUG
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Found value of type single");
#endif
return (size_t)*(float *)value;
case TAG_FMT_DOUBLE:
#ifdef EXIF_DEBUG
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Found value of type double");
#endif
return (size_t)*(double *)value;
}
return 0;
}
/* }}} */
/* {{{ struct image_info_value, image_info_list
*/
#ifndef WORD
#define WORD unsigned short
#endif
#ifndef DWORD
#define DWORD unsigned int
#endif
typedef struct {
int num;
int den;
} signed_rational;
typedef struct {
unsigned int num;
unsigned int den;
} unsigned_rational;
typedef union _image_info_value {
char *s;
unsigned u;
int i;
float f;
double d;
signed_rational sr;
unsigned_rational ur;
union _image_info_value *list;
} image_info_value;
typedef struct {
WORD tag;
WORD format;
DWORD length;
DWORD dummy; /* value ptr of tiff directory entry */
char *name;
image_info_value value;
} image_info_data;
typedef struct {
int count;
image_info_data *list;
} image_info_list;
/* }}} */
/* {{{ exif_get_sectionname
Returns the name of a section
*/
#define SECTION_FILE 0
#define SECTION_COMPUTED 1
#define SECTION_ANY_TAG 2
#define SECTION_IFD0 3
#define SECTION_THUMBNAIL 4
#define SECTION_COMMENT 5
#define SECTION_APP0 6
#define SECTION_EXIF 7
#define SECTION_FPIX 8
#define SECTION_GPS 9
#define SECTION_INTEROP 10
#define SECTION_APP12 11
#define SECTION_WINXP 12
#define SECTION_MAKERNOTE 13
#define SECTION_COUNT 14
#define FOUND_FILE (1<<SECTION_FILE)
#define FOUND_COMPUTED (1<<SECTION_COMPUTED)
#define FOUND_ANY_TAG (1<<SECTION_ANY_TAG)
#define FOUND_IFD0 (1<<SECTION_IFD0)
#define FOUND_THUMBNAIL (1<<SECTION_THUMBNAIL)
#define FOUND_COMMENT (1<<SECTION_COMMENT)
#define FOUND_APP0 (1<<SECTION_APP0)
#define FOUND_EXIF (1<<SECTION_EXIF)
#define FOUND_FPIX (1<<SECTION_FPIX)
#define FOUND_GPS (1<<SECTION_GPS)
#define FOUND_INTEROP (1<<SECTION_INTEROP)
#define FOUND_APP12 (1<<SECTION_APP12)
#define FOUND_WINXP (1<<SECTION_WINXP)
#define FOUND_MAKERNOTE (1<<SECTION_MAKERNOTE)
static char *exif_get_sectionname(int section)
{
switch(section) {
case SECTION_FILE: return "FILE";
case SECTION_COMPUTED: return "COMPUTED";
case SECTION_ANY_TAG: return "ANY_TAG";
case SECTION_IFD0: return "IFD0";
case SECTION_THUMBNAIL: return "THUMBNAIL";
case SECTION_COMMENT: return "COMMENT";
case SECTION_APP0: return "APP0";
case SECTION_EXIF: return "EXIF";
case SECTION_FPIX: return "FPIX";
case SECTION_GPS: return "GPS";
case SECTION_INTEROP: return "INTEROP";
case SECTION_APP12: return "APP12";
case SECTION_WINXP: return "WINXP";
case SECTION_MAKERNOTE: return "MAKERNOTE";
}
return "";
}
static tag_table_type exif_get_tag_table(int section)
{
switch(section) {
case SECTION_FILE: return &tag_table_IFD[0];
case SECTION_COMPUTED: return &tag_table_IFD[0];
case SECTION_ANY_TAG: return &tag_table_IFD[0];
case SECTION_IFD0: return &tag_table_IFD[0];
case SECTION_THUMBNAIL: return &tag_table_IFD[0];
case SECTION_COMMENT: return &tag_table_IFD[0];
case SECTION_APP0: return &tag_table_IFD[0];
case SECTION_EXIF: return &tag_table_IFD[0];
case SECTION_FPIX: return &tag_table_IFD[0];
case SECTION_GPS: return &tag_table_GPS[0];
case SECTION_INTEROP: return &tag_table_IOP[0];
case SECTION_APP12: return &tag_table_IFD[0];
case SECTION_WINXP: return &tag_table_IFD[0];
}
return &tag_table_IFD[0];
}
/* }}} */
/* {{{ exif_get_sectionlist
   Return a comma-separated list of the section names selected by sectionlist. The return value must be freed by the caller
*/
static char *exif_get_sectionlist(int sectionlist TSRMLS_DC)
{
int i, len, ml = 0;
char *sections;
for(i=0; i<SECTION_COUNT; i++) {
ml += strlen(exif_get_sectionname(i))+2;
}
sections = safe_emalloc(ml, 1, 1);
sections[0] = '\0';
len = 0;
for(i=0; i<SECTION_COUNT; i++) {
if (sectionlist&(1<<i)) {
snprintf(sections+len, ml-len, "%s, ", exif_get_sectionname(i));
len = strlen(sections);
}
}
if (len>2)
sections[len-2] = '\0';
return sections;
}
/* }}} */
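/* Illustrative sketch: sections_found is a bitmask of the FOUND_* flags
 * defined above, so the discovered sections can be rendered as text:
 *
 *   char *s = exif_get_sectionlist(FOUND_IFD0|FOUND_EXIF TSRMLS_CC);
 *   // s == "IFD0, EXIF"; the caller must efree(s)
 */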
/* {{{ struct image_info_type
   This structure stores Exif header image elements in a simple manner.
   Used to store camera data as extracted from the various ways that it can be
   stored in an Exif header
*/
typedef struct {
int type;
size_t size;
uchar *data;
} file_section;
typedef struct {
int count;
file_section *list;
} file_section_list;
typedef struct {
image_filetype filetype;
size_t width, height;
size_t size;
size_t offset;
char *data;
} thumbnail_data;
typedef struct {
char *value;
size_t size;
int tag;
} xp_field_type;
typedef struct {
int count;
xp_field_type *list;
} xp_field_list;
/* This structure collects all information extracted while parsing an image file. */
typedef struct {
php_stream *infile;
char *FileName;
time_t FileDateTime;
size_t FileSize;
image_filetype FileType;
int Height, Width;
int IsColor;
char *make;
char *model;
float ApertureFNumber;
float ExposureTime;
double FocalplaneUnits;
float CCDWidth;
double FocalplaneXRes;
size_t ExifImageWidth;
float FocalLength;
float Distance;
int motorola_intel; /* 1 Motorola; 0 Intel */
char *UserComment;
int UserCommentLength;
char *UserCommentEncoding;
char *encode_unicode;
char *decode_unicode_be;
char *decode_unicode_le;
char *encode_jis;
char *decode_jis_be;
char *decode_jis_le;
char *Copyright;/* EXIF standard defines Copyright as "<Photographer> [ '\0' <Editor> ] ['\0']" */
char *CopyrightPhotographer;
char *CopyrightEditor;
xp_field_list xp_fields;
thumbnail_data Thumbnail;
/* other */
int sections_found; /* FOUND_<marker> */
image_info_list info_list[SECTION_COUNT];
/* for parsing */
int read_thumbnail;
int read_all;
int ifd_nesting_level;
/* internal */
file_section_list file;
} image_info_type;
/* }}} */
/* {{{ exif_error_docref */
static void exif_error_docref(const char *docref EXIFERR_DC, const image_info_type *ImageInfo, int type, const char *format, ...)
{
va_list args;
va_start(args, format);
#ifdef EXIF_DEBUG
{
char *buf;
spprintf(&buf, 0, "%s(%d): %s", _file, _line, format);
php_verror(docref, ImageInfo->FileName?ImageInfo->FileName:"", type, buf, args TSRMLS_CC);
efree(buf);
}
#else
php_verror(docref, ImageInfo->FileName?ImageInfo->FileName:"", type, format, args TSRMLS_CC);
#endif
va_end(args);
}
/* }}} */
/* {{{ jpeg_sof_info
*/
typedef struct {
int bits_per_sample;
size_t width;
size_t height;
int num_components;
} jpeg_sof_info;
/* }}} */
/* {{{ exif_file_sections_add
   Add a file_section to image_info. Returns the index of the block used.
   If size > 0 and data == NULL, a buffer of the given size is allocated
*/
static int exif_file_sections_add(image_info_type *ImageInfo, int type, size_t size, uchar *data)
{
file_section *tmp;
int count = ImageInfo->file.count;
tmp = safe_erealloc(ImageInfo->file.list, (count+1), sizeof(file_section), 0);
ImageInfo->file.list = tmp;
ImageInfo->file.list[count].type = 0xFFFF;
ImageInfo->file.list[count].data = NULL;
ImageInfo->file.list[count].size = 0;
ImageInfo->file.count = count+1;
if (!size) {
data = NULL;
} else if (data == NULL) {
data = safe_emalloc(size, 1, 0);
}
ImageInfo->file.list[count].type = type;
ImageInfo->file.list[count].data = data;
ImageInfo->file.list[count].size = size;
return count;
}
/* }}} */
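/* Illustrative sketch: passing data == NULL together with size > 0 makes the
 * helper allocate the buffer itself; the returned index addresses the slot:
 *
 *   int i = exif_file_sections_add(ImageInfo, M_EXIF, 128, NULL);
 *   // ImageInfo->file.list[i].data now points to a fresh 128 byte buffer
 */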
/* {{{ exif_file_sections_realloc
   Reallocate a file section. Returns 0 on success and -1 on failure
*/
static int exif_file_sections_realloc(image_info_type *ImageInfo, int section_index, size_t size TSRMLS_DC)
{
void *tmp;
/* This is not a malloc/realloc check. It is a plausibility check for the
* function parameters (requirements engineering).
*/
if (section_index >= ImageInfo->file.count) {
EXIF_ERRLOG_FSREALLOC(ImageInfo)
return -1;
}
tmp = safe_erealloc(ImageInfo->file.list[section_index].data, 1, size, 0);
ImageInfo->file.list[section_index].data = tmp;
ImageInfo->file.list[section_index].size = size;
return 0;
}
/* }}} */
/* {{{ exif_file_sections_free
   Discard all file_sections in ImageInfo
*/
static int exif_file_sections_free(image_info_type *ImageInfo)
{
int i;
if (ImageInfo->file.count) {
for (i=0; i<ImageInfo->file.count; i++) {
EFREE_IF(ImageInfo->file.list[i].data);
}
}
EFREE_IF(ImageInfo->file.list);
ImageInfo->file.count = 0;
return TRUE;
}
/* }}} */
/* {{{ exif_iif_add_value
Add a value to image_info
*/
static void exif_iif_add_value(image_info_type *image_info, int section_index, char *name, int tag, int format, int length, void* value, int motorola_intel TSRMLS_DC)
{
size_t idex;
void *vptr;
image_info_value *info_value;
image_info_data *info_data;
image_info_data *list;
if (length < 0) {
return;
}
list = safe_erealloc(image_info->info_list[section_index].list, (image_info->info_list[section_index].count+1), sizeof(image_info_data), 0);
image_info->info_list[section_index].list = list;
info_data = &image_info->info_list[section_index].list[image_info->info_list[section_index].count];
memset(info_data, 0, sizeof(image_info_data));
info_data->tag = tag;
info_data->format = format;
info_data->length = length;
info_data->name = estrdup(name);
info_value = &info_data->value;
switch (format) {
case TAG_FMT_STRING:
if (value) {
length = php_strnlen(value, length);
info_value->s = estrndup(value, length);
info_data->length = length;
} else {
info_data->length = 0;
info_value->s = estrdup("");
}
break;
default:
/* The standard defines more types, but we skip them here.
 * Still allow users to handle the data if they know how,
 * so do not return; treat the value as UNDEFINED instead.
 * return;
 */
info_data->tag = TAG_FMT_UNDEFINED;/* otherwise not freed from memory */
case TAG_FMT_SBYTE:
case TAG_FMT_BYTE:
/* in contrast to strings, bytes do not need a buffer allocated for NULL when length==0 */
if (!length)
break;
case TAG_FMT_UNDEFINED:
if (value) {
if (tag == TAG_MAKER_NOTE) {
length = MIN(length, strlen(value));
}
/* do not recompute length here */
info_value->s = estrndup(value, length);
info_data->length = length;
} else {
info_data->length = 0;
info_value->s = estrdup("");
}
break;
case TAG_FMT_USHORT:
case TAG_FMT_ULONG:
case TAG_FMT_URATIONAL:
case TAG_FMT_SSHORT:
case TAG_FMT_SLONG:
case TAG_FMT_SRATIONAL:
case TAG_FMT_SINGLE:
case TAG_FMT_DOUBLE:
if (length==0) {
break;
} else
if (length>1) {
info_value->list = safe_emalloc(length, sizeof(image_info_value), 0);
} else {
info_value = &info_data->value;
}
for (idex=0,vptr=value; idex<(size_t)length; idex++,vptr=(char *) vptr + php_tiff_bytes_per_format[format]) {
if (length>1) {
info_value = &info_data->value.list[idex];
}
switch (format) {
case TAG_FMT_USHORT:
info_value->u = php_ifd_get16u(vptr, motorola_intel);
break;
case TAG_FMT_ULONG:
info_value->u = php_ifd_get32u(vptr, motorola_intel);
break;
case TAG_FMT_URATIONAL:
info_value->ur.num = php_ifd_get32u(vptr, motorola_intel);
info_value->ur.den = php_ifd_get32u(4+(char *)vptr, motorola_intel);
break;
case TAG_FMT_SSHORT:
info_value->i = php_ifd_get16s(vptr, motorola_intel);
break;
case TAG_FMT_SLONG:
info_value->i = php_ifd_get32s(vptr, motorola_intel);
break;
case TAG_FMT_SRATIONAL:
info_value->sr.num = php_ifd_get32u(vptr, motorola_intel);
info_value->sr.den = php_ifd_get32u(4+(char *)vptr, motorola_intel);
break;
case TAG_FMT_SINGLE:
#ifdef EXIF_DEBUG
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Found value of type single");
#endif
/* read from vptr (not value) so each component is picked up, and break
 * so we do not fall through and clobber the union with a double */
info_value->f = *(float *)vptr;
break;
case TAG_FMT_DOUBLE:
#ifdef EXIF_DEBUG
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Found value of type double");
#endif
info_value->d = *(double *)vptr;
break;
}
}
}
image_info->sections_found |= 1<<section_index;
image_info->info_list[section_index].count++;
}
/* }}} */
/* {{{ exif_iif_add_tag
Add a tag from IFD to image_info
*/
static void exif_iif_add_tag(image_info_type *image_info, int section_index, char *name, int tag, int format, size_t length, void* value TSRMLS_DC)
{
exif_iif_add_value(image_info, section_index, name, tag, format, (int)length, value, image_info->motorola_intel TSRMLS_CC);
}
/* }}} */
/* {{{ exif_iif_add_int
Add an int value to image_info
*/
static void exif_iif_add_int(image_info_type *image_info, int section_index, char *name, int value TSRMLS_DC)
{
image_info_data *info_data;
image_info_data *list;
list = safe_erealloc(image_info->info_list[section_index].list, (image_info->info_list[section_index].count+1), sizeof(image_info_data), 0);
image_info->info_list[section_index].list = list;
info_data = &image_info->info_list[section_index].list[image_info->info_list[section_index].count];
info_data->tag = TAG_NONE;
info_data->format = TAG_FMT_SLONG;
info_data->length = 1;
info_data->name = estrdup(name);
info_data->value.i = value;
image_info->sections_found |= 1<<section_index;
image_info->info_list[section_index].count++;
}
/* }}} */
/* {{{ exif_iif_add_str
Add a string value to image_info MUST BE NUL TERMINATED
*/
static void exif_iif_add_str(image_info_type *image_info, int section_index, char *name, char *value TSRMLS_DC)
{
image_info_data *info_data;
image_info_data *list;
if (value) {
list = safe_erealloc(image_info->info_list[section_index].list, (image_info->info_list[section_index].count+1), sizeof(image_info_data), 0);
image_info->info_list[section_index].list = list;
info_data = &image_info->info_list[section_index].list[image_info->info_list[section_index].count];
info_data->tag = TAG_NONE;
info_data->format = TAG_FMT_STRING;
info_data->length = 1;
info_data->name = estrdup(name);
info_data->value.s = estrdup(value);
image_info->sections_found |= 1<<section_index;
image_info->info_list[section_index].count++;
}
}
/* }}} */
/* {{{ exif_iif_add_fmt
Add a format string value to image_info MUST BE NUL TERMINATED
*/
static void exif_iif_add_fmt(image_info_type *image_info, int section_index, char *name TSRMLS_DC, char *value, ...)
{
char *tmp;
va_list arglist;
va_start(arglist, value);
if (value) {
vspprintf(&tmp, 0, value, arglist);
exif_iif_add_str(image_info, section_index, name, tmp TSRMLS_CC);
efree(tmp);
}
va_end(arglist);
}
/* }}} */
/* {{{ exif_iif_add_buffer
   Add a binary buffer of the given length to image_info (value need not be NUL terminated; the copy gets one)
*/
static void exif_iif_add_buffer(image_info_type *image_info, int section_index, char *name, int length, char *value TSRMLS_DC)
{
image_info_data *info_data;
image_info_data *list;
if (value) {
list = safe_erealloc(image_info->info_list[section_index].list, (image_info->info_list[section_index].count+1), sizeof(image_info_data), 0);
image_info->info_list[section_index].list = list;
info_data = &image_info->info_list[section_index].list[image_info->info_list[section_index].count];
info_data->tag = TAG_NONE;
info_data->format = TAG_FMT_UNDEFINED;
info_data->length = length;
info_data->name = estrdup(name);
info_data->value.s = safe_emalloc(length, 1, 1);
memcpy(info_data->value.s, value, length);
info_data->value.s[length] = 0;
image_info->sections_found |= 1<<section_index;
image_info->info_list[section_index].count++;
}
}
/* }}} */
/* {{{ exif_iif_free
Free memory allocated for image_info
*/
static void exif_iif_free(image_info_type *image_info, int section_index) {
int i;
void *f; /* faster */
if (image_info->info_list[section_index].count) {
for (i=0; i < image_info->info_list[section_index].count; i++) {
if ((f=image_info->info_list[section_index].list[i].name) != NULL) {
efree(f);
}
switch(image_info->info_list[section_index].list[i].format) {
case TAG_FMT_SBYTE:
case TAG_FMT_BYTE:
/* in contrast to strings, bytes do not need a buffer allocated for NULL when length==0 */
if (image_info->info_list[section_index].list[i].length<1)
break;
default:
case TAG_FMT_UNDEFINED:
case TAG_FMT_STRING:
if ((f=image_info->info_list[section_index].list[i].value.s) != NULL) {
efree(f);
}
break;
case TAG_FMT_USHORT:
case TAG_FMT_ULONG:
case TAG_FMT_URATIONAL:
case TAG_FMT_SSHORT:
case TAG_FMT_SLONG:
case TAG_FMT_SRATIONAL:
case TAG_FMT_SINGLE:
case TAG_FMT_DOUBLE:
/* nothing to do here */
if (image_info->info_list[section_index].list[i].length > 1) {
if ((f=image_info->info_list[section_index].list[i].value.list) != NULL) {
efree(f);
}
}
break;
}
}
}
EFREE_IF(image_info->info_list[section_index].list);
}
/* }}} */
/* {{{ add_assoc_image_info
* Add image_info to associative array value. */
static void add_assoc_image_info(zval *value, int sub_array, image_info_type *image_info, int section_index TSRMLS_DC)
{
char buffer[64], *val, *name, uname[64];
int i, ap, l, b, idx=0, unknown=0;
#ifdef EXIF_DEBUG
int info_tag;
#endif
image_info_value *info_value;
image_info_data *info_data;
zval *tmpi, *array = NULL;
#ifdef EXIF_DEBUG
/* php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Adding %d infos from section %s", image_info->info_list[section_index].count, exif_get_sectionname(section_index));*/
#endif
if (image_info->info_list[section_index].count) {
if (sub_array) {
MAKE_STD_ZVAL(tmpi);
array_init(tmpi);
} else {
tmpi = value;
}
for(i=0; i<image_info->info_list[section_index].count; i++) {
info_data = &image_info->info_list[section_index].list[i];
#ifdef EXIF_DEBUG
info_tag = info_data->tag; /* conversion */
#endif
info_value = &info_data->value;
if (!(name = info_data->name)) {
snprintf(uname, sizeof(uname), "%d", unknown++);
name = uname;
}
#ifdef EXIF_DEBUG
/* php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Adding infos: tag(0x%04X,%12s,L=0x%04X): %s", info_tag, exif_get_tagname(info_tag, buffer, -12, exif_get_tag_table(section_index) TSRMLS_CC), info_data->length, info_data->format==TAG_FMT_STRING?(info_value&&info_value->s?info_value->s:"<no data>"):exif_get_tagformat(info_data->format));*/
#endif
if (info_data->length==0) {
add_assoc_null(tmpi, name);
} else {
switch (info_data->format) {
default:
/* The standard defines more types, but we skip them here.
 * Still allow users to handle the data if they know how,
 * so do not return; treat the value as UNDEFINED instead.
 * return;
 */
case TAG_FMT_BYTE:
case TAG_FMT_SBYTE:
case TAG_FMT_UNDEFINED:
if (!info_value->s) {
add_assoc_stringl(tmpi, name, "", 0, 1);
} else {
add_assoc_stringl(tmpi, name, info_value->s, info_data->length, 1);
}
break;
case TAG_FMT_STRING:
if (!(val = info_value->s)) {
val = "";
}
if (section_index==SECTION_COMMENT) {
add_index_string(tmpi, idx++, val, 1);
} else {
add_assoc_string(tmpi, name, val, 1);
}
break;
case TAG_FMT_URATIONAL:
case TAG_FMT_SRATIONAL:
/*case TAG_FMT_BYTE:
case TAG_FMT_SBYTE:*/
case TAG_FMT_USHORT:
case TAG_FMT_SSHORT:
case TAG_FMT_SINGLE:
case TAG_FMT_DOUBLE:
case TAG_FMT_ULONG:
case TAG_FMT_SLONG:
/* now the rest, first see if it becomes an array */
if ((l = info_data->length) > 1) {
array = NULL;
MAKE_STD_ZVAL(array);
array_init(array);
}
for(ap=0; ap<l; ap++) {
if (l>1) {
info_value = &info_data->value.list[ap];
}
switch (info_data->format) {
case TAG_FMT_BYTE:
if (l>1) {
info_value = &info_data->value;
for (b=0;b<l;b++) {
add_index_long(array, b, (int)(info_value->s[b]));
}
break;
}
case TAG_FMT_USHORT:
case TAG_FMT_ULONG:
if (l==1) {
add_assoc_long(tmpi, name, (int)info_value->u);
} else {
add_index_long(array, ap, (int)info_value->u);
}
break;
case TAG_FMT_URATIONAL:
snprintf(buffer, sizeof(buffer), "%i/%i", info_value->ur.num, info_value->ur.den);
if (l==1) {
add_assoc_string(tmpi, name, buffer, 1);
} else {
add_index_string(array, ap, buffer, 1);
}
break;
case TAG_FMT_SBYTE:
if (l>1) {
info_value = &info_data->value;
for (b=0;b<l;b++) {
add_index_long(array, ap, (int)info_value->s[b]);
}
break;
}
case TAG_FMT_SSHORT:
case TAG_FMT_SLONG:
if (l==1) {
add_assoc_long(tmpi, name, info_value->i);
} else {
add_index_long(array, ap, info_value->i);
}
break;
case TAG_FMT_SRATIONAL:
snprintf(buffer, sizeof(buffer), "%i/%i", info_value->sr.num, info_value->sr.den);
if (l==1) {
add_assoc_string(tmpi, name, buffer, 1);
} else {
add_index_string(array, ap, buffer, 1);
}
break;
case TAG_FMT_SINGLE:
if (l==1) {
add_assoc_double(tmpi, name, info_value->f);
} else {
add_index_double(array, ap, info_value->f);
}
break;
case TAG_FMT_DOUBLE:
if (l==1) {
add_assoc_double(tmpi, name, info_value->d);
} else {
add_index_double(array, ap, info_value->d);
}
break;
}
info_value = &info_data->value.list[ap];
}
if (l>1) {
add_assoc_zval(tmpi, name, array);
}
break;
}
}
}
if (sub_array) {
add_assoc_zval(value, exif_get_sectionname(section_index), tmpi);
}
}
}
/* }}} */
/* {{{ Markers
JPEG markers consist of one or more 0xFF bytes, followed by a marker
code byte (which is not an FF). Here are the marker codes of interest
in this program. (See jdmarker.c for a more complete list.)
*/
#define M_TEM 0x01 /* temp for arithmetic coding */
#define M_RES 0x02 /* reserved */
#define M_SOF0 0xC0 /* Start Of Frame N */
#define M_SOF1 0xC1 /* N indicates which compression process */
#define M_SOF2 0xC2 /* Only SOF0-SOF2 are now in common use */
#define M_SOF3 0xC3
#define M_DHT 0xC4
#define M_SOF5 0xC5 /* NB: codes C4 and CC are NOT SOF markers */
#define M_SOF6 0xC6
#define M_SOF7 0xC7
#define M_JPEG 0x08 /* reserved for extensions */
#define M_SOF9 0xC9
#define M_SOF10 0xCA
#define M_SOF11 0xCB
#define M_DAC 0xCC /* arithmetic table */
#define M_SOF13 0xCD
#define M_SOF14 0xCE
#define M_SOF15 0xCF
#define M_RST0 0xD0 /* restart segment */
#define M_RST1 0xD1
#define M_RST2 0xD2
#define M_RST3 0xD3
#define M_RST4 0xD4
#define M_RST5 0xD5
#define M_RST6 0xD6
#define M_RST7 0xD7
#define M_SOI 0xD8 /* Start Of Image (beginning of datastream) */
#define M_EOI 0xD9 /* End Of Image (end of datastream) */
#define M_SOS 0xDA /* Start Of Scan (begins compressed data) */
#define M_DQT 0xDB
#define M_DNL 0xDC
#define M_DRI 0xDD
#define M_DHP 0xDE
#define M_EXP 0xDF
#define M_APP0 0xE0 /* JPEG: 'JFIF' AND (additional 'JFXX') */
#define M_EXIF 0xE1 /* Exif Attribute Information */
#define M_APP2 0xE2 /* Flash Pix Extension Data? */
#define M_APP3 0xE3
#define M_APP4 0xE4
#define M_APP5 0xE5
#define M_APP6 0xE6
#define M_APP7 0xE7
#define M_APP8 0xE8
#define M_APP9 0xE9
#define M_APP10 0xEA
#define M_APP11 0xEB
#define M_APP12 0xEC
#define M_APP13 0xED /* IPTC International Press Telecommunications Council */
#define M_APP14 0xEE /* Software, Copyright? */
#define M_APP15 0xEF
#define M_JPG0 0xF0
#define M_JPG1 0xF1
#define M_JPG2 0xF2
#define M_JPG3 0xF3
#define M_JPG4 0xF4
#define M_JPG5 0xF5
#define M_JPG6 0xF6
#define M_JPG7 0xF7
#define M_JPG8 0xF8
#define M_JPG9 0xF9
#define M_JPG10 0xFA
#define M_JPG11 0xFB
#define M_JPG12 0xFC
#define M_JPG13 0xFD
#define M_COM 0xFE /* COMment */
#define M_PSEUDO 0x123 /* Extra value. */
/* }}} */
/* {{{ jpeg2000 markers
*/
/* Markers x30 - x3F do not have a segment */
/* Markers x00, x01, xFE, xC0 - xDF ISO/IEC 10918-1 -> M_<xx> */
/* Markers xF0 - xF7 ISO/IEC 10918-3 */
/* Markers xF7 - xF8 ISO/IEC 14495-1 */
/* XY=Main/Tile-header:(R:required, N:not_allowed, O:optional, L:last_marker) */
#define JC_SOC 0x4F /* NN, Start of codestream */
#define JC_SIZ 0x51 /* RN, Image and tile size */
#define JC_COD 0x52 /* RO, Coding style default */
#define JC_COC 0x53 /* OO, Coding style component */
#define JC_TLM 0x55 /* ON, Tile part length main header */
#define JC_PLM 0x57 /* ON, Packet length main header */
#define JC_PLT 0x58 /* NO, Packet length tile part header */
#define JC_QCD 0x5C /* RO, Quantization default */
#define JC_QCC 0x5D /* OO, Quantization component */
#define JC_RGN 0x5E /* OO, Region of interest */
#define JC_POD 0x5F /* OO, Progression order default */
#define JC_PPM 0x60 /* ON, Packed packet headers main header */
#define JC_PPT 0x61 /* NO, Packed packet headers tile part header */
#define JC_CME 0x64 /* OO, Comment: "LL E <text>" E=0:binary, E=1:ascii */
#define JC_SOT 0x90 /* NR, Start of tile */
#define JC_SOP 0x91 /* NO, Start of packet */
#define JC_EPH 0x92 /* NO, End of packet header */
#define JC_SOD 0x93 /* NL, Start of data */
#define JC_EOC 0xD9 /* NN, End of codestream */
/* }}} */
/* {{{ exif_process_COM
Process a COM marker.
We want to print out the marker contents as legible text;
we must guard against random junk and varying newline representations.
*/
static void exif_process_COM (image_info_type *image_info, char *value, size_t length TSRMLS_DC)
{
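/* A COM segment starts with its two length bytes; skip them (value+2) and shorten the data accordingly (length-2). */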
exif_iif_add_tag(image_info, SECTION_COMMENT, "Comment", TAG_COMPUTED_VALUE, TAG_FMT_STRING, length-2, value+2 TSRMLS_CC);
}
/* }}} */
/* {{{ exif_process_CME
Process a CME marker.
We want to print out the marker contents as legible text;
we must guard against random junk and varying newline representations.
*/
#ifdef EXIF_JPEG2000
static void exif_process_CME (image_info_type *image_info, char *value, size_t length TSRMLS_DC)
{
if (length>3) {
switch(value[2]) {
case 0:
exif_iif_add_tag(image_info, SECTION_COMMENT, "Comment", TAG_COMPUTED_VALUE, TAG_FMT_UNDEFINED, length, value TSRMLS_CC);
break;
case 1:
exif_iif_add_tag(image_info, SECTION_COMMENT, "Comment", TAG_COMPUTED_VALUE, TAG_FMT_STRING, length, value TSRMLS_CC);
break;
default:
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Undefined JPEG2000 comment encoding");
break;
}
} else {
exif_iif_add_tag(image_info, SECTION_COMMENT, "Comment", TAG_COMPUTED_VALUE, TAG_FMT_UNDEFINED, 0, NULL TSRMLS_CC);
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "JPEG2000 comment section too small");
}
}
#endif
/* }}} */
/* {{{ exif_process_SOFn
* Process a SOFn marker. This is useful for the image dimensions */
static void exif_process_SOFn (uchar *Data, int marker, jpeg_sof_info *result)
{
/* 0xFF SOFn SectLen(2) Bits(1) Height(2) Width(2) Channels(1) 3*Channels (1) */
result->bits_per_sample = Data[2];
result->height = php_jpg_get16(Data+3);
result->width = php_jpg_get16(Data+5);
result->num_components = Data[7];
/* switch (marker) {
case M_SOF0: process = "Baseline"; break;
case M_SOF1: process = "Extended sequential"; break;
case M_SOF2: process = "Progressive"; break;
case M_SOF3: process = "Lossless"; break;
case M_SOF5: process = "Differential sequential"; break;
case M_SOF6: process = "Differential progressive"; break;
case M_SOF7: process = "Differential lossless"; break;
case M_SOF9: process = "Extended sequential, arithmetic coding"; break;
case M_SOF10: process = "Progressive, arithmetic coding"; break;
case M_SOF11: process = "Lossless, arithmetic coding"; break;
case M_SOF13: process = "Differential sequential, arithmetic coding"; break;
case M_SOF14: process = "Differential progressive, arithmetic coding"; break;
case M_SOF15: process = "Differential lossless, arithmetic coding"; break;
default: process = "Unknown"; break;
}*/
}
/* }}} */
/* forward declarations */
static int exif_process_IFD_in_JPEG(image_info_type *ImageInfo, char *dir_start, char *offset_base, size_t IFDlength, size_t displacement, int section_index TSRMLS_DC);
static int exif_process_IFD_TAG( image_info_type *ImageInfo, char *dir_entry, char *offset_base, size_t IFDlength, size_t displacement, int section_index, int ReadNextIFD, tag_table_type tag_table TSRMLS_DC);
/* {{{ exif_get_markername
Get name of marker */
#ifdef EXIF_DEBUG
static char * exif_get_markername(int marker)
{
switch(marker) {
case 0xC0: return "SOF0";
case 0xC1: return "SOF1";
case 0xC2: return "SOF2";
case 0xC3: return "SOF3";
case 0xC4: return "DHT";
case 0xC5: return "SOF5";
case 0xC6: return "SOF6";
case 0xC7: return "SOF7";
case 0xC9: return "SOF9";
case 0xCA: return "SOF10";
case 0xCB: return "SOF11";
case 0xCD: return "SOF13";
case 0xCE: return "SOF14";
case 0xCF: return "SOF15";
case 0xD8: return "SOI";
case 0xD9: return "EOI";
case 0xDA: return "SOS";
case 0xDB: return "DQT";
case 0xDC: return "DNL";
case 0xDD: return "DRI";
case 0xDE: return "DHP";
case 0xDF: return "EXP";
case 0xE0: return "APP0";
case 0xE1: return "EXIF";
case 0xE2: return "FPIX";
case 0xE3: return "APP3";
case 0xE4: return "APP4";
case 0xE5: return "APP5";
case 0xE6: return "APP6";
case 0xE7: return "APP7";
case 0xE8: return "APP8";
case 0xE9: return "APP9";
case 0xEA: return "APP10";
case 0xEB: return "APP11";
case 0xEC: return "APP12";
case 0xED: return "APP13";
case 0xEE: return "APP14";
case 0xEF: return "APP15";
case 0xF0: return "JPG0";
case 0xFD: return "JPG13";
case 0xFE: return "COM";
case 0x01: return "TEM";
}
return "Unknown";
}
#endif
/* }}} */
/* {{{ proto string exif_tagname(index)
Get headername for index or false if not defined */
PHP_FUNCTION(exif_tagname)
{
long tag;
char *szTemp;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &tag) == FAILURE) {
return;
}
szTemp = exif_get_tagname(tag, NULL, 0, tag_table_IFD TSRMLS_CC);
if (tag < 0 || !szTemp || !szTemp[0]) {
RETURN_FALSE;
}
RETURN_STRING(szTemp, 1);
}
/* }}} */
/* {{{ exif_ifd_make_value
* Create a value for an ifd from an info_data pointer */
static void* exif_ifd_make_value(image_info_data *info_data, int motorola_intel TSRMLS_DC) {
size_t byte_count;
char *value_ptr, *data_ptr;
size_t i;
image_info_value *info_value;
byte_count = php_tiff_bytes_per_format[info_data->format] * info_data->length;
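/* Allocate at least 4 bytes: TIFF stores values of up to 4 bytes inline in the directory entry, and the caller copies a full 4 bytes from this buffer in that case. */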
value_ptr = safe_emalloc(max(byte_count, 4), 1, 0);
memset(value_ptr, 0, 4);
if (!info_data->length) {
return value_ptr;
}
if (info_data->format == TAG_FMT_UNDEFINED || info_data->format == TAG_FMT_STRING
|| (byte_count>1 && (info_data->format == TAG_FMT_BYTE || info_data->format == TAG_FMT_SBYTE))
) {
memmove(value_ptr, info_data->value.s, byte_count);
return value_ptr;
} else if (info_data->format == TAG_FMT_BYTE) {
*value_ptr = info_data->value.u;
return value_ptr;
} else if (info_data->format == TAG_FMT_SBYTE) {
*value_ptr = info_data->value.i;
return value_ptr;
} else {
data_ptr = value_ptr;
for(i=0; i<info_data->length; i++) {
if (info_data->length==1) {
info_value = &info_data->value;
} else {
info_value = &info_data->value.list[i];
}
switch(info_data->format) {
case TAG_FMT_USHORT:
php_ifd_set16u(data_ptr, info_value->u, motorola_intel);
data_ptr += 2;
break;
case TAG_FMT_ULONG:
php_ifd_set32u(data_ptr, info_value->u, motorola_intel);
data_ptr += 4;
break;
case TAG_FMT_SSHORT:
php_ifd_set16u(data_ptr, info_value->i, motorola_intel);
data_ptr += 2;
break;
case TAG_FMT_SLONG:
php_ifd_set32u(data_ptr, info_value->i, motorola_intel);
data_ptr += 4;
break;
case TAG_FMT_URATIONAL:
php_ifd_set32u(data_ptr, info_value->ur.num, motorola_intel);
php_ifd_set32u(data_ptr+4, info_value->ur.den, motorola_intel);
data_ptr += 8;
break;
case TAG_FMT_SRATIONAL:
php_ifd_set32u(data_ptr, info_value->sr.num, motorola_intel);
php_ifd_set32u(data_ptr+4, info_value->sr.den, motorola_intel);
data_ptr += 8;
break;
case TAG_FMT_SINGLE:
memmove(data_ptr, &info_value->f, 4);
data_ptr += 4;
break;
case TAG_FMT_DOUBLE:
memmove(data_ptr, &info_value->d, 8);
data_ptr += 8;
break;
}
}
}
return value_ptr;
}
/* }}} */
/* {{{ exif_thumbnail_build
* Check and build thumbnail */
static void exif_thumbnail_build(image_info_type *ImageInfo TSRMLS_DC) {
size_t new_size, new_move, new_value;
char *new_data;
void *value_ptr;
int i, byte_count;
image_info_list *info_list;
image_info_data *info_data;
#ifdef EXIF_DEBUG
char tagname[64];
#endif
if (!ImageInfo->read_thumbnail || !ImageInfo->Thumbnail.offset || !ImageInfo->Thumbnail.size) {
return; /* ignore this call */
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail: filetype = %d", ImageInfo->Thumbnail.filetype);
#endif
switch(ImageInfo->Thumbnail.filetype) {
default:
case IMAGE_FILETYPE_JPEG:
/* done */
break;
case IMAGE_FILETYPE_TIFF_II:
case IMAGE_FILETYPE_TIFF_MM:
info_list = &ImageInfo->info_list[SECTION_THUMBNAIL];
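/* Size of a minimal TIFF wrapper: 8-byte TIFF header + 2-byte entry count + 12 bytes per directory entry + 4-byte next-IFD offset. */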
new_size = 8 + 2 + info_list->count * 12 + 4;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail: size of signature + directory(%d): 0x%02X", info_list->count, new_size);
#endif
new_value= new_size; /* offset for ifd values outside ifd directory */
for (i=0; i<info_list->count; i++) {
info_data = &info_list->list[i];
byte_count = php_tiff_bytes_per_format[info_data->format] * info_data->length;
if (byte_count > 4) {
new_size += byte_count;
}
}
new_move = new_size;
new_data = safe_erealloc(ImageInfo->Thumbnail.data, 1, ImageInfo->Thumbnail.size, new_size);
ImageInfo->Thumbnail.data = new_data;
memmove(ImageInfo->Thumbnail.data + new_move, ImageInfo->Thumbnail.data, ImageInfo->Thumbnail.size);
ImageInfo->Thumbnail.size += new_size;
/* fill in data */
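/* TIFF signature: byte-order mark ("MM" big-endian, "II" little-endian), the magic number 0x002A, and the offset of the first IFD (8, i.e. directly after the header). */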
if (ImageInfo->motorola_intel) {
memmove(new_data, "MM\x00\x2a\x00\x00\x00\x08", 8);
} else {
memmove(new_data, "II\x2a\x00\x08\x00\x00\x00", 8);
}
new_data += 8;
php_ifd_set16u(new_data, info_list->count, ImageInfo->motorola_intel);
new_data += 2;
for (i=0; i<info_list->count; i++) {
info_data = &info_list->list[i];
byte_count = php_tiff_bytes_per_format[info_data->format] * info_data->length;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail: process tag(x%04X=%s): %s%s (%d bytes)", info_data->tag, exif_get_tagname(info_data->tag, tagname, -12, tag_table_IFD TSRMLS_CC), (info_data->length>1)&&info_data->format!=TAG_FMT_UNDEFINED&&info_data->format!=TAG_FMT_STRING?"ARRAY OF ":"", exif_get_tagformat(info_data->format), byte_count);
#endif
if (info_data->tag==TAG_STRIP_OFFSETS || info_data->tag==TAG_JPEG_INTERCHANGE_FORMAT) {
php_ifd_set16u(new_data + 0, info_data->tag, ImageInfo->motorola_intel);
php_ifd_set16u(new_data + 2, TAG_FMT_ULONG, ImageInfo->motorola_intel);
php_ifd_set32u(new_data + 4, 1, ImageInfo->motorola_intel);
php_ifd_set32u(new_data + 8, new_move, ImageInfo->motorola_intel);
} else {
php_ifd_set16u(new_data + 0, info_data->tag, ImageInfo->motorola_intel);
php_ifd_set16u(new_data + 2, info_data->format, ImageInfo->motorola_intel);
php_ifd_set32u(new_data + 4, info_data->length, ImageInfo->motorola_intel);
value_ptr = exif_ifd_make_value(info_data, ImageInfo->motorola_intel TSRMLS_CC);
if (byte_count <= 4) {
memmove(new_data+8, value_ptr, 4);
} else {
php_ifd_set32u(new_data+8, new_value, ImageInfo->motorola_intel);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail: writing with value offset: 0x%04X + 0x%02X", new_value, byte_count);
#endif
memmove(ImageInfo->Thumbnail.data+new_value, value_ptr, byte_count);
new_value += byte_count;
}
efree(value_ptr);
}
new_data += 12;
}
memset(new_data, 0, 4); /* next ifd pointer */
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail: created");
#endif
break;
}
}
/* }}} */
/* {{{ exif_thumbnail_extract
* Grab the thumbnail, corrected */
static void exif_thumbnail_extract(image_info_type *ImageInfo, char *offset, size_t length TSRMLS_DC) {
if (ImageInfo->Thumbnail.data) {
exif_error_docref("exif_read_data#error_mult_thumb" EXIFERR_CC, ImageInfo, E_WARNING, "Multiple possible thumbnails");
return; /* Should not happen */
}
if (!ImageInfo->read_thumbnail) {
return; /* ignore this call */
}
/* According to EXIF 2.1, the thumbnail is not supposed to be greater than 64K */
if (ImageInfo->Thumbnail.size >= 65536
|| ImageInfo->Thumbnail.size <= 0
|| ImageInfo->Thumbnail.offset <= 0
) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Illegal thumbnail size/offset");
return;
}
/* Check to make sure we are not going to go past the ExifLength */
if ((ImageInfo->Thumbnail.offset + ImageInfo->Thumbnail.size) > length) {
EXIF_ERRLOG_THUMBEOF(ImageInfo)
return;
}
ImageInfo->Thumbnail.data = estrndup(offset + ImageInfo->Thumbnail.offset, ImageInfo->Thumbnail.size);
exif_thumbnail_build(ImageInfo TSRMLS_CC);
}
/* }}} */
/* {{{ exif_process_undefined
* Copy a string/buffer in the Exif header to a character string and return the length of the allocated buffer, if any. */
static int exif_process_undefined(char **result, char *value, size_t byte_count TSRMLS_DC) {
/* we cannot use strlcpy - here the problem is that we have to copy NUL
* chars up to byte_count, we also have to add a single NUL character to
* force end of string.
* estrndup does not return length
*/
if (byte_count) {
(*result) = estrndup(value, byte_count); /* estrndup writes the terminating NUL at byte_count */
return byte_count+1;
}
return 0;
}
/* }}} */
/* {{{ exif_process_string_raw
* Copy a string in the Exif header to a character string and return the length of the allocated buffer, if any. */
static int exif_process_string_raw(char **result, char *value, size_t byte_count) {
/* we cannot use strlcpy - here the problem is that we have to copy NUL
* chars up to byte_count, we also have to add a single NUL character to
* force end of string.
*/
if (byte_count) {
(*result) = safe_emalloc(byte_count, 1, 1);
memcpy(*result, value, byte_count);
(*result)[byte_count] = '\0';
return byte_count+1;
}
return 0;
}
/* }}} */
/* {{{ exif_process_string
* Copy a string in the Exif header to a character string and return the length of the allocated buffer, if any.
* In contrast to exif_process_undefined this function always returns a string buffer */
static int exif_process_string(char **result, char *value, size_t byte_count TSRMLS_DC) {
/* we cannot use strlcpy - here the problem is that we cannot use strlen to
* determine the length of the string and we cannot use strlcpy with len=byte_count+1
* because then we might get into an EXCEPTION if we exceed an allocated
* memory page...so we use php_strnlen in conjunction with memcpy and add the NUL
* char.
* estrdup would sometimes allocate more memory and does not return length
*/
if ((byte_count=php_strnlen(value, byte_count)) > 0) {
return exif_process_undefined(result, value, byte_count TSRMLS_CC);
}
(*result) = estrndup("", 1); /* force empty string */
return byte_count+1;
}
/* }}} */
/* {{{ exif_process_user_comment
* Process UserComment in IFD. */
static int exif_process_user_comment(image_info_type *ImageInfo, char **pszInfoPtr, char **pszEncoding, char *szValuePtr, int ByteCount TSRMLS_DC)
{
int a;
char *decode;
size_t len;
*pszEncoding = NULL;
/* Copy the comment */
if (ByteCount>=8) {
const zend_encoding *from, *to;
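/* Per the Exif specification, UserComment starts with an 8-byte character code: "ASCII\0\0\0", "UNICODE\0", "JIS\0\0\0\0\0", or eight NUL bytes for undefined. */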
if (!memcmp(szValuePtr, "UNICODE\0", 8)) {
*pszEncoding = estrdup((const char*)szValuePtr);
szValuePtr = szValuePtr+8;
ByteCount -= 8;
/* First try to detect a BOM: ZERO WIDTH NO-BREAK SPACE (U+FEFF).
 * Since we have no encoding support for the BOM yet, we skip it.
 */
if (!memcmp(szValuePtr, "\xFE\xFF", 2)) {
decode = "UCS-2BE";
szValuePtr = szValuePtr+2;
ByteCount -= 2;
} else if (!memcmp(szValuePtr, "\xFF\xFE", 2)) {
decode = "UCS-2LE";
szValuePtr = szValuePtr+2;
ByteCount -= 2;
} else if (ImageInfo->motorola_intel) {
decode = ImageInfo->decode_unicode_be;
} else {
decode = ImageInfo->decode_unicode_le;
}
to = zend_multibyte_fetch_encoding(ImageInfo->encode_unicode TSRMLS_CC);
from = zend_multibyte_fetch_encoding(decode TSRMLS_CC);
/* XXX this will fail again if encoding_converter returns on error something different than SIZE_MAX */
if (!to || !from || zend_multibyte_encoding_converter(
(unsigned char**)pszInfoPtr,
&len,
(unsigned char*)szValuePtr,
ByteCount,
to,
from
TSRMLS_CC) == (size_t)-1) {
len = exif_process_string_raw(pszInfoPtr, szValuePtr, ByteCount);
}
return len;
} else if (!memcmp(szValuePtr, "ASCII\0\0\0", 8)) {
*pszEncoding = estrdup((const char*)szValuePtr);
szValuePtr = szValuePtr+8;
ByteCount -= 8;
} else if (!memcmp(szValuePtr, "JIS\0\0\0\0\0", 8)) {
/* JIS should be translated to MB or we leave it to the user - leave it to the user */
*pszEncoding = estrdup((const char*)szValuePtr);
szValuePtr = szValuePtr+8;
ByteCount -= 8;
/* XXX this will fail again if encoding_converter returns on error something different than SIZE_MAX */
to = zend_multibyte_fetch_encoding(ImageInfo->encode_jis TSRMLS_CC);
from = zend_multibyte_fetch_encoding(ImageInfo->motorola_intel ? ImageInfo->decode_jis_be : ImageInfo->decode_jis_le TSRMLS_CC);
if (!to || !from || zend_multibyte_encoding_converter(
(unsigned char**)pszInfoPtr,
&len,
(unsigned char*)szValuePtr,
ByteCount,
to,
from
TSRMLS_CC) == (size_t)-1) {
len = exif_process_string_raw(pszInfoPtr, szValuePtr, ByteCount);
}
return len;
} else if (!memcmp(szValuePtr, "\0\0\0\0\0\0\0\0", 8)) {
/* 8 NUL bytes mean the encoding is undefined and should be ASCII... */
*pszEncoding = estrdup("UNDEFINED");
szValuePtr = szValuePtr+8;
ByteCount -= 8;
}
}
/* Olympus has this padded with trailing spaces. Remove these first. */
if (ByteCount>0) {
for (a=ByteCount-1;a && szValuePtr[a]==' ';a--) {
(szValuePtr)[a] = '\0';
}
}
/* normal text without encoding */
exif_process_string(pszInfoPtr, szValuePtr, ByteCount TSRMLS_CC);
return strlen(*pszInfoPtr);
}
/* }}} */
/* {{{ exif_process_unicode
* Process unicode field in IFD. */
static int exif_process_unicode(image_info_type *ImageInfo, xp_field_type *xp_field, int tag, char *szValuePtr, int ByteCount TSRMLS_DC)
{
xp_field->tag = tag;
xp_field->value = NULL;
/* XXX this will fail again if encoding_converter returns on error something different than SIZE_MAX */
if (zend_multibyte_encoding_converter(
(unsigned char**)&xp_field->value,
&xp_field->size,
(unsigned char*)szValuePtr,
ByteCount,
zend_multibyte_fetch_encoding(ImageInfo->encode_unicode TSRMLS_CC),
zend_multibyte_fetch_encoding(ImageInfo->motorola_intel ? ImageInfo->decode_unicode_be : ImageInfo->decode_unicode_le TSRMLS_CC)
TSRMLS_CC) == (size_t)-1) {
xp_field->size = exif_process_string_raw(&xp_field->value, szValuePtr, ByteCount);
}
return xp_field->size;
}
/* }}} */
/* {{{ exif_process_IFD_in_MAKERNOTE
* Process nested IFDs directories in Maker Note. */
static int exif_process_IFD_in_MAKERNOTE(image_info_type *ImageInfo, char * value_ptr, int value_len, char *offset_base, size_t IFDlength, size_t displacement TSRMLS_DC)
{
int de, i=0, section_index = SECTION_MAKERNOTE;
int NumDirEntries, old_motorola_intel, offset_diff;
const maker_note_type *maker_note;
char *dir_start;
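/* Note: the loop below deliberately runs one step past the table end; reaching i == count means no maker note entry matched and the data is kept as a plain string. */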
for (i=0; i<=sizeof(maker_note_array)/sizeof(maker_note_type); i++) {
if (i==sizeof(maker_note_array)/sizeof(maker_note_type)) {
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "No maker note data found. Detected maker: %s (length = %d)", ImageInfo->make, strlen(ImageInfo->make));
#endif
/* unknown manufacturer, not an error, use it as a string */
return TRUE;
}
maker_note = maker_note_array+i;
/*exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "check (%s,%s)", maker_note->make?maker_note->make:"", maker_note->model?maker_note->model:"");*/
if (maker_note->make && (!ImageInfo->make || strcmp(maker_note->make, ImageInfo->make)))
continue;
if (maker_note->model && (!ImageInfo->model || strcmp(maker_note->model, ImageInfo->model)))
continue;
if (maker_note->id_string && strncmp(maker_note->id_string, value_ptr, maker_note->id_string_len))
continue;
break;
}
if (maker_note->offset >= value_len) {
/* Do not go past the value end */
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "IFD data too short: 0x%04X offset 0x%04X", value_len, maker_note->offset);
return FALSE;
}
dir_start = value_ptr + maker_note->offset;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Process %s @x%04X + 0x%04X=%d: %s", exif_get_sectionname(section_index), (int)dir_start-(int)offset_base+maker_note->offset+displacement, value_len, value_len, exif_char_dump(value_ptr, value_len, (int)dir_start-(int)offset_base+maker_note->offset+displacement));
#endif
ImageInfo->sections_found |= FOUND_MAKERNOTE;
old_motorola_intel = ImageInfo->motorola_intel;
switch (maker_note->byte_order) {
case MN_ORDER_INTEL:
ImageInfo->motorola_intel = 0;
break;
case MN_ORDER_MOTOROLA:
ImageInfo->motorola_intel = 1;
break;
default:
case MN_ORDER_NORMAL:
break;
}
NumDirEntries = php_ifd_get16u(dir_start, ImageInfo->motorola_intel);
switch (maker_note->offset_mode) {
case MN_OFFSET_MAKER:
offset_base = value_ptr;
break;
case MN_OFFSET_GUESS:
if (maker_note->offset + 10 + 4 >= value_len) {
/* Cannot read dir_start+10 since it's beyond the value end */
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "IFD data too short: 0x%04X", value_len);
return FALSE;
}
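/* Guess the offset correction: a well-formed IFD of N entries places its value area at 2 + N*12 + 4 bytes from dir_start, so compare that against the first entry's recorded value offset (the field at dir_start+10). */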
offset_diff = 2 + NumDirEntries*12 + 4 - php_ifd_get32u(dir_start+10, ImageInfo->motorola_intel);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Using automatic offset correction: 0x%04X", ((int)dir_start-(int)offset_base+maker_note->offset+displacement) + offset_diff);
#endif
if (offset_diff < 0 || offset_diff >= value_len ) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "IFD data bad offset: 0x%04X length 0x%04X", offset_diff, value_len);
return FALSE;
}
offset_base = value_ptr + offset_diff;
break;
default:
case MN_OFFSET_NORMAL:
break;
}
if ((2+NumDirEntries*12) > value_len) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Illegal IFD size: 2 + 0x%04X*12 = 0x%04X > 0x%04X", NumDirEntries, 2+NumDirEntries*12, value_len);
return FALSE;
}
for (de=0;de<NumDirEntries;de++) {
if (!exif_process_IFD_TAG(ImageInfo, dir_start + 2 + 12 * de,
offset_base, IFDlength, displacement, section_index, 0, maker_note->tag_table TSRMLS_CC)) {
return FALSE;
}
}
ImageInfo->motorola_intel = old_motorola_intel;
/* NextDirOffset (must be NULL) = php_ifd_get32u(dir_start+2+12*de, ImageInfo->motorola_intel);*/
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Subsection %s done", exif_get_sectionname(SECTION_MAKERNOTE));
#endif
return TRUE;
}
/* }}} */
/* {{{ exif_process_IFD_TAG
* Process one of the nested IFDs directories. */
static int exif_process_IFD_TAG(image_info_type *ImageInfo, char *dir_entry, char *offset_base, size_t IFDlength, size_t displacement, int section_index, int ReadNextIFD, tag_table_type tag_table TSRMLS_DC)
{
size_t length;
int tag, format, components;
char *value_ptr, tagname[64], cbuf[32], *outside=NULL;
size_t byte_count, offset_val, fpos, fgot;
int64_t byte_count_signed;
xp_field_type *tmp_xp;
#ifdef EXIF_DEBUG
char *dump_data;
int dump_free;
#endif /* EXIF_DEBUG */
/* Protect against corrupt headers */
if (ImageInfo->ifd_nesting_level > MAX_IFD_NESTING_LEVEL) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "corrupt EXIF header: maximum directory nesting level reached");
return FALSE;
}
ImageInfo->ifd_nesting_level++;
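/* An IFD entry is 12 bytes: tag (2), format (2), component count (4), and either the value itself (when it fits into 4 bytes) or an offset to it (4). */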
tag = php_ifd_get16u(dir_entry, ImageInfo->motorola_intel);
format = php_ifd_get16u(dir_entry+2, ImageInfo->motorola_intel);
components = php_ifd_get32u(dir_entry+4, ImageInfo->motorola_intel);
if (!format || format > NUM_FORMATS) {
/* format 0 is illegal and anything above NUM_FORMATS is unknown; fall back to BYTE */
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Process tag(x%04X=%s): Illegal format code 0x%04X, suppose BYTE", tag, exif_get_tagname(tag, tagname, -12, tag_table TSRMLS_CC), format);
format = TAG_FMT_BYTE;
/*return TRUE;*/
}
if (components < 0) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Process tag(x%04X=%s): Illegal components(%ld)", tag, exif_get_tagname(tag, tagname, -12, tag_table TSRMLS_CC), components);
return FALSE;
}
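/* Compute the size in 64 bits: a 32-bit component count multiplied by the per-format byte size can overflow 32-bit arithmetic. */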
byte_count_signed = (int64_t)components * php_tiff_bytes_per_format[format];
if (byte_count_signed < 0 || (byte_count_signed > INT32_MAX)) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Process tag(x%04X=%s): Illegal byte_count", tag, exif_get_tagname(tag, tagname, -12, tag_table TSRMLS_CC));
return FALSE;
}
byte_count = (size_t)byte_count_signed;
if (byte_count > 4) {
offset_val = php_ifd_get32u(dir_entry+8, ImageInfo->motorola_intel);
/* If it's bigger than 4 bytes, the dir entry contains an offset. */
value_ptr = offset_base+offset_val;
/*
dir_entry is ImageInfo->file.list[sn].data+2+i*12
offset_base is ImageInfo->file.list[sn].data-dir_offset
dir_entry - offset_base is dir_offset+2+i*12
*/
if (byte_count > IFDlength || offset_val > IFDlength-byte_count || value_ptr < dir_entry || offset_val < (size_t)(dir_entry-offset_base)) {
/* It is important to check for IMAGE_FILETYPE_TIFF:
 * JPEG does not use absolute pointers; instead, its pointers are
 * relative to the start of the TIFF header in the APP1 section. */
if (byte_count > ImageInfo->FileSize || offset_val>ImageInfo->FileSize-byte_count || (ImageInfo->FileType!=IMAGE_FILETYPE_TIFF_II && ImageInfo->FileType!=IMAGE_FILETYPE_TIFF_MM && ImageInfo->FileType!=IMAGE_FILETYPE_JPEG)) {
if (value_ptr < dir_entry) {
/* we can read this if offset_val > 0 */
/* some files have their values in other parts of the file */
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Process tag(x%04X=%s): Illegal pointer offset(x%04X < x%04X)", tag, exif_get_tagname(tag, tagname, -12, tag_table TSRMLS_CC), offset_val, dir_entry);
} else {
/* this is for sure not allowed */
/* exception are IFD pointers */
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Process tag(x%04X=%s): Illegal pointer offset(x%04X + x%04X = x%04X > x%04X)", tag, exif_get_tagname(tag, tagname, -12, tag_table TSRMLS_CC), offset_val, byte_count, offset_val+byte_count, IFDlength);
}
return FALSE;
}
if (byte_count>sizeof(cbuf)) {
/* mark as outside range and get buffer */
value_ptr = safe_emalloc(byte_count, 1, 0);
outside = value_ptr;
} else {
/* In most cases we only access a small range, so
 * it is faster to use a static buffer here,
 * BUT it also offers the possibility to have
 * pointers read without the need to free them
 * explicitly before returning. */
memset(&cbuf, 0, sizeof(cbuf));
value_ptr = cbuf;
}
fpos = php_stream_tell(ImageInfo->infile);
php_stream_seek(ImageInfo->infile, displacement+offset_val, SEEK_SET);
fgot = php_stream_tell(ImageInfo->infile);
if (fgot!=displacement+offset_val) {
EFREE_IF(outside);
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Wrong file pointer: 0x%08X != 0x%08X", fgot, displacement+offset_val);
return FALSE;
}
fgot = php_stream_read(ImageInfo->infile, value_ptr, byte_count);
php_stream_seek(ImageInfo->infile, fpos, SEEK_SET);
if (fgot<byte_count) {
EFREE_IF(outside);
EXIF_ERRLOG_FILEEOF(ImageInfo)
return FALSE;
}
}
} else {
/* 4 bytes or less and value is in the dir entry itself */
value_ptr = dir_entry+8;
offset_val= value_ptr-offset_base;
}
ImageInfo->sections_found |= FOUND_ANY_TAG;
#ifdef EXIF_DEBUG
dump_data = exif_dump_data(&dump_free, format, components, length, ImageInfo->motorola_intel, value_ptr TSRMLS_CC);
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Process tag(x%04X=%s,@x%04X + x%04X(=%d)): %s%s %s", tag, exif_get_tagname(tag, tagname, -12, tag_table TSRMLS_CC), offset_val+displacement, byte_count, byte_count, (components>1)&&format!=TAG_FMT_UNDEFINED&&format!=TAG_FMT_STRING?"ARRAY OF ":"", exif_get_tagformat(format), dump_data);
if (dump_free) {
efree(dump_data);
}
#endif
if (section_index==SECTION_THUMBNAIL) {
if (!ImageInfo->Thumbnail.data) {
switch(tag) {
case TAG_IMAGEWIDTH:
case TAG_COMP_IMAGE_WIDTH:
ImageInfo->Thumbnail.width = exif_convert_any_to_int(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_IMAGEHEIGHT:
case TAG_COMP_IMAGE_HEIGHT:
ImageInfo->Thumbnail.height = exif_convert_any_to_int(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_STRIP_OFFSETS:
case TAG_JPEG_INTERCHANGE_FORMAT:
/* accept both formats */
ImageInfo->Thumbnail.offset = exif_convert_any_to_int(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_STRIP_BYTE_COUNTS:
if (ImageInfo->FileType == IMAGE_FILETYPE_TIFF_II || ImageInfo->FileType == IMAGE_FILETYPE_TIFF_MM) {
ImageInfo->Thumbnail.filetype = ImageInfo->FileType;
} else {
/* motorola is easier to read */
ImageInfo->Thumbnail.filetype = IMAGE_FILETYPE_TIFF_MM;
}
ImageInfo->Thumbnail.size = exif_convert_any_to_int(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_JPEG_INTERCHANGE_FORMAT_LEN:
if (ImageInfo->Thumbnail.filetype == IMAGE_FILETYPE_UNKNOWN) {
ImageInfo->Thumbnail.filetype = IMAGE_FILETYPE_JPEG;
ImageInfo->Thumbnail.size = exif_convert_any_to_int(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
}
break;
}
}
} else {
if (section_index==SECTION_IFD0 || section_index==SECTION_EXIF)
switch(tag) {
case TAG_COPYRIGHT:
/* check for "<photographer> NUL <editor> NUL" */
if (byte_count>1 && (length=php_strnlen(value_ptr, byte_count)) > 0) {
if (length<byte_count-1) {
/* When there are any characters after the first NUL */
ImageInfo->CopyrightPhotographer = estrdup(value_ptr);
ImageInfo->CopyrightEditor = estrndup(value_ptr+length+1, byte_count-length-1);
spprintf(&ImageInfo->Copyright, 0, "%s, %s", ImageInfo->CopyrightPhotographer, ImageInfo->CopyrightEditor);
/* format = TAG_FMT_UNDEFINED; this mustn't be ASCII */
/* but we are not supposed to change this */
/* keep in mind that image_info does not store editor value */
} else {
ImageInfo->Copyright = estrndup(value_ptr, byte_count);
}
}
break;
case TAG_USERCOMMENT:
ImageInfo->UserCommentLength = exif_process_user_comment(ImageInfo, &(ImageInfo->UserComment), &(ImageInfo->UserCommentEncoding), value_ptr, byte_count TSRMLS_CC);
break;
case TAG_XP_TITLE:
case TAG_XP_COMMENTS:
case TAG_XP_AUTHOR:
case TAG_XP_KEYWORDS:
case TAG_XP_SUBJECT:
tmp_xp = (xp_field_type*)safe_erealloc(ImageInfo->xp_fields.list, (ImageInfo->xp_fields.count+1), sizeof(xp_field_type), 0);
ImageInfo->sections_found |= FOUND_WINXP;
ImageInfo->xp_fields.list = tmp_xp;
ImageInfo->xp_fields.count++;
exif_process_unicode(ImageInfo, &(ImageInfo->xp_fields.list[ImageInfo->xp_fields.count-1]), tag, value_ptr, byte_count TSRMLS_CC);
break;
case TAG_FNUMBER:
/* Simplest way of expressing aperture, so I trust it the most.
(overwrite previously computed value if there is one) */
ImageInfo->ApertureFNumber = (float)exif_convert_any_format(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_APERTURE:
case TAG_MAX_APERTURE:
/* More relevant info always comes earlier, so only use this field if we don't
have appropriate aperture information yet. */
if (ImageInfo->ApertureFNumber == 0) {
ImageInfo->ApertureFNumber
= (float)exp(exif_convert_any_format(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC)*log(2)*0.5);
}
break;
case TAG_SHUTTERSPEED:
/* More complicated way of expressing exposure time, so only use
this value if we don't already have it from somewhere else.
SHUTTERSPEED comes after EXPOSURE TIME
*/
if (ImageInfo->ExposureTime == 0) {
ImageInfo->ExposureTime
= (float)(1/exp(exif_convert_any_format(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC)*log(2)));
}
break;
case TAG_EXPOSURETIME:
ImageInfo->ExposureTime = -1;
break;
case TAG_COMP_IMAGE_WIDTH:
ImageInfo->ExifImageWidth = exif_convert_any_to_int(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_FOCALPLANE_X_RES:
ImageInfo->FocalplaneXRes = exif_convert_any_format(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_SUBJECT_DISTANCE:
/* Indicates the distance the autofocus camera is focused to.
Tends to be less accurate as distance increases. */
ImageInfo->Distance = (float)exif_convert_any_format(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_FOCALPLANE_RESOLUTION_UNIT:
switch((int)exif_convert_any_format(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC)) {
case 1: ImageInfo->FocalplaneUnits = 25.4; break; /* inch */
case 2:
/* According to the information I was using, 2 means meters.
But looking at the Canon PowerShot's files, inches is the only
sensible value. */
ImageInfo->FocalplaneUnits = 25.4;
break;
case 3: ImageInfo->FocalplaneUnits = 10; break; /* centimeter */
case 4: ImageInfo->FocalplaneUnits = 1; break; /* millimeter */
case 5: ImageInfo->FocalplaneUnits = .001; break; /* micrometer */
}
break;
case TAG_SUB_IFD:
if (format==TAG_FMT_IFD) {
/* If this is called we are either in a TIFF's thumbnail or in a JPEG where we cannot handle it */
/* TIFF thumbnail: our data structure cannot store a thumbnail of a thumbnail */
/* JPEG: do we have the data area, and what should we do with it? */
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Skip SUB IFD");
}
break;
case TAG_MAKE:
ImageInfo->make = estrndup(value_ptr, byte_count);
break;
case TAG_MODEL:
ImageInfo->model = estrndup(value_ptr, byte_count);
break;
case TAG_MAKER_NOTE:
if (!exif_process_IFD_in_MAKERNOTE(ImageInfo, value_ptr, byte_count, offset_base, IFDlength, displacement TSRMLS_CC)) {
EFREE_IF(outside);
return FALSE;
}
break;
case TAG_EXIF_IFD_POINTER:
case TAG_GPS_IFD_POINTER:
case TAG_INTEROP_IFD_POINTER:
if (ReadNextIFD) {
char *Subdir_start;
int sub_section_index = 0;
switch(tag) {
case TAG_EXIF_IFD_POINTER:
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Found EXIF");
#endif
ImageInfo->sections_found |= FOUND_EXIF;
sub_section_index = SECTION_EXIF;
break;
case TAG_GPS_IFD_POINTER:
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Found GPS");
#endif
ImageInfo->sections_found |= FOUND_GPS;
sub_section_index = SECTION_GPS;
break;
case TAG_INTEROP_IFD_POINTER:
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Found INTEROPERABILITY");
#endif
ImageInfo->sections_found |= FOUND_INTEROP;
sub_section_index = SECTION_INTEROP;
break;
}
Subdir_start = offset_base + php_ifd_get32u(value_ptr, ImageInfo->motorola_intel);
if (Subdir_start < offset_base || Subdir_start > offset_base+IFDlength) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Illegal IFD Pointer");
return FALSE;
}
if (!exif_process_IFD_in_JPEG(ImageInfo, Subdir_start, offset_base, IFDlength, displacement, sub_section_index TSRMLS_CC)) {
return FALSE;
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Subsection %s done", exif_get_sectionname(sub_section_index));
#endif
}
}
}
exif_iif_add_tag(ImageInfo, section_index, exif_get_tagname(tag, tagname, sizeof(tagname), tag_table TSRMLS_CC), tag, format, components, value_ptr TSRMLS_CC);
EFREE_IF(outside);
return TRUE;
}
/* }}} */
/* {{{ exif_process_IFD_in_JPEG
* Process one of the nested IFDs directories. */
static int exif_process_IFD_in_JPEG(image_info_type *ImageInfo, char *dir_start, char *offset_base, size_t IFDlength, size_t displacement, int section_index TSRMLS_DC)
{
int de;
int NumDirEntries;
int NextDirOffset;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Process %s (x%04X(=%d))", exif_get_sectionname(section_index), IFDlength, IFDlength);
#endif
ImageInfo->sections_found |= FOUND_IFD0;
if ((dir_start + 2) >= (offset_base+IFDlength)) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Illegal IFD size");
return FALSE;
}
NumDirEntries = php_ifd_get16u(dir_start, ImageInfo->motorola_intel);
if ((dir_start+2+NumDirEntries*12) > (offset_base+IFDlength)) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Illegal IFD size: x%04X + 2 + x%04X*12 = x%04X > x%04X", (int)((size_t)dir_start+2-(size_t)offset_base), NumDirEntries, (int)((size_t)dir_start+2+NumDirEntries*12-(size_t)offset_base), IFDlength);
return FALSE;
}
for (de=0;de<NumDirEntries;de++) {
if (!exif_process_IFD_TAG(ImageInfo, dir_start + 2 + 12 * de,
offset_base, IFDlength, displacement, section_index, 1, exif_get_tag_table(section_index) TSRMLS_CC)) {
return FALSE;
}
}
/*
* Ignore IFD2 if it purportedly exists
*/
if (section_index == SECTION_THUMBNAIL) {
return TRUE;
}
/*
* Hack to make it process IFD1, I hope.
* There are 2 IFDs; the second one holds the keys (0x0201 and 0x0202) to the thumbnail
*/
if ((dir_start+2+12*de + 4) >= (offset_base+IFDlength)) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Illegal IFD size");
return FALSE;
}
NextDirOffset = php_ifd_get32u(dir_start+2+12*de, ImageInfo->motorola_intel);
if (NextDirOffset) {
/* the next check may look wrong, but here IFDlength means the length of all IFDs */
if (offset_base + NextDirOffset < offset_base || offset_base + NextDirOffset > offset_base+IFDlength) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Illegal IFD offset");
return FALSE;
}
/* That is the IFD for the first thumbnail */
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Expect next IFD to be thumbnail");
#endif
if (exif_process_IFD_in_JPEG(ImageInfo, offset_base + NextDirOffset, offset_base, IFDlength, displacement, SECTION_THUMBNAIL TSRMLS_CC)) {
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail size: 0x%04X", ImageInfo->Thumbnail.size);
#endif
if (ImageInfo->Thumbnail.filetype != IMAGE_FILETYPE_UNKNOWN
&& ImageInfo->Thumbnail.size
&& ImageInfo->Thumbnail.offset
&& ImageInfo->read_thumbnail
) {
exif_thumbnail_extract(ImageInfo, offset_base, IFDlength TSRMLS_CC);
}
return TRUE;
} else {
return FALSE;
}
}
return TRUE;
}
/* }}} */
/* {{{ exif_process_TIFF_in_JPEG
Process a TIFF header in a JPEG file
*/
static void exif_process_TIFF_in_JPEG(image_info_type *ImageInfo, char *CharBuf, size_t length, size_t displacement TSRMLS_DC)
{
unsigned exif_value_2a, offset_of_ifd;
/* set the thumbnail stuff to nothing so we can test to see if they get set up */
if (memcmp(CharBuf, "II", 2) == 0) {
ImageInfo->motorola_intel = 0;
} else if (memcmp(CharBuf, "MM", 2) == 0) {
ImageInfo->motorola_intel = 1;
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Invalid TIFF alignment marker");
return;
}
/* Check the next two values for correctness. */
if (length < 8) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Invalid TIFF start (1)");
return;
}
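/* TIFF header layout: 2-byte byte-order mark, 2-byte magic number 0x002A, 4-byte offset of the first IFD. */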
exif_value_2a = php_ifd_get16u(CharBuf+2, ImageInfo->motorola_intel);
offset_of_ifd = php_ifd_get32u(CharBuf+4, ImageInfo->motorola_intel);
if (exif_value_2a != 0x2a || offset_of_ifd < 0x08) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Invalid TIFF start (1)");
return;
}
if (offset_of_ifd > length) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Invalid IFD start");
return;
}
ImageInfo->sections_found |= FOUND_IFD0;
/* The first directory starts at offset 8. Offsets start at 0. */
exif_process_IFD_in_JPEG(ImageInfo, CharBuf+offset_of_ifd, CharBuf, length/*-14*/, displacement, SECTION_IFD0 TSRMLS_CC);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Process TIFF in JPEG done");
#endif
/* Compute the CCD width, in millimeters. */
if (ImageInfo->FocalplaneXRes != 0) {
ImageInfo->CCDWidth = (float)(ImageInfo->ExifImageWidth * ImageInfo->FocalplaneUnits / ImageInfo->FocalplaneXRes);
}
}
/* }}} */
/* {{{ exif_process_APP1
Process a JPEG APP1 block marker
Describes all the drivel that most digital cameras include...
*/
static void exif_process_APP1(image_info_type *ImageInfo, char *CharBuf, size_t length, size_t displacement TSRMLS_DC)
{
/* Check the APP1 for Exif Identifier Code */
static const uchar ExifHeader[] = {0x45, 0x78, 0x69, 0x66, 0x00, 0x00};
if (length <= 8 || memcmp(CharBuf+2, ExifHeader, 6)) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Incorrect APP1 Exif Identifier Code");
return;
}
exif_process_TIFF_in_JPEG(ImageInfo, CharBuf + 8, length - 8, displacement+8 TSRMLS_CC);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Process APP1/EXIF done");
#endif
}
/* }}} */
/* {{{ exif_process_APP12
Process a JPEG APP12 block marker used by OLYMPUS
*/
static void exif_process_APP12(image_info_type *ImageInfo, char *buffer, size_t length TSRMLS_DC)
{
size_t l1, l2=0;
if ((l1 = php_strnlen(buffer+2, length-2)) > 0) {
exif_iif_add_tag(ImageInfo, SECTION_APP12, "Company", TAG_NONE, TAG_FMT_STRING, l1, buffer+2 TSRMLS_CC);
if (length > 2+l1+1) {
l2 = php_strnlen(buffer+2+l1+1, length-2-l1-1);
exif_iif_add_tag(ImageInfo, SECTION_APP12, "Info", TAG_NONE, TAG_FMT_STRING, l2, buffer+2+l1+1 TSRMLS_CC);
}
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Process section APP12 with l1=%d, l2=%d done", l1, l2);
#endif
}
/* }}} */
/* {{{ exif_scan_JPEG_header
* Parse the marker stream until SOS or EOI is seen; */
static int exif_scan_JPEG_header(image_info_type *ImageInfo TSRMLS_DC)
{
int section, sn;
int marker = 0, last_marker = M_PSEUDO, comment_correction=1;
unsigned int ll, lh;
uchar *Data;
size_t fpos, size, got, itemlen;
jpeg_sof_info sof_info;
for(section=0;;section++) {
#ifdef EXIF_DEBUG
fpos = php_stream_tell(ImageInfo->infile);
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Needing section %d @ 0x%08X", ImageInfo->file.count, fpos);
#endif
/* get marker byte, swallowing possible padding */
/* some software does not count the length bytes of the COM section */
/* one company doing so is very much involved in JPEG... so we accept it too */
if (last_marker==M_COM && comment_correction) {
comment_correction = 2;
}
do {
if ((marker = php_stream_getc(ImageInfo->infile)) == EOF) {
EXIF_ERRLOG_CORRUPT(ImageInfo)
return FALSE;
}
if (last_marker==M_COM && comment_correction>0) {
if (marker!=0xFF) {
marker = 0xff;
comment_correction--;
} else {
last_marker = M_PSEUDO; /* stop skipping 0 for M_COM */
}
}
} while (marker == 0xff);
if (last_marker==M_COM && !comment_correction) {
exif_error_docref("exif_read_data#error_mcom" EXIFERR_CC, ImageInfo, E_NOTICE, "Image has corrupt COM section: some software set wrong length information");
}
if (last_marker==M_COM && comment_correction)
return M_EOI; /* illegal: the character after the COM section is not 0xFF */
fpos = php_stream_tell(ImageInfo->infile);
if (marker == 0xff) {
/* 0xff is legal padding, but if we get that many, something's wrong. */
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "To many padding bytes");
return FALSE;
}
/* Read the length of the section. */
if ((lh = php_stream_getc(ImageInfo->infile)) == EOF) {
EXIF_ERRLOG_CORRUPT(ImageInfo)
return FALSE;
}
if ((ll = php_stream_getc(ImageInfo->infile)) == EOF) {
EXIF_ERRLOG_CORRUPT(ImageInfo)
return FALSE;
}
itemlen = (lh << 8) | ll;
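/* Segment lengths are big-endian and include the two length bytes themselves, so any value below 2 is corrupt. */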
if (itemlen < 2) {
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "%s, Section length: 0x%02X%02X", EXIF_ERROR_CORRUPT, lh, ll);
#else
EXIF_ERRLOG_CORRUPT(ImageInfo)
#endif
return FALSE;
}
sn = exif_file_sections_add(ImageInfo, marker, itemlen+1, NULL);
Data = ImageInfo->file.list[sn].data;
/* Store first two pre-read bytes. */
Data[0] = (uchar)lh;
Data[1] = (uchar)ll;
got = php_stream_read(ImageInfo->infile, (char*)(Data+2), itemlen-2); /* Read the whole section. */
if (got != itemlen-2) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Error reading from file: got=x%04X(=%d) != itemlen-2=x%04X(=%d)", got, got, itemlen-2, itemlen-2);
return FALSE;
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Process section(x%02X=%s) @ x%04X + x%04X(=%d)", marker, exif_get_markername(marker), fpos, itemlen, itemlen);
#endif
switch(marker) {
case M_SOS: /* stop before hitting compressed data */
/* If reading entire image is requested, read the rest of the data. */
if (ImageInfo->read_all) {
/* Determine how much file is left. */
fpos = php_stream_tell(ImageInfo->infile);
size = ImageInfo->FileSize - fpos;
sn = exif_file_sections_add(ImageInfo, M_PSEUDO, size, NULL);
Data = ImageInfo->file.list[sn].data;
got = php_stream_read(ImageInfo->infile, (char*)Data, size);
if (got != size) {
EXIF_ERRLOG_FILEEOF(ImageInfo)
return FALSE;
}
}
return TRUE;
case M_EOI: /* in case it's a tables-only JPEG stream */
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "No image in jpeg!");
return (ImageInfo->sections_found&(~FOUND_COMPUTED)) ? TRUE : FALSE;
case M_COM: /* Comment section */
exif_process_COM(ImageInfo, (char *)Data, itemlen TSRMLS_CC);
break;
case M_EXIF:
if (!(ImageInfo->sections_found&FOUND_IFD0)) {
/*ImageInfo->sections_found |= FOUND_EXIF;*/
/* Seen files from some 'U-lead' software with Vivitar scanner
that uses marker 31 later in the file (no clue what for!) */
exif_process_APP1(ImageInfo, (char *)Data, itemlen, fpos TSRMLS_CC);
}
break;
case M_APP12:
exif_process_APP12(ImageInfo, (char *)Data, itemlen TSRMLS_CC);
break;
case M_SOF0:
case M_SOF1:
case M_SOF2:
case M_SOF3:
case M_SOF5:
case M_SOF6:
case M_SOF7:
case M_SOF9:
case M_SOF10:
case M_SOF11:
case M_SOF13:
case M_SOF14:
case M_SOF15:
if ((itemlen - 2) < 6) {
return FALSE;
}
exif_process_SOFn(Data, marker, &sof_info);
ImageInfo->Width = sof_info.width;
ImageInfo->Height = sof_info.height;
if (sof_info.num_components == 3) {
ImageInfo->IsColor = 1;
} else {
ImageInfo->IsColor = 0;
}
break;
default:
/* skip any other marker silently. */
break;
}
/* keep track of last marker */
last_marker = marker;
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Done");
#endif
return TRUE;
}
/* }}} */
/* {{{ exif_scan_thumbnail
* scan the JPEG thumbnail (in memory) */
static int exif_scan_thumbnail(image_info_type *ImageInfo TSRMLS_DC)
{
uchar c, *data = (uchar*)ImageInfo->Thumbnail.data;
int n, marker;
size_t length=2, pos=0;
jpeg_sof_info sof_info;
if (!data) {
return FALSE; /* nothing to do here */
}
if (memcmp(data, "\xFF\xD8\xFF", 3)) {
if (!ImageInfo->Thumbnail.width && !ImageInfo->Thumbnail.height) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Thumbnail is not a JPEG image");
}
return FALSE;
}
for (;;) {
pos += length;
if (pos>=ImageInfo->Thumbnail.size)
return FALSE;
c = data[pos++];
if (pos>=ImageInfo->Thumbnail.size)
return FALSE;
if (c != 0xFF) {
return FALSE;
}
n = 8;
while ((c = data[pos++]) == 0xFF && n--) {
if (pos+3>=ImageInfo->Thumbnail.size)
return FALSE;
/* +3 = pos++ of next check when reaching marker + 2 bytes for length */
}
if (c == 0xFF)
return FALSE;
marker = c;
length = php_jpg_get16(data+pos);
if (pos+length>=ImageInfo->Thumbnail.size) {
return FALSE;
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail: process section(x%02X=%s) @ x%04X + x%04X", marker, exif_get_markername(marker), pos, length);
#endif
switch (marker) {
case M_SOF0:
case M_SOF1:
case M_SOF2:
case M_SOF3:
case M_SOF5:
case M_SOF6:
case M_SOF7:
case M_SOF9:
case M_SOF10:
case M_SOF11:
case M_SOF13:
case M_SOF14:
case M_SOF15:
/* handle SOFn block */
exif_process_SOFn(data+pos, marker, &sof_info);
ImageInfo->Thumbnail.height = sof_info.height;
ImageInfo->Thumbnail.width = sof_info.width;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail: size: %d * %d", sof_info.width, sof_info.height);
#endif
return TRUE;
case M_SOS:
case M_EOI:
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Could not compute size of thumbnail");
return FALSE;
default:
/* just skip */
break;
}
}
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Could not compute size of thumbnail");
return FALSE;
}
/* }}} */
/* {{{ exif_process_IFD_in_TIFF
* Parse the TIFF header; */
static int exif_process_IFD_in_TIFF(image_info_type *ImageInfo, size_t dir_offset, int section_index TSRMLS_DC)
{
int i, sn, num_entries, sub_section_index = 0;
unsigned char *dir_entry;
char tagname[64];
size_t ifd_size, dir_size, entry_offset, next_offset, entry_length, entry_value=0, fgot;
int entry_tag , entry_type;
tag_table_type tag_table = exif_get_tag_table(section_index);
if (ImageInfo->ifd_nesting_level > MAX_IFD_NESTING_LEVEL) {
return FALSE;
}
if (ImageInfo->FileSize >= dir_offset+2) {
sn = exif_file_sections_add(ImageInfo, M_PSEUDO, 2, NULL);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read from TIFF: filesize(x%04X), IFD dir(x%04X + x%04X)", ImageInfo->FileSize, dir_offset, 2);
#endif
php_stream_seek(ImageInfo->infile, dir_offset, SEEK_SET); /* we do not know the order of sections */
php_stream_read(ImageInfo->infile, (char*)ImageInfo->file.list[sn].data, 2);
num_entries = php_ifd_get16u(ImageInfo->file.list[sn].data, ImageInfo->motorola_intel);
dir_size = 2/*num dir entries*/ +12/*length of entry*/*num_entries +4/* offset to next ifd (points to thumbnail or NULL)*/;
if (ImageInfo->FileSize >= dir_offset+dir_size) {
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read from TIFF: filesize(x%04X), IFD dir(x%04X + x%04X), IFD entries(%d)", ImageInfo->FileSize, dir_offset+2, dir_size-2, num_entries);
#endif
if (exif_file_sections_realloc(ImageInfo, sn, dir_size TSRMLS_CC)) {
return FALSE;
}
php_stream_read(ImageInfo->infile, (char*)(ImageInfo->file.list[sn].data+2), dir_size-2);
/*exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Dump: %s", exif_char_dump(ImageInfo->file.list[sn].data, dir_size, 0));*/
next_offset = php_ifd_get32u(ImageInfo->file.list[sn].data + dir_size - 4, ImageInfo->motorola_intel);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read from TIFF done, next offset x%04X", next_offset);
#endif
/* now we have the directory we can look how long it should be */
ifd_size = dir_size;
for(i=0;i<num_entries;i++) {
dir_entry = ImageInfo->file.list[sn].data+2+i*12;
entry_tag = php_ifd_get16u(dir_entry+0, ImageInfo->motorola_intel);
entry_type = php_ifd_get16u(dir_entry+2, ImageInfo->motorola_intel);
if (entry_type > NUM_FORMATS) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read from TIFF: tag(0x%04X,%12s): Illegal format code 0x%04X, switching to BYTE", entry_tag, exif_get_tagname(entry_tag, tagname, -12, tag_table TSRMLS_CC), entry_type);
/* Since this is repeated in exif_process_IFD_TAG make it a notice here */
/* and make it a warning in the exif_process_IFD_TAG which is called */
/* elsewhere. */
entry_type = TAG_FMT_BYTE;
/*The next line would break the image on writeback: */
/* php_ifd_set16u(dir_entry+2, entry_type, ImageInfo->motorola_intel);*/
}
entry_length = php_ifd_get32u(dir_entry+4, ImageInfo->motorola_intel) * php_tiff_bytes_per_format[entry_type];
if (entry_length <= 4) {
switch(entry_type) {
case TAG_FMT_USHORT:
entry_value = php_ifd_get16u(dir_entry+8, ImageInfo->motorola_intel);
break;
case TAG_FMT_SSHORT:
entry_value = php_ifd_get16s(dir_entry+8, ImageInfo->motorola_intel);
break;
case TAG_FMT_ULONG:
entry_value = php_ifd_get32u(dir_entry+8, ImageInfo->motorola_intel);
break;
case TAG_FMT_SLONG:
entry_value = php_ifd_get32s(dir_entry+8, ImageInfo->motorola_intel);
break;
}
switch(entry_tag) {
case TAG_IMAGEWIDTH:
case TAG_COMP_IMAGE_WIDTH:
ImageInfo->Width = entry_value;
break;
case TAG_IMAGEHEIGHT:
case TAG_COMP_IMAGE_HEIGHT:
ImageInfo->Height = entry_value;
break;
case TAG_PHOTOMETRIC_INTERPRETATION:
switch (entry_value) {
case PMI_BLACK_IS_ZERO:
case PMI_WHITE_IS_ZERO:
case PMI_TRANSPARENCY_MASK:
ImageInfo->IsColor = 0;
break;
case PMI_RGB:
case PMI_PALETTE_COLOR:
case PMI_SEPARATED:
case PMI_YCBCR:
case PMI_CIELAB:
ImageInfo->IsColor = 1;
break;
}
break;
}
} else {
entry_offset = php_ifd_get32u(dir_entry+8, ImageInfo->motorola_intel);
/* Expand the IFD cache only if the entry needs it and sits right at the end of the current cache; */
/* otherwise there may be huge holes between two entries */
if (entry_offset + entry_length > dir_offset + ifd_size
&& entry_offset == dir_offset + ifd_size) {
ifd_size = entry_offset + entry_length - dir_offset;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Resize struct: x%04X + x%04X - x%04X = x%04X", entry_offset, entry_length, dir_offset, ifd_size);
#endif
}
}
}
if (ImageInfo->FileSize >= dir_offset + ImageInfo->file.list[sn].size) {
if (ifd_size > dir_size) {
if (dir_offset + ifd_size > ImageInfo->FileSize) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Error in TIFF: filesize(x%04X) less than size of IFD(x%04X + x%04X)", ImageInfo->FileSize, dir_offset, ifd_size);
return FALSE;
}
if (exif_file_sections_realloc(ImageInfo, sn, ifd_size TSRMLS_CC)) {
return FALSE;
}
/* read values not stored in directory itself */
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read from TIFF: filesize(x%04X), IFD(x%04X + x%04X)", ImageInfo->FileSize, dir_offset, ifd_size);
#endif
php_stream_read(ImageInfo->infile, (char*)(ImageInfo->file.list[sn].data+dir_size), ifd_size-dir_size);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read from TIFF, done");
#endif
}
/* now process the tags */
for(i=0;i<num_entries;i++) {
dir_entry = ImageInfo->file.list[sn].data+2+i*12;
entry_tag = php_ifd_get16u(dir_entry+0, ImageInfo->motorola_intel);
entry_type = php_ifd_get16u(dir_entry+2, ImageInfo->motorola_intel);
/*entry_length = php_ifd_get32u(dir_entry+4, ImageInfo->motorola_intel);*/
if (entry_tag == TAG_EXIF_IFD_POINTER ||
entry_tag == TAG_INTEROP_IFD_POINTER ||
entry_tag == TAG_GPS_IFD_POINTER ||
entry_tag == TAG_SUB_IFD
) {
switch(entry_tag) {
case TAG_EXIF_IFD_POINTER:
ImageInfo->sections_found |= FOUND_EXIF;
sub_section_index = SECTION_EXIF;
break;
case TAG_GPS_IFD_POINTER:
ImageInfo->sections_found |= FOUND_GPS;
sub_section_index = SECTION_GPS;
break;
case TAG_INTEROP_IFD_POINTER:
ImageInfo->sections_found |= FOUND_INTEROP;
sub_section_index = SECTION_INTEROP;
break;
case TAG_SUB_IFD:
ImageInfo->sections_found |= FOUND_THUMBNAIL;
sub_section_index = SECTION_THUMBNAIL;
break;
}
entry_offset = php_ifd_get32u(dir_entry+8, ImageInfo->motorola_intel);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Next IFD: %s @x%04X", exif_get_sectionname(sub_section_index), entry_offset);
#endif
ImageInfo->ifd_nesting_level++;
exif_process_IFD_in_TIFF(ImageInfo, entry_offset, sub_section_index TSRMLS_CC);
if (section_index!=SECTION_THUMBNAIL && entry_tag==TAG_SUB_IFD) {
if (ImageInfo->Thumbnail.filetype != IMAGE_FILETYPE_UNKNOWN
&& ImageInfo->Thumbnail.size
&& ImageInfo->Thumbnail.offset
&& ImageInfo->read_thumbnail
) {
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "%s THUMBNAIL @0x%04X + 0x%04X", ImageInfo->Thumbnail.data ? "Ignore" : "Read", ImageInfo->Thumbnail.offset, ImageInfo->Thumbnail.size);
#endif
if (!ImageInfo->Thumbnail.data) {
ImageInfo->Thumbnail.data = safe_emalloc(ImageInfo->Thumbnail.size, 1, 0);
php_stream_seek(ImageInfo->infile, ImageInfo->Thumbnail.offset, SEEK_SET);
fgot = php_stream_read(ImageInfo->infile, ImageInfo->Thumbnail.data, ImageInfo->Thumbnail.size);
if (fgot < ImageInfo->Thumbnail.size) {
EXIF_ERRLOG_THUMBEOF(ImageInfo)
}
exif_thumbnail_build(ImageInfo TSRMLS_CC);
}
}
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Next IFD: %s done", exif_get_sectionname(sub_section_index));
#endif
} else {
if (!exif_process_IFD_TAG(ImageInfo, (char*)dir_entry,
(char*)(ImageInfo->file.list[sn].data-dir_offset),
ifd_size, 0, section_index, 0, tag_table TSRMLS_CC)) {
return FALSE;
}
}
}
/* If we had a thumbnail in a SUB_IFD we have ANOTHER image in NEXT IFD */
if (next_offset && section_index != SECTION_THUMBNAIL) {
/* this should be a thumbnail IFD */
/* the thumbnail itself is stored at Tag=StripOffsets */
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read next IFD (THUMBNAIL) at x%04X", next_offset);
#endif
ImageInfo->ifd_nesting_level++;
exif_process_IFD_in_TIFF(ImageInfo, next_offset, SECTION_THUMBNAIL TSRMLS_CC);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "%s THUMBNAIL @0x%04X + 0x%04X", ImageInfo->Thumbnail.data ? "Ignore" : "Read", ImageInfo->Thumbnail.offset, ImageInfo->Thumbnail.size);
#endif
if (!ImageInfo->Thumbnail.data && ImageInfo->Thumbnail.offset && ImageInfo->Thumbnail.size && ImageInfo->read_thumbnail) {
ImageInfo->Thumbnail.data = safe_emalloc(ImageInfo->Thumbnail.size, 1, 0);
php_stream_seek(ImageInfo->infile, ImageInfo->Thumbnail.offset, SEEK_SET);
fgot = php_stream_read(ImageInfo->infile, ImageInfo->Thumbnail.data, ImageInfo->Thumbnail.size);
if (fgot < ImageInfo->Thumbnail.size) {
EXIF_ERRLOG_THUMBEOF(ImageInfo)
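/* Short read: discard the partially filled buffer so that
 * uninitialized heap bytes can never leak out through the
 * returned thumbnail data. */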
efree(ImageInfo->Thumbnail.data);
ImageInfo->Thumbnail.data = NULL;
} else {
exif_thumbnail_build(ImageInfo TSRMLS_CC);
}
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read next IFD (THUMBNAIL) done");
#endif
}
return TRUE;
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Error in TIFF: filesize(x%04X) less than size of IFD(x%04X)", ImageInfo->FileSize, dir_offset+ImageInfo->file.list[sn].size);
return FALSE;
}
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Error in TIFF: filesize(x%04X) less than size of IFD dir(x%04X)", ImageInfo->FileSize, dir_offset+dir_size);
return FALSE;
}
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Error in TIFF: filesize(x%04X) less than start of IFD dir(x%04X)", ImageInfo->FileSize, dir_offset+2);
return FALSE;
}
}
/* }}} */
/* {{{ exif_scan_FILE_header
 * Determine the file type from its signature bytes and scan its headers;
 * for JPEG this parses the marker stream until SOS or EOI is seen. */
static int exif_scan_FILE_header(image_info_type *ImageInfo TSRMLS_DC)
{
unsigned char file_header[8];
int ret = FALSE;
ImageInfo->FileType = IMAGE_FILETYPE_UNKNOWN;
if (ImageInfo->FileSize >= 2) {
php_stream_seek(ImageInfo->infile, 0, SEEK_SET);
if (php_stream_read(ImageInfo->infile, (char*)file_header, 2) != 2) {
return FALSE;
}
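/* 0xFF followed by M_SOI (0xD8) is the JPEG start-of-image marker. */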
if ((file_header[0]==0xff) && (file_header[1]==M_SOI)) {
ImageInfo->FileType = IMAGE_FILETYPE_JPEG;
if (exif_scan_JPEG_header(ImageInfo TSRMLS_CC)) {
ret = TRUE;
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Invalid JPEG file");
}
} else if (ImageInfo->FileSize >= 8) {
if (php_stream_read(ImageInfo->infile, (char*)(file_header+2), 6) != 6) {
return FALSE;
}
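/* TIFF signatures: "II" (Intel) marks little-endian byte order and "MM"
 * (Motorola) big-endian; both are followed by the 16-bit value 42 and a
 * 32-bit offset to IFD0, which is read out of file_header[4..7] below. */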
if (!memcmp(file_header, "II\x2A\x00", 4)) {
ImageInfo->FileType = IMAGE_FILETYPE_TIFF_II;
ImageInfo->motorola_intel = 0;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "File has TIFF/II format");
#endif
ImageInfo->sections_found |= FOUND_IFD0;
if (exif_process_IFD_in_TIFF(ImageInfo,
php_ifd_get32u(file_header + 4, ImageInfo->motorola_intel),
SECTION_IFD0 TSRMLS_CC)) {
ret = TRUE;
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Invalid TIFF file");
}
} else if (!memcmp(file_header, "MM\x00\x2a", 4)) {
ImageInfo->FileType = IMAGE_FILETYPE_TIFF_MM;
ImageInfo->motorola_intel = 1;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "File has TIFF/MM format");
#endif
ImageInfo->sections_found |= FOUND_IFD0;
if (exif_process_IFD_in_TIFF(ImageInfo,
php_ifd_get32u(file_header + 4, ImageInfo->motorola_intel),
SECTION_IFD0 TSRMLS_CC)) {
ret = TRUE;
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Invalid TIFF file");
}
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "File not supported");
return FALSE;
}
}
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "File too small (%d)", ImageInfo->FileSize);
}
return ret;
}
/* }}} */
/* {{{ exif_discard_imageinfo
Discard data scanned by exif_read_file.
*/
static int exif_discard_imageinfo(image_info_type *ImageInfo)
{
int i;
EFREE_IF(ImageInfo->FileName);
EFREE_IF(ImageInfo->UserComment);
EFREE_IF(ImageInfo->UserCommentEncoding);
EFREE_IF(ImageInfo->Copyright);
EFREE_IF(ImageInfo->CopyrightPhotographer);
EFREE_IF(ImageInfo->CopyrightEditor);
EFREE_IF(ImageInfo->Thumbnail.data);
EFREE_IF(ImageInfo->encode_unicode);
EFREE_IF(ImageInfo->decode_unicode_be);
EFREE_IF(ImageInfo->decode_unicode_le);
EFREE_IF(ImageInfo->encode_jis);
EFREE_IF(ImageInfo->decode_jis_be);
EFREE_IF(ImageInfo->decode_jis_le);
EFREE_IF(ImageInfo->make);
EFREE_IF(ImageInfo->model);
for (i=0; i<ImageInfo->xp_fields.count; i++) {
EFREE_IF(ImageInfo->xp_fields.list[i].value);
}
EFREE_IF(ImageInfo->xp_fields.list);
for (i=0; i<SECTION_COUNT; i++) {
exif_iif_free(ImageInfo, i);
}
exif_file_sections_free(ImageInfo);
memset(ImageInfo, 0, sizeof(*ImageInfo));
return TRUE;
}
/* }}} */
/* {{{ exif_read_file
*/
static int exif_read_file(image_info_type *ImageInfo, char *FileName, int read_thumbnail, int read_all TSRMLS_DC)
{
int ret;
struct stat st;
/* Start with an empty image information structure. */
memset(ImageInfo, 0, sizeof(*ImageInfo));
ImageInfo->motorola_intel = -1; /* flag as unknown */
ImageInfo->infile = php_stream_open_wrapper(FileName, "rb", STREAM_MUST_SEEK|IGNORE_PATH, NULL);
if (!ImageInfo->infile) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Unable to open file");
return FALSE;
}
if (php_stream_is(ImageInfo->infile, PHP_STREAM_IS_STDIO)) {
if (VCWD_STAT(FileName, &st) >= 0) {
if ((st.st_mode & S_IFMT) != S_IFREG) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Not a file");
php_stream_close(ImageInfo->infile);
return FALSE;
}
/* Store file date/time. */
ImageInfo->FileDateTime = st.st_mtime;
ImageInfo->FileSize = st.st_size;
/*exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Opened stream is file: %d", ImageInfo->FileSize);*/
}
} else {
if (!ImageInfo->FileSize) {
php_stream_seek(ImageInfo->infile, 0, SEEK_END);
ImageInfo->FileSize = php_stream_tell(ImageInfo->infile);
php_stream_seek(ImageInfo->infile, 0, SEEK_SET);
}
}
php_basename(FileName, strlen(FileName), NULL, 0, &(ImageInfo->FileName), NULL TSRMLS_CC);
ImageInfo->read_thumbnail = read_thumbnail;
ImageInfo->read_all = read_all;
ImageInfo->Thumbnail.filetype = IMAGE_FILETYPE_UNKNOWN;
ImageInfo->encode_unicode = safe_estrdup(EXIF_G(encode_unicode));
ImageInfo->decode_unicode_be = safe_estrdup(EXIF_G(decode_unicode_be));
ImageInfo->decode_unicode_le = safe_estrdup(EXIF_G(decode_unicode_le));
ImageInfo->encode_jis = safe_estrdup(EXIF_G(encode_jis));
ImageInfo->decode_jis_be = safe_estrdup(EXIF_G(decode_jis_be));
ImageInfo->decode_jis_le = safe_estrdup(EXIF_G(decode_jis_le));
ImageInfo->ifd_nesting_level = 0;
/* Scan the JPEG headers. */
ret = exif_scan_FILE_header(ImageInfo TSRMLS_CC);
php_stream_close(ImageInfo->infile);
return ret;
}
/* }}} */
/* {{{ proto array exif_read_data(string filename [, sections_needed [, sub_arrays[, read_thumbnail]]])
Reads header data from the JPEG/TIFF image filename and optionally reads the internal thumbnails */
PHP_FUNCTION(exif_read_data)
{
char *p_name, *p_sections_needed = NULL;
int p_name_len, p_sections_needed_len = 0;
zend_bool sub_arrays=0, read_thumbnail=0, read_all=0;
int i, ret, sections_needed=0;
image_info_type ImageInfo;
char tmp[64], *sections_str, *s;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "p|sbb", &p_name, &p_name_len, &p_sections_needed, &p_sections_needed_len, &sub_arrays, &read_thumbnail) == FAILURE) {
return;
}
memset(&ImageInfo, 0, sizeof(ImageInfo));
if (p_sections_needed) {
spprintf(&sections_str, 0, ",%s,", p_sections_needed);
/* sections_str DOES start with , and SPACES are NOT allowed in names */
s = sections_str;
while (*++s) {
if (*s == ' ') {
*s = ',';
}
}
for (i = 0; i < SECTION_COUNT; i++) {
snprintf(tmp, sizeof(tmp), ",%s,", exif_get_sectionname(i));
if (strstr(sections_str, tmp)) {
sections_needed |= 1<<i;
}
}
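/* Illustrative example: p_sections_needed = "COMMENT, EXIF" becomes
 * ",COMMENT,,EXIF," after the blank-to-comma rewrite above, and every
 * known section is looked up as the substring ",NAME,", so partial
 * section names can never match by accident. */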
EFREE_IF(sections_str);
/* now see what we need */
#ifdef EXIF_DEBUG
sections_str = exif_get_sectionlist(sections_needed TSRMLS_CC);
if (!sections_str) {
RETURN_FALSE;
}
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Sections needed: %s", sections_str[0] ? sections_str : "None");
EFREE_IF(sections_str);
#endif
}
ret = exif_read_file(&ImageInfo, p_name, read_thumbnail, read_all TSRMLS_CC);
sections_str = exif_get_sectionlist(ImageInfo.sections_found TSRMLS_CC);
#ifdef EXIF_DEBUG
if (sections_str)
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Sections found: %s", sections_str[0] ? sections_str : "None");
#endif
ImageInfo.sections_found |= FOUND_COMPUTED|FOUND_FILE; /* not reported in the debug output above */
if (ret == FALSE || (sections_needed && !(sections_needed&ImageInfo.sections_found))) {
/* array_init() must come last; otherwise the array would have to be freed whenever a later test fails. */
exif_discard_imageinfo(&ImageInfo);
EFREE_IF(sections_str);
RETURN_FALSE;
}
array_init(return_value);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Generate section FILE");
#endif
/* now we can add our information */
exif_iif_add_str(&ImageInfo, SECTION_FILE, "FileName", ImageInfo.FileName TSRMLS_CC);
exif_iif_add_int(&ImageInfo, SECTION_FILE, "FileDateTime", ImageInfo.FileDateTime TSRMLS_CC);
exif_iif_add_int(&ImageInfo, SECTION_FILE, "FileSize", ImageInfo.FileSize TSRMLS_CC);
exif_iif_add_int(&ImageInfo, SECTION_FILE, "FileType", ImageInfo.FileType TSRMLS_CC);
exif_iif_add_str(&ImageInfo, SECTION_FILE, "MimeType", (char*)php_image_type_to_mime_type(ImageInfo.FileType) TSRMLS_CC);
exif_iif_add_str(&ImageInfo, SECTION_FILE, "SectionsFound", sections_str ? sections_str : "NONE" TSRMLS_CC);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Generate section COMPUTED");
#endif
if (ImageInfo.Width>0 && ImageInfo.Height>0) {
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "html" TSRMLS_CC, "width=\"%d\" height=\"%d\"", ImageInfo.Width, ImageInfo.Height);
exif_iif_add_int(&ImageInfo, SECTION_COMPUTED, "Height", ImageInfo.Height TSRMLS_CC);
exif_iif_add_int(&ImageInfo, SECTION_COMPUTED, "Width", ImageInfo.Width TSRMLS_CC);
}
exif_iif_add_int(&ImageInfo, SECTION_COMPUTED, "IsColor", ImageInfo.IsColor TSRMLS_CC);
if (ImageInfo.motorola_intel != -1) {
exif_iif_add_int(&ImageInfo, SECTION_COMPUTED, "ByteOrderMotorola", ImageInfo.motorola_intel TSRMLS_CC);
}
if (ImageInfo.FocalLength) {
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "FocalLength" TSRMLS_CC, "%4.1Fmm", ImageInfo.FocalLength);
if(ImageInfo.CCDWidth) {
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "35mmFocalLength" TSRMLS_CC, "%dmm", (int)(ImageInfo.FocalLength/ImageInfo.CCDWidth*35+0.5));
}
}
if(ImageInfo.CCDWidth) {
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "CCDWidth" TSRMLS_CC, "%dmm", (int)ImageInfo.CCDWidth);
}
if(ImageInfo.ExposureTime>0) {
if(ImageInfo.ExposureTime <= 0.5) {
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "ExposureTime" TSRMLS_CC, "%0.3F s (1/%d)", ImageInfo.ExposureTime, (int)(0.5 + 1/ImageInfo.ExposureTime));
} else {
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "ExposureTime" TSRMLS_CC, "%0.3F s", ImageInfo.ExposureTime);
}
}
if(ImageInfo.ApertureFNumber) {
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "ApertureFNumber" TSRMLS_CC, "f/%.1F", ImageInfo.ApertureFNumber);
}
if(ImageInfo.Distance) {
if(ImageInfo.Distance<0) {
exif_iif_add_str(&ImageInfo, SECTION_COMPUTED, "FocusDistance", "Infinite" TSRMLS_CC);
} else {
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "FocusDistance" TSRMLS_CC, "%0.2Fm", ImageInfo.Distance);
}
}
if (ImageInfo.UserComment) {
exif_iif_add_buffer(&ImageInfo, SECTION_COMPUTED, "UserComment", ImageInfo.UserCommentLength, ImageInfo.UserComment TSRMLS_CC);
if (ImageInfo.UserCommentEncoding && strlen(ImageInfo.UserCommentEncoding)) {
exif_iif_add_str(&ImageInfo, SECTION_COMPUTED, "UserCommentEncoding", ImageInfo.UserCommentEncoding TSRMLS_CC);
}
}
exif_iif_add_str(&ImageInfo, SECTION_COMPUTED, "Copyright", ImageInfo.Copyright TSRMLS_CC);
exif_iif_add_str(&ImageInfo, SECTION_COMPUTED, "Copyright.Photographer", ImageInfo.CopyrightPhotographer TSRMLS_CC);
exif_iif_add_str(&ImageInfo, SECTION_COMPUTED, "Copyright.Editor", ImageInfo.CopyrightEditor TSRMLS_CC);
for (i=0; i<ImageInfo.xp_fields.count; i++) {
exif_iif_add_str(&ImageInfo, SECTION_WINXP, exif_get_tagname(ImageInfo.xp_fields.list[i].tag, NULL, 0, exif_get_tag_table(SECTION_WINXP) TSRMLS_CC), ImageInfo.xp_fields.list[i].value TSRMLS_CC);
}
if (ImageInfo.Thumbnail.size) {
if (read_thumbnail) {
/* not exif_iif_add_str : this is a buffer */
exif_iif_add_tag(&ImageInfo, SECTION_THUMBNAIL, "THUMBNAIL", TAG_NONE, TAG_FMT_UNDEFINED, ImageInfo.Thumbnail.size, ImageInfo.Thumbnail.data TSRMLS_CC);
}
if (!ImageInfo.Thumbnail.width || !ImageInfo.Thumbnail.height) {
/* try to evaluate if thumbnail data is present */
exif_scan_thumbnail(&ImageInfo TSRMLS_CC);
}
exif_iif_add_int(&ImageInfo, SECTION_COMPUTED, "Thumbnail.FileType", ImageInfo.Thumbnail.filetype TSRMLS_CC);
exif_iif_add_str(&ImageInfo, SECTION_COMPUTED, "Thumbnail.MimeType", (char*)php_image_type_to_mime_type(ImageInfo.Thumbnail.filetype) TSRMLS_CC);
}
if (ImageInfo.Thumbnail.width && ImageInfo.Thumbnail.height) {
exif_iif_add_int(&ImageInfo, SECTION_COMPUTED, "Thumbnail.Height", ImageInfo.Thumbnail.height TSRMLS_CC);
exif_iif_add_int(&ImageInfo, SECTION_COMPUTED, "Thumbnail.Width", ImageInfo.Thumbnail.width TSRMLS_CC);
}
EFREE_IF(sections_str);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Adding image infos");
#endif
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_FILE TSRMLS_CC);
add_assoc_image_info(return_value, 1, &ImageInfo, SECTION_COMPUTED TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_ANY_TAG TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_IFD0 TSRMLS_CC);
add_assoc_image_info(return_value, 1, &ImageInfo, SECTION_THUMBNAIL TSRMLS_CC);
add_assoc_image_info(return_value, 1, &ImageInfo, SECTION_COMMENT TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_EXIF TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_GPS TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_INTEROP TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_FPIX TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_APP12 TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_WINXP TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_MAKERNOTE TSRMLS_CC);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Discarding info");
#endif
exif_discard_imageinfo(&ImageInfo);
#ifdef EXIF_DEBUG
php_error_docref1(NULL TSRMLS_CC, p_name, E_NOTICE, "done");
#endif
}
/* }}} */
/* {{{ proto string exif_thumbnail(string filename [, &width, &height [, &imagetype]])
Reads the embedded thumbnail */
PHP_FUNCTION(exif_thumbnail)
{
zval *p_width = 0, *p_height = 0, *p_imagetype = 0;
char *p_name;
int p_name_len, ret, arg_c = ZEND_NUM_ARGS();
image_info_type ImageInfo;
memset(&ImageInfo, 0, sizeof(ImageInfo));
if (arg_c!=1 && arg_c!=3 && arg_c!=4) {
WRONG_PARAM_COUNT;
}
if (zend_parse_parameters(arg_c TSRMLS_CC, "p|z/z/z/", &p_name, &p_name_len, &p_width, &p_height, &p_imagetype) == FAILURE) {
return;
}
ret = exif_read_file(&ImageInfo, p_name, 1, 0 TSRMLS_CC);
if (ret==FALSE) {
exif_discard_imageinfo(&ImageInfo);
RETURN_FALSE;
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Thumbnail data %d %d %d, %d x %d", ImageInfo.Thumbnail.data, ImageInfo.Thumbnail.size, ImageInfo.Thumbnail.filetype, ImageInfo.Thumbnail.width, ImageInfo.Thumbnail.height);
#endif
if (!ImageInfo.Thumbnail.data || !ImageInfo.Thumbnail.size) {
exif_discard_imageinfo(&ImageInfo);
RETURN_FALSE;
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Returning thumbnail(%d)", ImageInfo.Thumbnail.size);
#endif
ZVAL_STRINGL(return_value, ImageInfo.Thumbnail.data, ImageInfo.Thumbnail.size, 1);
if (arg_c >= 3) {
if (!ImageInfo.Thumbnail.width || !ImageInfo.Thumbnail.height) {
exif_scan_thumbnail(&ImageInfo TSRMLS_CC);
}
zval_dtor(p_width);
zval_dtor(p_height);
ZVAL_LONG(p_width, ImageInfo.Thumbnail.width);
ZVAL_LONG(p_height, ImageInfo.Thumbnail.height);
}
if (arg_c >= 4) {
zval_dtor(p_imagetype);
ZVAL_LONG(p_imagetype, ImageInfo.Thumbnail.filetype);
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Discarding info");
#endif
exif_discard_imageinfo(&ImageInfo);
#ifdef EXIF_DEBUG
php_error_docref1(NULL TSRMLS_CC, p_name, E_NOTICE, "Done");
#endif
}
/* }}} */
/* {{{ proto int exif_imagetype(string imagefile)
Get the type of an image */
PHP_FUNCTION(exif_imagetype)
{
char *imagefile;
int imagefile_len;
php_stream * stream;
int itype = 0;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &imagefile, &imagefile_len) == FAILURE) {
return;
}
stream = php_stream_open_wrapper(imagefile, "rb", IGNORE_PATH|REPORT_ERRORS, NULL);
if (stream == NULL) {
RETURN_FALSE;
}
itype = php_getimagetype(stream, NULL TSRMLS_CC);
php_stream_close(stream);
if (itype == IMAGE_FILETYPE_UNKNOWN) {
RETURN_FALSE;
} else {
ZVAL_LONG(return_value, itype);
}
}
/* }}} */
#endif
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: sw=4 ts=4 tw=78 fdm=marker
* vim<600: sw=4 ts=4 tw=78
*/
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_5258_0 |
crossvul-cpp_data_bad_759_1 | /*
* IPv6 output functions
* Linux INET6 implementation
*
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
* Based on linux/net/ipv4/ip_output.c
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Changes:
 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
* extension headers are implemented.
* route changes now work.
* ip6_forward does not confuse sniffers.
* etc.
*
* H. von Brand : Added missing #include <linux/string.h>
* Imran Patel : frag id should be in NBO
* Kazunori MIYAZAWA @USAGI
* : add ip6_append_data and related functions
* for datagram xmit
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>
#include <linux/mroute6.h>
static int ip6_finish_output2(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct net_device *dev = dst->dev;
struct neighbour *neigh;
struct in6_addr *nexthop;
int ret;
skb->protocol = htons(ETH_P_IPV6);
skb->dev = dev;
if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
((mroute6_socket(dev_net(dev), skb) &&
!(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
&ipv6_hdr(skb)->saddr))) {
struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
/* Do not check for IFF_ALLMULTI; multicast routing
is not supported in any case.
*/
if (newskb)
NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
newskb, NULL, newskb->dev,
dev_loopback_xmit);
if (ipv6_hdr(skb)->hop_limit == 0) {
IP6_INC_STATS(dev_net(dev), idev,
IPSTATS_MIB_OUTDISCARDS);
kfree_skb(skb);
return 0;
}
}
IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
skb->len);
if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
IPV6_ADDR_SCOPE_NODELOCAL &&
!(dev->flags & IFF_LOOPBACK)) {
kfree_skb(skb);
return 0;
}
}
rcu_read_lock_bh();
nexthop = rt6_nexthop((struct rt6_info *)dst);
neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
if (unlikely(!neigh))
neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
if (!IS_ERR(neigh)) {
ret = dst_neigh_output(dst, neigh, skb);
rcu_read_unlock_bh();
return ret;
}
rcu_read_unlock_bh();
IP6_INC_STATS(dev_net(dst->dev),
ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
kfree_skb(skb);
return -EINVAL;
}
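/*
 * Fragment on output when the packet exceeds the path MTU and is not GSO,
 * when the route requires fragmenting every packet (dst_allfrag), or when
 * conntrack defragmentation recorded a frag_max_size smaller than this
 * packet; otherwise hand the skb straight to ip6_finish_output2().
 */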
static int ip6_finish_output(struct sk_buff *skb)
{
if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
dst_allfrag(skb_dst(skb)) ||
(IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
return ip6_fragment(skb, ip6_finish_output2);
else
return ip6_finish_output2(skb);
}
int ip6_output(struct sock *sk, struct sk_buff *skb)
{
struct net_device *dev = skb_dst(skb)->dev;
struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
if (unlikely(idev->cnf.disable_ipv6)) {
IP6_INC_STATS(dev_net(dev), idev,
IPSTATS_MIB_OUTDISCARDS);
kfree_skb(skb);
return 0;
}
return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev,
ip6_finish_output,
!(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
/*
* xmit an sk_buff (used by TCP, SCTP and DCCP)
*/
int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
struct ipv6_txoptions *opt, int tclass)
{
struct net *net = sock_net(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct in6_addr *first_hop = &fl6->daddr;
struct dst_entry *dst = skb_dst(skb);
unsigned int head_room;
struct ipv6hdr *hdr;
u8 proto = fl6->flowi6_proto;
int seg_len = skb->len;
int hlimit = -1;
u32 mtu;
head_room = sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
if (opt)
head_room += opt->opt_nflen + opt->opt_flen;
if (unlikely(skb_headroom(skb) < head_room)) {
struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
if (!skb2) {
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_OUTDISCARDS);
kfree_skb(skb);
return -ENOBUFS;
}
if (skb->sk)
skb_set_owner_w(skb2, skb->sk);
consume_skb(skb);
skb = skb2;
}
if (opt) {
seg_len += opt->opt_nflen + opt->opt_flen;
if (opt->opt_flen)
ipv6_push_frag_opts(skb, opt, &proto);
if (opt->opt_nflen)
ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
}
skb_push(skb, sizeof(struct ipv6hdr));
skb_reset_network_header(skb);
hdr = ipv6_hdr(skb);
/*
* Fill in the IPv6 header
*/
if (np)
hlimit = np->hop_limit;
if (hlimit < 0)
hlimit = ip6_dst_hoplimit(dst);
ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
np->autoflowlabel));
hdr->payload_len = htons(seg_len);
hdr->nexthdr = proto;
hdr->hop_limit = hlimit;
hdr->saddr = fl6->saddr;
hdr->daddr = *first_hop;
skb->protocol = htons(ETH_P_IPV6);
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
mtu = dst_mtu(dst);
if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_OUT, skb->len);
return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
dst->dev, dst_output);
}
skb->dev = dst->dev;
ipv6_local_error(sk, EMSGSIZE, fl6, mtu);
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
kfree_skb(skb);
return -EMSGSIZE;
}
EXPORT_SYMBOL(ip6_xmit);
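/*
 * Deliver a packet carrying a Router Alert option to every raw socket that
 * registered interest in this alert value (via the IPV6_ROUTER_ALERT socket
 * option). The last matching socket receives the original skb, earlier
 * matches receive clones; returns 1 when the packet was consumed.
 */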
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
struct ip6_ra_chain *ra;
struct sock *last = NULL;
read_lock(&ip6_ra_lock);
for (ra = ip6_ra_chain; ra; ra = ra->next) {
struct sock *sk = ra->sk;
if (sk && ra->sel == sel &&
(!sk->sk_bound_dev_if ||
sk->sk_bound_dev_if == skb->dev->ifindex)) {
if (last) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2)
rawv6_rcv(last, skb2);
}
last = sk;
}
}
if (last) {
rawv6_rcv(last, skb);
read_unlock(&ip6_ra_lock);
return 1;
}
read_unlock(&ip6_ra_lock);
return 0;
}
static int ip6_forward_proxy_check(struct sk_buff *skb)
{
struct ipv6hdr *hdr = ipv6_hdr(skb);
u8 nexthdr = hdr->nexthdr;
__be16 frag_off;
int offset;
if (ipv6_ext_hdr(nexthdr)) {
offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
if (offset < 0)
return 0;
} else
offset = sizeof(struct ipv6hdr);
if (nexthdr == IPPROTO_ICMPV6) {
struct icmp6hdr *icmp6;
if (!pskb_may_pull(skb, (skb_network_header(skb) +
offset + 1 - skb->data)))
return 0;
icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);
switch (icmp6->icmp6_type) {
case NDISC_ROUTER_SOLICITATION:
case NDISC_ROUTER_ADVERTISEMENT:
case NDISC_NEIGHBOUR_SOLICITATION:
case NDISC_NEIGHBOUR_ADVERTISEMENT:
case NDISC_REDIRECT:
/* Unicast neighbour discovery messages destined to the
 * proxied address are passed to the input function for
 * handling.
 */
return 1;
default:
break;
}
}
/*
* The proxying router can't forward traffic sent to a link-local
* address, so signal the sender and discard the packet. This
* behavior is clarified by the MIPv6 specification.
*/
if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
dst_link_failure(skb);
return -1;
}
return 0;
}
static inline int ip6_forward_finish(struct sk_buff *skb)
{
return dst_output(skb);
}
static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
{
unsigned int mtu;
struct inet6_dev *idev;
if (dst_metric_locked(dst, RTAX_MTU)) {
mtu = dst_metric_raw(dst, RTAX_MTU);
if (mtu)
return mtu;
}
mtu = IPV6_MIN_MTU;
rcu_read_lock();
idev = __in6_dev_get(dst->dev);
if (idev)
mtu = idev->cnf.mtu6;
rcu_read_unlock();
return mtu;
}
static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
if (skb->len <= mtu)
return false;
/* ipv6 conntrack defrag sets max_frag_size + ignore_df */
if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
return true;
if (skb->ignore_df)
return false;
if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
return false;
return true;
}
int ip6_forward(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct ipv6hdr *hdr = ipv6_hdr(skb);
struct inet6_skb_parm *opt = IP6CB(skb);
struct net *net = dev_net(dst->dev);
u32 mtu;
if (net->ipv6.devconf_all->forwarding == 0)
goto error;
if (skb->pkt_type != PACKET_HOST)
goto drop;
if (unlikely(skb->sk))
goto drop;
if (skb_warn_if_lro(skb))
goto drop;
if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
IPSTATS_MIB_INDISCARDS);
goto drop;
}
skb_forward_csum(skb);
/*
 * We do no processing on RA (Router Alert) packets; they are pushed up to
 * user level as-is, with no guarantee that the application will be able
 * to interpret them, because nothing clever can be done here.
 *
 * We are not the end node, so if the packet contains AH/ESP we cannot do
 * anything with it. Defragmenting would also be a mistake: RA packets
 * cannot be fragmented, because there is no guarantee that different
 * fragments will travel along the same path. --ANK
 */
if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
return 0;
}
/*
* check and decrement ttl
*/
if (hdr->hop_limit <= 1) {
/* Force OUTPUT device used as source address */
skb->dev = dst->dev;
icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
IPSTATS_MIB_INHDRERRORS);
kfree_skb(skb);
return -ETIMEDOUT;
}
/* XXX: idev->cnf.proxy_ndp? */
if (net->ipv6.devconf_all->proxy_ndp &&
pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
int proxied = ip6_forward_proxy_check(skb);
if (proxied > 0)
return ip6_input(skb);
else if (proxied < 0) {
IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
IPSTATS_MIB_INDISCARDS);
goto drop;
}
}
if (!xfrm6_route_forward(skb)) {
IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
IPSTATS_MIB_INDISCARDS);
goto drop;
}
dst = skb_dst(skb);
/* IPv6 specs say nothing about it, but it is clear that we cannot
send redirects to source routed frames.
We don't send redirects to frames decapsulated from IPsec.
*/
if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
struct in6_addr *target = NULL;
struct inet_peer *peer;
struct rt6_info *rt;
/*
 *	incoming and outgoing devices are the same;
* send a redirect.
*/
rt = (struct rt6_info *) dst;
if (rt->rt6i_flags & RTF_GATEWAY)
target = &rt->rt6i_gateway;
else
target = &hdr->daddr;
peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
/* Limit redirects both by destination (here)
and by source (inside ndisc_send_redirect)
*/
if (inet_peer_xrlim_allow(peer, 1*HZ))
ndisc_send_redirect(skb, target);
if (peer)
inet_putpeer(peer);
} else {
int addrtype = ipv6_addr_type(&hdr->saddr);
/* This check is security critical. */
if (addrtype == IPV6_ADDR_ANY ||
addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
goto error;
if (addrtype & IPV6_ADDR_LINKLOCAL) {
icmpv6_send(skb, ICMPV6_DEST_UNREACH,
ICMPV6_NOT_NEIGHBOUR, 0);
goto error;
}
}
mtu = ip6_dst_mtu_forward(dst);
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
if (ip6_pkt_too_big(skb, mtu)) {
/* Again, force OUTPUT device used as source address */
skb->dev = dst->dev;
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
IPSTATS_MIB_INTOOBIGERRORS);
IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
IPSTATS_MIB_FRAGFAILS);
kfree_skb(skb);
return -EMSGSIZE;
}
if (skb_cow(skb, dst->dev->hard_header_len)) {
IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
IPSTATS_MIB_OUTDISCARDS);
goto drop;
}
hdr = ipv6_hdr(skb);
/* Mangling hops number delayed to point after skb COW */
hdr->hop_limit--;
IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
ip6_forward_finish);
error:
IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
kfree_skb(skb);
return -EINVAL;
}
static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
to->pkt_type = from->pkt_type;
to->priority = from->priority;
to->protocol = from->protocol;
skb_dst_drop(to);
skb_dst_set(to, dst_clone(skb_dst(from)));
to->dev = from->dev;
to->mark = from->mark;
skb_copy_hash(to, from);
#ifdef CONFIG_NET_SCHED
to->tc_index = from->tc_index;
#endif
nf_copy(to, from);
skb_copy_secmark(to, from);
}
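/*
 * Derive the IPv6 fragment identification from a keyed hash of the route's
 * source and destination addresses rather than a single global counter.
 * This keeps the ID sequence unpredictable to off-path observers, since
 * predictable fragment IDs leak traffic information and make fragment
 * injection attacks practical.
 */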
static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
{
static u32 ip6_idents_hashrnd __read_mostly;
u32 hash, id;
net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd);
hash = __ipv6_addr_jhash(&rt->rt6i_src.addr, hash);
id = ip_idents_reserve(hash, 1);
fhdr->identification = htonl(id);
}
int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
struct sk_buff *frag;
struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
inet6_sk(skb->sk) : NULL;
struct ipv6hdr *tmp_hdr;
struct frag_hdr *fh;
unsigned int mtu, hlen, left, len;
int hroom, troom;
__be32 frag_id = 0;
int ptr, offset = 0, err = 0;
u8 *prevhdr, nexthdr = 0;
struct net *net = dev_net(skb_dst(skb)->dev);
err = ip6_find_1stfragopt(skb, &prevhdr);
if (err < 0)
goto fail;
hlen = err;
nexthdr = *prevhdr;
mtu = ip6_skb_dst_mtu(skb);
/* We must not fragment if the socket is set to force MTU discovery
 * or if the skb is not generated by a local socket.
*/
if (unlikely(!skb->ignore_df && skb->len > mtu) ||
(IP6CB(skb)->frag_max_size &&
IP6CB(skb)->frag_max_size > mtu)) {
if (skb->sk && dst_allfrag(skb_dst(skb)))
sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
skb->dev = skb_dst(skb)->dev;
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_FRAGFAILS);
kfree_skb(skb);
return -EMSGSIZE;
}
if (np && np->frag_size < mtu) {
if (np->frag_size)
mtu = np->frag_size;
}
mtu -= hlen + sizeof(struct frag_hdr);
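/*
 * Fast path: when the skb already carries a frag_list whose pieces match
 * the required fragment geometry (each fits the MTU, all but the last are
 * a multiple of 8 bytes, enough headroom, nothing shared or cloned), the
 * existing pieces are sent as the fragments without copying any payload.
 * Anything that fails these checks drops through to slow_path below, which
 * allocates a fresh skb per fragment and copies the data into it.
 */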
if (skb_has_frag_list(skb)) {
int first_len = skb_pagelen(skb);
struct sk_buff *frag2;
if (first_len - hlen > mtu ||
((first_len - hlen) & 7) ||
skb_cloned(skb))
goto slow_path;
skb_walk_frags(skb, frag) {
/* Correct geometry. */
if (frag->len > mtu ||
((frag->len & 7) && frag->next) ||
skb_headroom(frag) < hlen)
goto slow_path_clean;
/* Partially cloned skb? */
if (skb_shared(frag))
goto slow_path_clean;
BUG_ON(frag->sk);
if (skb->sk) {
frag->sk = skb->sk;
frag->destructor = sock_wfree;
}
skb->truesize -= frag->truesize;
}
err = 0;
offset = 0;
frag = skb_shinfo(skb)->frag_list;
skb_frag_list_init(skb);
/* BUILD HEADER */
*prevhdr = NEXTHDR_FRAGMENT;
tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
if (!tmp_hdr) {
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_FRAGFAILS);
return -ENOMEM;
}
__skb_pull(skb, hlen);
fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
__skb_push(skb, hlen);
skb_reset_network_header(skb);
memcpy(skb_network_header(skb), tmp_hdr, hlen);
ipv6_select_ident(fh, rt);
fh->nexthdr = nexthdr;
fh->reserved = 0;
fh->frag_off = htons(IP6_MF);
frag_id = fh->identification;
first_len = skb_pagelen(skb);
skb->data_len = first_len - skb_headlen(skb);
skb->len = first_len;
ipv6_hdr(skb)->payload_len = htons(first_len -
sizeof(struct ipv6hdr));
dst_hold(&rt->dst);
for (;;) {
/* Prepare header of the next frame,
* before previous one went down. */
if (frag) {
frag->ip_summed = CHECKSUM_NONE;
skb_reset_transport_header(frag);
fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
__skb_push(frag, hlen);
skb_reset_network_header(frag);
memcpy(skb_network_header(frag), tmp_hdr,
hlen);
offset += skb->len - hlen - sizeof(struct frag_hdr);
fh->nexthdr = nexthdr;
fh->reserved = 0;
fh->frag_off = htons(offset);
if (frag->next != NULL)
fh->frag_off |= htons(IP6_MF);
fh->identification = frag_id;
ipv6_hdr(frag)->payload_len =
htons(frag->len -
sizeof(struct ipv6hdr));
ip6_copy_metadata(frag, skb);
}
err = output(skb);
if (!err)
IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
IPSTATS_MIB_FRAGCREATES);
if (err || !frag)
break;
skb = frag;
frag = skb->next;
skb->next = NULL;
}
kfree(tmp_hdr);
if (err == 0) {
IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
IPSTATS_MIB_FRAGOKS);
ip6_rt_put(rt);
return 0;
}
kfree_skb_list(frag);
IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
IPSTATS_MIB_FRAGFAILS);
ip6_rt_put(rt);
return err;
slow_path_clean:
skb_walk_frags(skb, frag2) {
if (frag2 == frag)
break;
frag2->sk = NULL;
frag2->destructor = NULL;
skb->truesize += frag2->truesize;
}
}
slow_path:
if ((skb->ip_summed == CHECKSUM_PARTIAL) &&
skb_checksum_help(skb))
goto fail;
left = skb->len - hlen; /* Space per frame */
ptr = hlen; /* Where to start from */
/*
* Fragment the datagram.
*/
*prevhdr = NEXTHDR_FRAGMENT;
hroom = LL_RESERVED_SPACE(rt->dst.dev);
troom = rt->dst.dev->needed_tailroom;
/*
* Keep copying data until we run out.
*/
while (left > 0) {
len = left;
/* IF: it doesn't fit, use 'mtu' - the data space left */
if (len > mtu)
len = mtu;
/* IF: we are not sending up to and including the packet end
then align the next start on an eight byte boundary */
if (len < left) {
len &= ~7;
}
/*
* Allocate buffer.
*/
if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
hroom + troom, GFP_ATOMIC)) == NULL) {
NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_FRAGFAILS);
err = -ENOMEM;
goto fail;
}
/*
* Set up data on packet
*/
ip6_copy_metadata(frag, skb);
skb_reserve(frag, hroom);
skb_put(frag, len + hlen + sizeof(struct frag_hdr));
skb_reset_network_header(frag);
fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
frag->transport_header = (frag->network_header + hlen +
sizeof(struct frag_hdr));
/*
* Charge the memory for the fragment to any owner
* it might possess
*/
if (skb->sk)
skb_set_owner_w(frag, skb->sk);
/*
* Copy the packet header into the new buffer.
*/
skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
/*
* Build fragment header.
*/
fh->nexthdr = nexthdr;
fh->reserved = 0;
if (!frag_id) {
ipv6_select_ident(fh, rt);
frag_id = fh->identification;
} else
fh->identification = frag_id;
/*
* Copy a block of the IP datagram.
*/
BUG_ON(skb_copy_bits(skb, ptr, skb_transport_header(frag),
len));
left -= len;
fh->frag_off = htons(offset);
if (left > 0)
fh->frag_off |= htons(IP6_MF);
ipv6_hdr(frag)->payload_len = htons(frag->len -
sizeof(struct ipv6hdr));
ptr += len;
offset += len;
/*
* Put this fragment into the sending queue.
*/
err = output(frag);
if (err)
goto fail;
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_FRAGCREATES);
}
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_FRAGOKS);
consume_skb(skb);
return err;
fail:
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_FRAGFAILS);
kfree_skb(skb);
return err;
}
static inline int ip6_rt_check(const struct rt6key *rt_key,
const struct in6_addr *fl_addr,
const struct in6_addr *addr_cache)
{
return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
(addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
}
static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
struct dst_entry *dst,
const struct flowi6 *fl6)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct rt6_info *rt;
if (!dst)
goto out;
if (dst->ops->family != AF_INET6) {
dst_release(dst);
return NULL;
}
rt = (struct rt6_info *)dst;
/* Yes, checking route validity in the not-connected
* case is not very simple. Take into account,
* that we do not support routing by source, TOS,
* and MSG_DONTROUTE --ANK (980726)
*
* 1. ip6_rt_check(): If route was host route,
* check that cached destination is current.
* If it is network route, we still may
* check its validity using saved pointer
* to the last used address: daddr_cache.
* We do not want to save whole address now,
* (because main consumer of this service
 * is tcp, which does not have this problem),
* so that the last trick works only on connected
* sockets.
* 2. oif also should be the same.
*/
if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
#endif
(fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
dst_release(dst);
dst = NULL;
}
out:
return dst;
}
static int ip6_dst_lookup_tail(struct sock *sk,
struct dst_entry **dst, struct flowi6 *fl6)
{
struct net *net = sock_net(sk);
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
struct neighbour *n;
struct rt6_info *rt;
#endif
int err;
if (*dst == NULL)
*dst = ip6_route_output(net, sk, fl6);
if ((err = (*dst)->error))
goto out_err_release;
if (ipv6_addr_any(&fl6->saddr)) {
struct rt6_info *rt = (struct rt6_info *) *dst;
err = ip6_route_get_saddr(net, rt, &fl6->daddr,
sk ? inet6_sk(sk)->srcprefs : 0,
&fl6->saddr);
if (err)
goto out_err_release;
}
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
/*
* Here if the dst entry we've looked up
* has a neighbour entry that is in the INCOMPLETE
* state and the src address from the flow is
* marked as OPTIMISTIC, we release the found
* dst entry and replace it instead with the
* dst entry of the nexthop router
*/
rt = (struct rt6_info *) *dst;
rcu_read_lock_bh();
n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt));
err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
rcu_read_unlock_bh();
if (err) {
struct inet6_ifaddr *ifp;
struct flowi6 fl_gw6;
int redirect;
ifp = ipv6_get_ifaddr(net, &fl6->saddr,
(*dst)->dev, 1);
redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
if (ifp)
in6_ifa_put(ifp);
if (redirect) {
/*
* We need to get the dst entry for the
* default router instead
*/
dst_release(*dst);
memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
*dst = ip6_route_output(net, sk, &fl_gw6);
if ((err = (*dst)->error))
goto out_err_release;
}
}
#endif
if (ipv6_addr_v4mapped(&fl6->saddr) &&
!(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) {
err = -EAFNOSUPPORT;
goto out_err_release;
}
return 0;
out_err_release:
if (err == -ENETUNREACH)
IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
dst_release(*dst);
*dst = NULL;
return err;
}
/**
* ip6_dst_lookup - perform route lookup on flow
* @sk: socket which provides route info
* @dst: pointer to dst_entry * for result
* @fl6: flow to lookup
*
* This function performs a route lookup on the given flow.
*
* It returns zero on success, or a standard errno code on error.
*/
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6)
{
*dst = NULL;
return ip6_dst_lookup_tail(sk, dst, fl6);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);
/**
* ip6_dst_lookup_flow - perform route lookup on flow with ipsec
* @sk: socket which provides route info
* @fl6: flow to lookup
* @final_dst: final destination address for ipsec lookup
*
* This function performs a route lookup on the given flow.
*
* It returns a valid dst pointer on success, or a pointer encoded
* error code.
*/
struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
const struct in6_addr *final_dst)
{
struct dst_entry *dst = NULL;
int err;
err = ip6_dst_lookup_tail(sk, &dst, fl6);
if (err)
return ERR_PTR(err);
if (final_dst)
fl6->daddr = *final_dst;
return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
/**
* ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
* @sk: socket which provides the dst cache and route info
* @fl6: flow to lookup
* @final_dst: final destination address for ipsec lookup
*
* This function performs a route lookup on the given flow with the
* possibility of using the cached route in the socket if it is valid.
* It will take the socket dst lock when operating on the dst cache.
* As a result, this function can only be used in process context.
*
* It returns a valid dst pointer on success, or a pointer encoded
* error code.
*/
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
const struct in6_addr *final_dst)
{
struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
dst = ip6_sk_dst_check(sk, dst, fl6);
if (!dst)
dst = ip6_dst_lookup_flow(sk, fl6, final_dst);
return dst;
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
static inline int ip6_ufo_append_data(struct sock *sk,
int getfrag(void *from, char *to, int offset, int len,
int odd, struct sk_buff *skb),
void *from, int length, int hh_len, int fragheaderlen,
int transhdrlen, int mtu, unsigned int flags,
struct rt6_info *rt)
{
struct sk_buff *skb;
struct frag_hdr fhdr;
int err;
/* The network device supports UDP large send offload, so build a
 * single skb containing the complete UDP datagram.
 */
if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
skb = sock_alloc_send_skb(sk,
hh_len + fragheaderlen + transhdrlen + 20,
(flags & MSG_DONTWAIT), &err);
if (skb == NULL)
return err;
/* reserve space for Hardware header */
skb_reserve(skb, hh_len);
/* create space for UDP/IP header */
skb_put(skb, fragheaderlen + transhdrlen);
/* initialize network header pointer */
skb_reset_network_header(skb);
/* initialize protocol header pointer */
skb->transport_header = skb->network_header + fragheaderlen;
skb->protocol = htons(ETH_P_IPV6);
skb->csum = 0;
__skb_queue_tail(&sk->sk_write_queue, skb);
} else if (skb_is_gso(skb)) {
goto append;
}
skb->ip_summed = CHECKSUM_PARTIAL;
/* Specify the length of each IPv6 datagram fragment.
* It has to be a multiple of 8.
*/
skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
sizeof(struct frag_hdr)) & ~7;
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
ipv6_select_ident(&fhdr, rt);
skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
append:
return skb_append_datato_frags(sk, skb, getfrag, from,
(length - transhdrlen));
}
static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
gfp_t gfp)
{
return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}
static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
gfp_t gfp)
{
return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}
static void ip6_append_data_mtu(unsigned int *mtu,
int *maxfraglen,
unsigned int fragheaderlen,
struct sk_buff *skb,
struct rt6_info *rt,
unsigned int orig_mtu)
{
if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
if (skb == NULL) {
/* first fragment, reserve header_len */
*mtu = orig_mtu - rt->dst.header_len;
} else {
/*
 * This fragment is not the first, so the header
 * space is regarded as data space.
 */
*mtu = orig_mtu;
}
*maxfraglen = ((*mtu - fragheaderlen) & ~7)
+ fragheaderlen - sizeof(struct frag_hdr);
}
}
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
int offset, int len, int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
struct rt6_info *rt, unsigned int flags, int dontfrag)
{
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet_cork *cork;
struct sk_buff *skb, *skb_prev = NULL;
unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu, pmtu;
int exthdrlen;
int dst_exthdrlen;
int hh_len;
int copy;
int err;
int offset = 0;
__u8 tx_flags = 0;
u32 tskey = 0;
if (flags&MSG_PROBE)
return 0;
cork = &inet->cork.base;
if (skb_queue_empty(&sk->sk_write_queue)) {
/*
* setup for corking
*/
if (opt) {
if (WARN_ON(np->cork.opt))
return -EINVAL;
np->cork.opt = kzalloc(sizeof(*opt), sk->sk_allocation);
if (unlikely(np->cork.opt == NULL))
return -ENOBUFS;
np->cork.opt->tot_len = sizeof(*opt);
np->cork.opt->opt_flen = opt->opt_flen;
np->cork.opt->opt_nflen = opt->opt_nflen;
np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
sk->sk_allocation);
if (opt->dst0opt && !np->cork.opt->dst0opt)
return -ENOBUFS;
np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
sk->sk_allocation);
if (opt->dst1opt && !np->cork.opt->dst1opt)
return -ENOBUFS;
np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
sk->sk_allocation);
if (opt->hopopt && !np->cork.opt->hopopt)
return -ENOBUFS;
np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
sk->sk_allocation);
if (opt->srcrt && !np->cork.opt->srcrt)
return -ENOBUFS;
/* need source address above miyazawa*/
}
dst_hold(&rt->dst);
cork->dst = &rt->dst;
inet->cork.fl.u.ip6 = *fl6;
np->cork.hop_limit = hlimit;
np->cork.tclass = tclass;
if (rt->dst.flags & DST_XFRM_TUNNEL)
mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst);
else
mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
READ_ONCE(rt->dst.dev->mtu) : dst_mtu(rt->dst.path);
if (np->frag_size < mtu) {
if (np->frag_size)
mtu = np->frag_size;
}
if (mtu < IPV6_MIN_MTU)
return -EINVAL;
cork->fragsize = mtu;
if (dst_allfrag(rt->dst.path))
cork->flags |= IPCORK_ALLFRAG;
cork->length = 0;
exthdrlen = (opt ? opt->opt_flen : 0);
length += exthdrlen;
transhdrlen += exthdrlen;
dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
} else {
rt = (struct rt6_info *)cork->dst;
fl6 = &inet->cork.fl.u.ip6;
opt = np->cork.opt;
transhdrlen = 0;
exthdrlen = 0;
dst_exthdrlen = 0;
mtu = cork->fragsize;
}
orig_mtu = mtu;
hh_len = LL_RESERVED_SPACE(rt->dst.dev);
fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
(opt ? opt->opt_nflen : 0);
maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
sizeof(struct frag_hdr);
if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
unsigned int maxnonfragsize, headersize;
headersize = sizeof(struct ipv6hdr) +
(opt ? opt->opt_flen + opt->opt_nflen : 0) +
(dst_allfrag(&rt->dst) ?
sizeof(struct frag_hdr) : 0) +
rt->rt6i_nfheader_len;
if (ip6_sk_ignore_df(sk))
maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
else
maxnonfragsize = mtu;
/* as per RFC 7112 section 5, the entire IPv6 Header Chain must fit
* the first fragment
*/
if (headersize + transhdrlen > mtu)
goto emsgsize;
/* dontfrag active */
if ((cork->length + length > mtu - headersize) && dontfrag &&
(sk->sk_protocol == IPPROTO_UDP ||
sk->sk_protocol == IPPROTO_RAW)) {
ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
sizeof(struct ipv6hdr));
goto emsgsize;
}
if (cork->length + length > maxnonfragsize - headersize) {
emsgsize:
pmtu = max_t(int, mtu - headersize + sizeof(struct ipv6hdr), 0);
ipv6_local_error(sk, EMSGSIZE, fl6, pmtu);
return -EMSGSIZE;
}
}
if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW) {
sock_tx_timestamp(sk, &tx_flags);
if (tx_flags & SKBTX_ANY_SW_TSTAMP &&
sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
tskey = sk->sk_tskey++;
}
/*
* Let's try using as much space as possible.
* Use MTU if total length of the message fits into the MTU.
* Otherwise, we need to reserve fragment header and
 * fragment alignment (= 8-15 octets, in total).
 *
 * Note that we may need to "move" the data from the tail
 * of the buffer to the new fragment when we split
* the message.
*
* FIXME: It may be fragmented into multiple chunks
* at once if non-fragmentable extension headers
* are too large.
* --yoshfuji
*/
skb = skb_peek_tail(&sk->sk_write_queue);
cork->length += length;
if ((skb && skb_is_gso(skb)) ||
(((length + fragheaderlen) > mtu) &&
(skb_queue_len(&sk->sk_write_queue) <= 1) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO) &&
(sk->sk_type == SOCK_DGRAM))) {
err = ip6_ufo_append_data(sk, getfrag, from, length,
hh_len, fragheaderlen,
transhdrlen, mtu, flags, rt);
if (err)
goto error;
return 0;
}
if (!skb)
goto alloc_new_skb;
while (length > 0) {
/* Check if the remaining data fits into current packet. */
copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
if (copy < length)
copy = maxfraglen - skb->len;
if (copy <= 0) {
char *data;
unsigned int datalen;
unsigned int fraglen;
unsigned int fraggap;
unsigned int alloclen;
alloc_new_skb:
/* There's no room in the current skb */
if (skb)
fraggap = skb->len - maxfraglen;
else
fraggap = 0;
/* update mtu and maxfraglen if necessary */
if (skb == NULL || skb_prev == NULL)
ip6_append_data_mtu(&mtu, &maxfraglen,
fragheaderlen, skb, rt,
orig_mtu);
skb_prev = skb;
/*
* If remaining data exceeds the mtu,
* we know we need more fragment(s).
*/
datalen = length + fraggap;
if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
if ((flags & MSG_MORE) &&
!(rt->dst.dev->features&NETIF_F_SG))
alloclen = mtu;
else
alloclen = datalen + fragheaderlen;
alloclen += dst_exthdrlen;
if (datalen != length + fraggap) {
/*
 * This is not the last fragment, so the trailer
 * space is regarded as data space.
 */
datalen += rt->dst.trailer_len;
}
alloclen += rt->dst.trailer_len;
fraglen = datalen + fragheaderlen;
/*
 * We just reserve space for the fragment header.
 * Note: this may be an overallocation if the message
 * (without MSG_MORE) fits into the MTU.
 */
alloclen += sizeof(struct frag_hdr);
copy = datalen - transhdrlen - fraggap;
if (copy < 0) {
err = -EINVAL;
goto error;
}
if (transhdrlen) {
skb = sock_alloc_send_skb(sk,
alloclen + hh_len,
(flags & MSG_DONTWAIT), &err);
} else {
skb = NULL;
if (atomic_read(&sk->sk_wmem_alloc) <=
2 * sk->sk_sndbuf)
skb = sock_wmalloc(sk,
alloclen + hh_len, 1,
sk->sk_allocation);
if (unlikely(skb == NULL))
err = -ENOBUFS;
}
if (skb == NULL)
goto error;
/*
* Fill in the control structures
*/
skb->protocol = htons(ETH_P_IPV6);
skb->ip_summed = CHECKSUM_NONE;
skb->csum = 0;
/* reserve for fragmentation and ipsec header */
skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
dst_exthdrlen);
/* Only the initial fragment is time stamped */
skb_shinfo(skb)->tx_flags = tx_flags;
tx_flags = 0;
skb_shinfo(skb)->tskey = tskey;
tskey = 0;
/*
* Find where to start putting bytes
*/
data = skb_put(skb, fraglen);
skb_set_network_header(skb, exthdrlen);
data += fragheaderlen;
skb->transport_header = (skb->network_header +
fragheaderlen);
if (fraggap) {
skb->csum = skb_copy_and_csum_bits(
skb_prev, maxfraglen,
data + transhdrlen, fraggap, 0);
skb_prev->csum = csum_sub(skb_prev->csum,
skb->csum);
data += fraggap;
pskb_trim_unique(skb_prev, maxfraglen);
}
if (copy > 0 &&
getfrag(from, data + transhdrlen, offset,
copy, fraggap, skb) < 0) {
err = -EFAULT;
kfree_skb(skb);
goto error;
}
offset += copy;
length -= datalen - fraggap;
transhdrlen = 0;
exthdrlen = 0;
dst_exthdrlen = 0;
/*
* Put the packet on the pending queue
*/
__skb_queue_tail(&sk->sk_write_queue, skb);
continue;
}
if (copy > length)
copy = length;
if (!(rt->dst.dev->features&NETIF_F_SG) &&
skb_tailroom(skb) >= copy) {
unsigned int off;
off = skb->len;
if (getfrag(from, skb_put(skb, copy),
offset, copy, off, skb) < 0) {
__skb_trim(skb, off);
err = -EFAULT;
goto error;
}
} else {
int i = skb_shinfo(skb)->nr_frags;
struct page_frag *pfrag = sk_page_frag(sk);
err = -ENOMEM;
if (!sk_page_frag_refill(sk, pfrag))
goto error;
if (!skb_can_coalesce(skb, i, pfrag->page,
pfrag->offset)) {
err = -EMSGSIZE;
if (i == MAX_SKB_FRAGS)
goto error;
__skb_fill_page_desc(skb, i, pfrag->page,
pfrag->offset, 0);
skb_shinfo(skb)->nr_frags = ++i;
get_page(pfrag->page);
}
copy = min_t(int, copy, pfrag->size - pfrag->offset);
if (getfrag(from,
page_address(pfrag->page) + pfrag->offset,
offset, copy, skb->len, skb) < 0)
goto error_efault;
pfrag->offset += copy;
skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
skb->len += copy;
skb->data_len += copy;
skb->truesize += copy;
atomic_add(copy, &sk->sk_wmem_alloc);
}
offset += copy;
length -= copy;
}
return 0;
error_efault:
err = -EFAULT;
error:
cork->length -= length;
IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
return err;
}
EXPORT_SYMBOL_GPL(ip6_append_data);
static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
{
if (np->cork.opt) {
kfree(np->cork.opt->dst0opt);
kfree(np->cork.opt->dst1opt);
kfree(np->cork.opt->hopopt);
kfree(np->cork.opt->srcrt);
kfree(np->cork.opt);
np->cork.opt = NULL;
}
if (inet->cork.base.dst) {
dst_release(inet->cork.base.dst);
inet->cork.base.dst = NULL;
inet->cork.base.flags &= ~IPCORK_ALLFRAG;
}
memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
}
int ip6_push_pending_frames(struct sock *sk)
{
struct sk_buff *skb, *tmp_skb;
struct sk_buff **tail_skb;
struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct net *net = sock_net(sk);
struct ipv6hdr *hdr;
struct ipv6_txoptions *opt = np->cork.opt;
struct rt6_info *rt = (struct rt6_info *)inet->cork.base.dst;
struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
unsigned char proto = fl6->flowi6_proto;
int err = 0;
if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
goto out;
tail_skb = &(skb_shinfo(skb)->frag_list);
/* move skb->data to ip header from ext header */
if (skb->data < skb_network_header(skb))
__skb_pull(skb, skb_network_offset(skb));
while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
__skb_pull(tmp_skb, skb_network_header_len(skb));
*tail_skb = tmp_skb;
tail_skb = &(tmp_skb->next);
skb->len += tmp_skb->len;
skb->data_len += tmp_skb->len;
skb->truesize += tmp_skb->truesize;
tmp_skb->destructor = NULL;
tmp_skb->sk = NULL;
}
/* Allow local fragmentation. */
skb->ignore_df = ip6_sk_ignore_df(sk);
*final_dst = fl6->daddr;
__skb_pull(skb, skb_network_header_len(skb));
if (opt && opt->opt_flen)
ipv6_push_frag_opts(skb, opt, &proto);
if (opt && opt->opt_nflen)
ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);
skb_push(skb, sizeof(struct ipv6hdr));
skb_reset_network_header(skb);
hdr = ipv6_hdr(skb);
ip6_flow_hdr(hdr, np->cork.tclass,
ip6_make_flowlabel(net, skb, fl6->flowlabel,
np->autoflowlabel));
hdr->hop_limit = np->cork.hop_limit;
hdr->nexthdr = proto;
hdr->saddr = fl6->saddr;
hdr->daddr = *final_dst;
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
skb_dst_set(skb, dst_clone(&rt->dst));
IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
if (proto == IPPROTO_ICMPV6) {
struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
}
err = ip6_local_out(skb);
if (err) {
if (err > 0)
err = net_xmit_errno(err);
if (err)
goto error;
}
out:
ip6_cork_release(inet, np);
return err;
error:
IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
goto out;
}
EXPORT_SYMBOL_GPL(ip6_push_pending_frames);
void ip6_flush_pending_frames(struct sock *sk)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
if (skb_dst(skb))
IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_OUTDISCARDS);
kfree_skb(skb);
}
ip6_cork_release(inet_sk(sk), inet6_sk(sk));
}
EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_759_1 |
crossvul-cpp_data_good_2286_1 | /*
 * Generic address resolution entity
*
* Authors:
* net_random Alan Cox
* net_ratelimit Andi Kleen
* in{4,6}_pton YOSHIFUJI Hideaki, Copyright (C)2006 USAGI/WIDE Project
*
* Created by Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/inet.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/ratelimit.h>
#include <net/sock.h>
#include <net/net_ratelimit.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
int net_msg_warn __read_mostly = 1;
EXPORT_SYMBOL(net_msg_warn);
DEFINE_RATELIMIT_STATE(net_ratelimit_state, 5 * HZ, 10);
/*
* All net warning printk()s should be guarded by this function.
*/
int net_ratelimit(void)
{
return __ratelimit(&net_ratelimit_state);
}
EXPORT_SYMBOL(net_ratelimit);
/*
* Convert an ASCII string to binary IP.
* This is outside of net/ipv4/ because various code that uses IP addresses
* is otherwise not dependent on the TCP/IP stack.
*/
__be32 in_aton(const char *str)
{
unsigned long l;
unsigned int val;
int i;
l = 0;
for (i = 0; i < 4; i++) {
l <<= 8;
if (*str != '\0') {
val = 0;
while (*str != '\0' && *str != '.' && *str != '\n') {
val *= 10;
val += *str - '0';
str++;
}
l |= val;
if (*str != '\0')
str++;
}
}
return htonl(l);
}
EXPORT_SYMBOL(in_aton);
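/*
 * Example: in_aton("192.168.0.1") returns htonl(0xc0a80001). Note that
 * in_aton() performs no validation at all; non-numeric or out-of-range
 * components silently yield garbage, so in4_pton() below is the better
 * choice for untrusted input.
 */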
#define IN6PTON_XDIGIT 0x00010000
#define IN6PTON_DIGIT 0x00020000
#define IN6PTON_COLON_MASK 0x00700000
#define IN6PTON_COLON_1 0x00100000 /* single : requested */
#define IN6PTON_COLON_2 0x00200000 /* second : requested */
#define IN6PTON_COLON_1_2 0x00400000 /* :: requested */
#define IN6PTON_DOT 0x00800000 /* . */
#define IN6PTON_DELIM 0x10000000
#define IN6PTON_NULL 0x20000000 /* first/tail */
#define IN6PTON_UNKNOWN 0x40000000
static inline int xdigit2bin(char c, int delim)
{
int val;
if (c == delim || c == '\0')
return IN6PTON_DELIM;
if (c == ':')
return IN6PTON_COLON_MASK;
if (c == '.')
return IN6PTON_DOT;
val = hex_to_bin(c);
if (val >= 0)
return val | IN6PTON_XDIGIT | (val < 10 ? IN6PTON_DIGIT : 0);
if (delim == -1)
return IN6PTON_DELIM;
return IN6PTON_UNKNOWN;
}
/**
* in4_pton - convert an IPv4 address from literal to binary representation
* @src: the start of the IPv4 address string
* @srclen: the length of the string, -1 means strlen(src)
* @dst: the binary (u8[4] array) representation of the IPv4 address
* @delim: the delimiter of the IPv4 address in @src, -1 means no delimiter
* @end: A pointer to the end of the parsed string will be placed here
*
* Return one on success, return zero when any error occurs
* and @end will point to the end of the parsed string.
*
*/
int in4_pton(const char *src, int srclen,
u8 *dst,
int delim, const char **end)
{
const char *s;
u8 *d;
u8 dbuf[4];
int ret = 0;
int i;
int w = 0;
if (srclen < 0)
srclen = strlen(src);
s = src;
d = dbuf;
i = 0;
while(1) {
int c;
c = xdigit2bin(srclen > 0 ? *s : '\0', delim);
if (!(c & (IN6PTON_DIGIT | IN6PTON_DOT | IN6PTON_DELIM | IN6PTON_COLON_MASK))) {
goto out;
}
if (c & (IN6PTON_DOT | IN6PTON_DELIM | IN6PTON_COLON_MASK)) {
if (w == 0)
goto out;
*d++ = w & 0xff;
w = 0;
i++;
if (c & (IN6PTON_DELIM | IN6PTON_COLON_MASK)) {
if (i != 4)
goto out;
break;
}
goto cont;
}
w = (w * 10) + c;
if ((w & 0xffff) > 255) {
goto out;
}
cont:
if (i >= 4)
goto out;
s++;
srclen--;
}
ret = 1;
memcpy(dst, dbuf, sizeof(dbuf));
out:
if (end)
*end = s;
return ret;
}
EXPORT_SYMBOL(in4_pton);
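/*
 * Illustrative sketch (hypothetical helper, not part of the kernel API):
 * parsing a "host:port" style string with in4_pton(). Passing ':' as
 * @delim makes parsing stop at the colon, and @end reports where the
 * address text ended.
 */
static inline bool example_parse_ipv4_host(const char *s, u8 addr[4],
					   const char **port)
{
	const char *end;
	if (!in4_pton(s, -1, addr, ':', &end))
		return false;	/* malformed address */
	*port = (*end == ':') ? end + 1 : NULL;
	return true;
}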
/**
* in6_pton - convert an IPv6 address from literal to binary representation
* @src: the start of the IPv6 address string
* @srclen: the length of the string, -1 means strlen(src)
* @dst: the binary (u8[16] array) representation of the IPv6 address
* @delim: the delimiter of the IPv6 address in @src, -1 means no delimiter
* @end: A pointer to the end of the parsed string will be placed here
*
* Return one on success, return zero when any error occurs
* and @end will point to the end of the parsed string.
*
*/
int in6_pton(const char *src, int srclen,
u8 *dst,
int delim, const char **end)
{
const char *s, *tok = NULL;
u8 *d, *dc = NULL;
u8 dbuf[16];
int ret = 0;
int i;
int state = IN6PTON_COLON_1_2 | IN6PTON_XDIGIT | IN6PTON_NULL;
int w = 0;
memset(dbuf, 0, sizeof(dbuf));
s = src;
d = dbuf;
if (srclen < 0)
srclen = strlen(src);
while (1) {
int c;
c = xdigit2bin(srclen > 0 ? *s : '\0', delim);
if (!(c & state))
goto out;
if (c & (IN6PTON_DELIM | IN6PTON_COLON_MASK)) {
/* process one 16-bit word */
if (!(state & IN6PTON_NULL)) {
*d++ = (w >> 8) & 0xff;
*d++ = w & 0xff;
}
w = 0;
if (c & IN6PTON_DELIM) {
/* We've processed the last word */
break;
}
/*
* COLON_1 => XDIGIT
* COLON_2 => XDIGIT|DELIM
* COLON_1_2 => COLON_2
*/
switch (state & IN6PTON_COLON_MASK) {
case IN6PTON_COLON_2:
dc = d;
state = IN6PTON_XDIGIT | IN6PTON_DELIM;
if (dc - dbuf >= sizeof(dbuf))
state |= IN6PTON_NULL;
break;
case IN6PTON_COLON_1|IN6PTON_COLON_1_2:
state = IN6PTON_XDIGIT | IN6PTON_COLON_2;
break;
case IN6PTON_COLON_1:
state = IN6PTON_XDIGIT;
break;
case IN6PTON_COLON_1_2:
state = IN6PTON_COLON_2;
break;
default:
state = 0;
}
tok = s + 1;
goto cont;
}
if (c & IN6PTON_DOT) {
ret = in4_pton(tok ? tok : s, srclen + (int)(s - tok), d, delim, &s);
if (ret > 0) {
d += 4;
break;
}
goto out;
}
w = (w << 4) | (0xff & c);
state = IN6PTON_COLON_1 | IN6PTON_DELIM;
if (!(w & 0xf000)) {
state |= IN6PTON_XDIGIT;
}
if (!dc && d + 2 < dbuf + sizeof(dbuf)) {
state |= IN6PTON_COLON_1_2;
state &= ~IN6PTON_DELIM;
}
if (d + 2 >= dbuf + sizeof(dbuf)) {
state &= ~(IN6PTON_COLON_1|IN6PTON_COLON_1_2);
}
cont:
if ((dc && d + 4 < dbuf + sizeof(dbuf)) ||
d + 4 == dbuf + sizeof(dbuf)) {
state |= IN6PTON_DOT;
}
if (d >= dbuf + sizeof(dbuf)) {
state &= ~(IN6PTON_XDIGIT|IN6PTON_COLON_MASK);
}
s++;
srclen--;
}
i = 15; d--;
if (dc) {
while(d >= dc)
dst[i--] = *d--;
while(i >= dc - dbuf)
dst[i--] = 0;
while(i >= 0)
dst[i--] = *d--;
} else
memcpy(dst, dbuf, sizeof(dbuf));
ret = 1;
out:
if (end)
*end = s;
return ret;
}
EXPORT_SYMBOL(in6_pton);
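/*
 * Illustrative sketch (hypothetical helper): in6_pton() accepts "::"
 * compression and a trailing dotted quad, e.g. "::ffff:10.0.0.1", and
 * returns 1 on success.
 */
static inline bool example_parse_ipv6(const char *s, u8 addr[16])
{
	return in6_pton(s, -1, addr, -1, NULL) == 1;
}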
void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
__be32 from, __be32 to, int pseudohdr)
{
__be32 diff[] = { ~from, to };
if (skb->ip_summed != CHECKSUM_PARTIAL) {
*sum = csum_fold(csum_partial(diff, sizeof(diff),
~csum_unfold(*sum)));
if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
skb->csum = ~csum_partial(diff, sizeof(diff),
~skb->csum);
} else if (pseudohdr)
*sum = ~csum_fold(csum_partial(diff, sizeof(diff),
csum_unfold(*sum)));
}
EXPORT_SYMBOL(inet_proto_csum_replace4);
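/*
 * Both checksum-replace helpers implement the incremental update of
 * RFC 1624: HC' = ~(~HC + ~m + m'). Feeding { ~from, to } through
 * csum_partial() over the complement of the old sum, then folding,
 * yields the new checksum without touching the rest of the packet.
 */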
void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
const __be32 *from, const __be32 *to,
int pseudohdr)
{
__be32 diff[] = {
~from[0], ~from[1], ~from[2], ~from[3],
to[0], to[1], to[2], to[3],
};
if (skb->ip_summed != CHECKSUM_PARTIAL) {
*sum = csum_fold(csum_partial(diff, sizeof(diff),
~csum_unfold(*sum)));
if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
skb->csum = ~csum_partial(diff, sizeof(diff),
~skb->csum);
} else if (pseudohdr)
*sum = ~csum_fold(csum_partial(diff, sizeof(diff),
csum_unfold(*sum)));
}
EXPORT_SYMBOL(inet_proto_csum_replace16);
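/*
 * The machinery below backs the net_get_random_once() pattern: the
 * first caller fills the buffer with random bytes under the spinlock,
 * then a deferred work item flips the static key off so that later
 * callers skip this slow path entirely.
 */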
struct __net_random_once_work {
struct work_struct work;
struct static_key *key;
};
static void __net_random_once_deferred(struct work_struct *w)
{
struct __net_random_once_work *work =
container_of(w, struct __net_random_once_work, work);
BUG_ON(!static_key_enabled(work->key));
static_key_slow_dec(work->key);
kfree(work);
}
static void __net_random_once_disable_jump(struct static_key *key)
{
struct __net_random_once_work *w;
w = kmalloc(sizeof(*w), GFP_ATOMIC);
if (!w)
return;
INIT_WORK(&w->work, __net_random_once_deferred);
w->key = key;
schedule_work(&w->work);
}
bool __net_get_random_once(void *buf, int nbytes, bool *done,
struct static_key *once_key)
{
static DEFINE_SPINLOCK(lock);
unsigned long flags;
spin_lock_irqsave(&lock, flags);
if (*done) {
spin_unlock_irqrestore(&lock, flags);
return false;
}
get_random_bytes(buf, nbytes);
*done = true;
spin_unlock_irqrestore(&lock, flags);
__net_random_once_disable_jump(once_key);
return true;
}
EXPORT_SYMBOL(__net_get_random_once);
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_2286_1 |
crossvul-cpp_data_good_3831_0 | /*
* L2TPv3 IP encapsulation support for IPv6
*
* Copyright (c) 2012 Katalix Systems Ltd
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/icmp.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/socket.h>
#include <linux/l2tp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/tcp_states.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include "l2tp_core.h"
struct l2tp_ip6_sock {
/* inet_sock has to be the first member of l2tp_ip6_sock */
struct inet_sock inet;
u32 conn_id;
u32 peer_conn_id;
/* ipv6_pinfo has to be the last member of l2tp_ip6_sock, see
inet6_sk_generic */
struct ipv6_pinfo inet6;
};
static DEFINE_RWLOCK(l2tp_ip6_lock);
static struct hlist_head l2tp_ip6_table;
static struct hlist_head l2tp_ip6_bind_table;
static inline struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk)
{
return (struct l2tp_ip6_sock *)sk;
}
static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
struct in6_addr *laddr,
int dif, u32 tunnel_id)
{
struct hlist_node *node;
struct sock *sk;
sk_for_each_bound(sk, node, &l2tp_ip6_bind_table) {
struct in6_addr *addr = inet6_rcv_saddr(sk);
struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
if (l2tp == NULL)
continue;
if ((l2tp->conn_id == tunnel_id) &&
net_eq(sock_net(sk), net) &&
!(addr && ipv6_addr_equal(addr, laddr)) &&
!(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
goto found;
}
sk = NULL;
found:
return sk;
}
static inline struct sock *l2tp_ip6_bind_lookup(struct net *net,
struct in6_addr *laddr,
int dif, u32 tunnel_id)
{
struct sock *sk = __l2tp_ip6_bind_lookup(net, laddr, dif, tunnel_id);
if (sk)
sock_hold(sk);
return sk;
}
/* When processing receive frames, there are two cases to
* consider. Data frames consist of a non-zero session-id and an
* optional cookie. Control frames consist of a regular L2TP header
* preceded by 32-bits of zeros.
*
* L2TPv3 Session Header Over IP
*
* 0 1 2 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Session ID |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Cookie (optional, maximum 64 bits)...
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* L2TPv3 Control Message Header Over IP
*
* 0 1 2 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | (32 bits of zeros) |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |T|L|x|x|S|x|x|x|x|x|x|x| Ver | Length |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Control Connection ID |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Ns | Nr |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* All control frames are passed to userspace.
*/
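/*
 * Example: a data packet for session 0x01020304 starts with the bytes
 * 01 02 03 04 on the wire, while a control packet starts with
 * 00 00 00 00 followed by the T-bit L2TP header, which is why the
 * session_id == 0 test below is sufficient to tell the two apart.
 */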
static int l2tp_ip6_recv(struct sk_buff *skb)
{
struct sock *sk;
u32 session_id;
u32 tunnel_id;
unsigned char *ptr, *optr;
struct l2tp_session *session;
struct l2tp_tunnel *tunnel = NULL;
int length;
/* Point to L2TP header */
optr = ptr = skb->data;
if (!pskb_may_pull(skb, 4))
goto discard;
session_id = ntohl(*((__be32 *) ptr));
ptr += 4;
/* RFC3931: L2TP/IP packets have the first 4 bytes containing
* the session_id. If it is 0, the packet is a L2TP control
* frame and the session_id value can be discarded.
*/
if (session_id == 0) {
__skb_pull(skb, 4);
goto pass_up;
}
/* Ok, this is a data packet. Lookup the session. */
session = l2tp_session_find(&init_net, NULL, session_id);
if (session == NULL)
goto discard;
tunnel = session->tunnel;
if (tunnel == NULL)
goto discard;
/* Trace packet contents, if enabled */
if (tunnel->debug & L2TP_MSG_DATA) {
length = min(32u, skb->len);
if (!pskb_may_pull(skb, length))
goto discard;
pr_debug("%s: ip recv\n", tunnel->name);
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
}
l2tp_recv_common(session, skb, ptr, optr, 0, skb->len,
tunnel->recv_payload_hook);
return 0;
pass_up:
/* Get the tunnel_id from the L2TP header */
if (!pskb_may_pull(skb, 12))
goto discard;
if ((skb->data[0] & 0xc0) != 0xc0)
goto discard;
tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
tunnel = l2tp_tunnel_find(&init_net, tunnel_id);
if (tunnel != NULL)
sk = tunnel->sock;
else {
struct ipv6hdr *iph = ipv6_hdr(skb);
read_lock_bh(&l2tp_ip6_lock);
sk = __l2tp_ip6_bind_lookup(&init_net, &iph->daddr,
0, tunnel_id);
read_unlock_bh(&l2tp_ip6_lock);
}
if (sk == NULL)
goto discard;
sock_hold(sk);
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_put;
nf_reset(skb);
return sk_receive_skb(sk, skb, 1);
discard_put:
sock_put(sk);
discard:
kfree_skb(skb);
return 0;
}
static int l2tp_ip6_open(struct sock *sk)
{
/* Prevent autobind. We don't have ports. */
inet_sk(sk)->inet_num = IPPROTO_L2TP;
write_lock_bh(&l2tp_ip6_lock);
sk_add_node(sk, &l2tp_ip6_table);
write_unlock_bh(&l2tp_ip6_lock);
return 0;
}
static void l2tp_ip6_close(struct sock *sk, long timeout)
{
write_lock_bh(&l2tp_ip6_lock);
hlist_del_init(&sk->sk_bind_node);
sk_del_node_init(sk);
write_unlock_bh(&l2tp_ip6_lock);
sk_common_release(sk);
}
static void l2tp_ip6_destroy_sock(struct sock *sk)
{
lock_sock(sk);
ip6_flush_pending_frames(sk);
release_sock(sk);
inet6_destroy_sock(sk);
}
static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr;
__be32 v4addr = 0;
int addr_type;
int err;
if (!sock_flag(sk, SOCK_ZAPPED))
return -EINVAL;
if (addr->l2tp_family != AF_INET6)
return -EINVAL;
if (addr_len < sizeof(*addr))
return -EINVAL;
addr_type = ipv6_addr_type(&addr->l2tp_addr);
/* l2tp_ip6 sockets are IPv6 only */
if (addr_type == IPV6_ADDR_MAPPED)
return -EADDRNOTAVAIL;
/* L2TP is point-point, not multicast */
if (addr_type & IPV6_ADDR_MULTICAST)
return -EADDRNOTAVAIL;
err = -EADDRINUSE;
read_lock_bh(&l2tp_ip6_lock);
if (__l2tp_ip6_bind_lookup(&init_net, &addr->l2tp_addr,
sk->sk_bound_dev_if, addr->l2tp_conn_id))
goto out_in_use;
read_unlock_bh(&l2tp_ip6_lock);
lock_sock(sk);
err = -EINVAL;
if (sk->sk_state != TCP_CLOSE)
goto out_unlock;
/* Check if the address belongs to the host. */
rcu_read_lock();
if (addr_type != IPV6_ADDR_ANY) {
struct net_device *dev = NULL;
if (addr_type & IPV6_ADDR_LINKLOCAL) {
if (addr_len >= sizeof(struct sockaddr_in6) &&
addr->l2tp_scope_id) {
/* Override any existing binding, if another
* one is supplied by user.
*/
sk->sk_bound_dev_if = addr->l2tp_scope_id;
}
/* Binding to link-local address requires an
interface */
if (!sk->sk_bound_dev_if)
goto out_unlock_rcu;
err = -ENODEV;
dev = dev_get_by_index_rcu(sock_net(sk),
sk->sk_bound_dev_if);
if (!dev)
goto out_unlock_rcu;
}
/* ipv4 addr of the socket is invalid. Only the
* unspecified and mapped addresses have a v4 equivalent.
*/
v4addr = LOOPBACK4_IPV6;
err = -EADDRNOTAVAIL;
if (!ipv6_chk_addr(sock_net(sk), &addr->l2tp_addr, dev, 0))
goto out_unlock_rcu;
}
rcu_read_unlock();
inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
np->rcv_saddr = addr->l2tp_addr;
np->saddr = addr->l2tp_addr;
l2tp_ip6_sk(sk)->conn_id = addr->l2tp_conn_id;
write_lock_bh(&l2tp_ip6_lock);
sk_add_bind_node(sk, &l2tp_ip6_bind_table);
sk_del_node_init(sk);
write_unlock_bh(&l2tp_ip6_lock);
sock_reset_flag(sk, SOCK_ZAPPED);
release_sock(sk);
return 0;
out_unlock_rcu:
rcu_read_unlock();
out_unlock:
release_sock(sk);
return err;
out_in_use:
read_unlock_bh(&l2tp_ip6_lock);
return err;
}
static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
int addr_len)
{
struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *) uaddr;
struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
struct in6_addr *daddr;
int addr_type;
int rc;
if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
return -EINVAL;
if (addr_len < sizeof(*lsa))
return -EINVAL;
addr_type = ipv6_addr_type(&usin->sin6_addr);
if (addr_type & IPV6_ADDR_MULTICAST)
return -EINVAL;
if (addr_type & IPV6_ADDR_MAPPED) {
daddr = &usin->sin6_addr;
if (ipv4_is_multicast(daddr->s6_addr32[3]))
return -EINVAL;
}
rc = ip6_datagram_connect(sk, uaddr, addr_len);
lock_sock(sk);
l2tp_ip6_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
write_lock_bh(&l2tp_ip6_lock);
hlist_del_init(&sk->sk_bind_node);
sk_add_bind_node(sk, &l2tp_ip6_bind_table);
write_unlock_bh(&l2tp_ip6_lock);
release_sock(sk);
return rc;
}
static int l2tp_ip6_disconnect(struct sock *sk, int flags)
{
if (sock_flag(sk, SOCK_ZAPPED))
return 0;
return udp_disconnect(sk, flags);
}
static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)uaddr;
struct sock *sk = sock->sk;
struct ipv6_pinfo *np = inet6_sk(sk);
struct l2tp_ip6_sock *lsk = l2tp_ip6_sk(sk);
lsa->l2tp_family = AF_INET6;
lsa->l2tp_flowinfo = 0;
lsa->l2tp_scope_id = 0;
lsa->l2tp_unused = 0;
if (peer) {
if (!lsk->peer_conn_id)
return -ENOTCONN;
lsa->l2tp_conn_id = lsk->peer_conn_id;
lsa->l2tp_addr = np->daddr;
if (np->sndflow)
lsa->l2tp_flowinfo = np->flow_label;
} else {
if (ipv6_addr_any(&np->rcv_saddr))
lsa->l2tp_addr = np->saddr;
else
lsa->l2tp_addr = np->rcv_saddr;
lsa->l2tp_conn_id = lsk->conn_id;
}
if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
lsa->l2tp_scope_id = sk->sk_bound_dev_if;
*uaddr_len = sizeof(*lsa);
return 0;
}
static int l2tp_ip6_backlog_recv(struct sock *sk, struct sk_buff *skb)
{
int rc;
/* Charge it to the socket, dropping if the queue is full. */
rc = sock_queue_rcv_skb(sk, skb);
if (rc < 0)
goto drop;
return 0;
drop:
IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS);
kfree_skb(skb);
return -1;
}
static int l2tp_ip6_push_pending_frames(struct sock *sk)
{
struct sk_buff *skb;
__be32 *transhdr = NULL;
int err = 0;
skb = skb_peek(&sk->sk_write_queue);
if (skb == NULL)
goto out;
transhdr = (__be32 *)skb_transport_header(skb);
*transhdr = 0;
err = ip6_push_pending_frames(sk);
out:
return err;
}
/* Userspace will call sendmsg() on the tunnel socket to send L2TP
 * control frames; an illustrative userspace sketch follows this
 * function.
 */
static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t len)
{
struct ipv6_txoptions opt_space;
struct sockaddr_l2tpip6 *lsa =
(struct sockaddr_l2tpip6 *) msg->msg_name;
struct in6_addr *daddr, *final_p, final;
struct ipv6_pinfo *np = inet6_sk(sk);
struct ipv6_txoptions *opt = NULL;
struct ip6_flowlabel *flowlabel = NULL;
struct dst_entry *dst = NULL;
struct flowi6 fl6;
int addr_len = msg->msg_namelen;
int hlimit = -1;
int tclass = -1;
int dontfrag = -1;
int transhdrlen = 4; /* zero session-id */
int ulen = len + transhdrlen;
int err;
/* Rough check on arithmetic overflow,
better check is made in ip6_append_data().
*/
if (len > INT_MAX)
return -EMSGSIZE;
/* Mirror BSD error message compatibility */
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
/*
* Get and verify the address.
*/
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_mark = sk->sk_mark;
if (lsa) {
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
if (lsa->l2tp_family && lsa->l2tp_family != AF_INET6)
return -EAFNOSUPPORT;
daddr = &lsa->l2tp_addr;
if (np->sndflow) {
fl6.flowlabel = lsa->l2tp_flowinfo & IPV6_FLOWINFO_MASK;
if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
daddr = &flowlabel->dst;
}
}
/*
* Otherwise it will be difficult to maintain
* sk->sk_dst_cache.
*/
if (sk->sk_state == TCP_ESTABLISHED &&
ipv6_addr_equal(daddr, &np->daddr))
daddr = &np->daddr;
if (addr_len >= sizeof(struct sockaddr_in6) &&
lsa->l2tp_scope_id &&
ipv6_addr_type(daddr) & IPV6_ADDR_LINKLOCAL)
fl6.flowi6_oif = lsa->l2tp_scope_id;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
daddr = &np->daddr;
fl6.flowlabel = np->flow_label;
}
if (fl6.flowi6_oif == 0)
fl6.flowi6_oif = sk->sk_bound_dev_if;
if (msg->msg_controllen) {
opt = &opt_space;
memset(opt, 0, sizeof(struct ipv6_txoptions));
opt->tot_len = sizeof(struct ipv6_txoptions);
err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
&hlimit, &tclass, &dontfrag);
if (err < 0) {
fl6_sock_release(flowlabel);
return err;
}
if ((fl6.flowlabel & IPV6_FLOWLABEL_MASK) && !flowlabel) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
}
if (!(opt->opt_nflen|opt->opt_flen))
opt = NULL;
}
if (opt == NULL)
opt = np->opt;
if (flowlabel)
opt = fl6_merge_options(&opt_space, flowlabel, opt);
opt = ipv6_fixup_options(&opt_space, opt);
fl6.flowi6_proto = sk->sk_protocol;
if (!ipv6_addr_any(daddr))
fl6.daddr = *daddr;
else
fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
fl6.saddr = np->saddr;
final_p = fl6_update_dst(&fl6, opt, &final);
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
fl6.flowi6_oif = np->mcast_oif;
else if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->ucast_oif;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
goto out;
}
if (hlimit < 0) {
if (ipv6_addr_is_multicast(&fl6.daddr))
hlimit = np->mcast_hops;
else
hlimit = np->hop_limit;
if (hlimit < 0)
hlimit = ip6_dst_hoplimit(dst);
}
if (tclass < 0)
tclass = np->tclass;
if (dontfrag < 0)
dontfrag = np->dontfrag;
if (msg->msg_flags & MSG_CONFIRM)
goto do_confirm;
back_from_confirm:
lock_sock(sk);
err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov,
ulen, transhdrlen, hlimit, tclass, opt,
&fl6, (struct rt6_info *)dst,
msg->msg_flags, dontfrag);
if (err)
ip6_flush_pending_frames(sk);
else if (!(msg->msg_flags & MSG_MORE))
err = l2tp_ip6_push_pending_frames(sk);
release_sock(sk);
done:
dst_release(dst);
out:
fl6_sock_release(flowlabel);
return err < 0 ? err : len;
do_confirm:
dst_confirm(dst);
if (!(msg->msg_flags & MSG_PROBE) || len)
goto back_from_confirm;
err = 0;
goto done;
}
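/*
 * Illustrative userspace sketch (assumed usage, not kernel code):
 *
 *	int fd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_L2TP);
 *	struct sockaddr_l2tpip6 sa = {
 *		.l2tp_family	= AF_INET6,
 *		.l2tp_addr	= peer_in6_addr,
 *		.l2tp_conn_id	= peer_tunnel_id,
 *	};
 *	sendto(fd, buf, len, 0, (struct sockaddr *)&sa, sizeof(sa));
 *
 * The kernel reserves and zeroes the leading four bytes (transhdrlen
 * above), marking the frame as a control message.
 */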
static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t len, int noblock,
int flags, int *addr_len)
{
struct inet_sock *inet = inet_sk(sk);
struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)msg->msg_name;
size_t copied = 0;
int err = -EOPNOTSUPP;
struct sk_buff *skb;
if (flags & MSG_OOB)
goto out;
if (addr_len)
*addr_len = sizeof(*lsa);
if (flags & MSG_ERRQUEUE)
return ipv6_recv_error(sk, msg, len);
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
goto out;
copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (err)
goto done;
sock_recv_timestamp(msg, sk, skb);
/* Copy the address. */
if (lsa) {
lsa->l2tp_family = AF_INET6;
lsa->l2tp_unused = 0;
lsa->l2tp_addr = ipv6_hdr(skb)->saddr;
lsa->l2tp_flowinfo = 0;
lsa->l2tp_scope_id = 0;
if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
lsa->l2tp_scope_id = IP6CB(skb)->iif;
}
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
if (flags & MSG_TRUNC)
copied = skb->len;
done:
skb_free_datagram(sk, skb);
out:
return err ? err : copied;
}
static struct proto l2tp_ip6_prot = {
.name = "L2TP/IPv6",
.owner = THIS_MODULE,
.init = l2tp_ip6_open,
.close = l2tp_ip6_close,
.bind = l2tp_ip6_bind,
.connect = l2tp_ip6_connect,
.disconnect = l2tp_ip6_disconnect,
.ioctl = udp_ioctl,
.destroy = l2tp_ip6_destroy_sock,
.setsockopt = ipv6_setsockopt,
.getsockopt = ipv6_getsockopt,
.sendmsg = l2tp_ip6_sendmsg,
.recvmsg = l2tp_ip6_recvmsg,
.backlog_rcv = l2tp_ip6_backlog_recv,
.hash = inet_hash,
.unhash = inet_unhash,
.obj_size = sizeof(struct l2tp_ip6_sock),
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ipv6_setsockopt,
.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
static const struct proto_ops l2tp_ip6_ops = {
.family = PF_INET6,
.owner = THIS_MODULE,
.release = inet6_release,
.bind = inet6_bind,
.connect = inet_dgram_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = l2tp_ip6_getname,
.poll = datagram_poll,
.ioctl = inet6_ioctl,
.listen = sock_no_listen,
.shutdown = inet_shutdown,
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = inet_sendmsg,
.recvmsg = sock_common_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};
static struct inet_protosw l2tp_ip6_protosw = {
.type = SOCK_DGRAM,
.protocol = IPPROTO_L2TP,
.prot = &l2tp_ip6_prot,
.ops = &l2tp_ip6_ops,
.no_check = 0,
};
static struct inet6_protocol l2tp_ip6_protocol __read_mostly = {
.handler = l2tp_ip6_recv,
};
static int __init l2tp_ip6_init(void)
{
int err;
pr_info("L2TP IP encapsulation support for IPv6 (L2TPv3)\n");
err = proto_register(&l2tp_ip6_prot, 1);
if (err != 0)
goto out;
err = inet6_add_protocol(&l2tp_ip6_protocol, IPPROTO_L2TP);
if (err)
goto out1;
inet6_register_protosw(&l2tp_ip6_protosw);
return 0;
out1:
proto_unregister(&l2tp_ip6_prot);
out:
return err;
}
static void __exit l2tp_ip6_exit(void)
{
inet6_unregister_protosw(&l2tp_ip6_protosw);
inet6_del_protocol(&l2tp_ip6_protocol, IPPROTO_L2TP);
proto_unregister(&l2tp_ip6_prot);
}
module_init(l2tp_ip6_init);
module_exit(l2tp_ip6_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chris Elston <celston@katalix.com>");
MODULE_DESCRIPTION("L2TP IP encapsulation for IPv6");
MODULE_VERSION("1.0");
/* Use the value of SOCK_DGRAM (2) directly, because __stringify doesn't like
 * enums
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 2, IPPROTO_L2TP);
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_3831_0 |
crossvul-cpp_data_good_2527_0 | /*
* Timers abstract layer
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/sched/signal.h>
#include <sound/core.h>
#include <sound/timer.h>
#include <sound/control.h>
#include <sound/info.h>
#include <sound/minors.h>
#include <sound/initval.h>
#include <linux/kmod.h>
/* internal flags */
#define SNDRV_TIMER_IFLG_PAUSED 0x00010000
#if IS_ENABLED(CONFIG_SND_HRTIMER)
#define DEFAULT_TIMER_LIMIT 4
#else
#define DEFAULT_TIMER_LIMIT 1
#endif
static int timer_limit = DEFAULT_TIMER_LIMIT;
static int timer_tstamp_monotonic = 1;
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, Takashi Iwai <tiwai@suse.de>");
MODULE_DESCRIPTION("ALSA timer interface");
MODULE_LICENSE("GPL");
module_param(timer_limit, int, 0444);
MODULE_PARM_DESC(timer_limit, "Maximum global timers in system.");
module_param(timer_tstamp_monotonic, int, 0444);
MODULE_PARM_DESC(timer_tstamp_monotonic, "Use posix monotonic clock source for timestamps (default).");
MODULE_ALIAS_CHARDEV(CONFIG_SND_MAJOR, SNDRV_MINOR_TIMER);
MODULE_ALIAS("devname:snd/timer");
struct snd_timer_user {
struct snd_timer_instance *timeri;
int tread; /* enhanced read with timestamps and events */
unsigned long ticks;
unsigned long overrun;
int qhead;
int qtail;
int qused;
int queue_size;
bool disconnected;
struct snd_timer_read *queue;
struct snd_timer_tread *tqueue;
spinlock_t qlock;
unsigned long last_resolution;
unsigned int filter;
struct timespec tstamp; /* trigger tstamp */
wait_queue_head_t qchange_sleep;
struct fasync_struct *fasync;
struct mutex ioctl_lock;
};
/* list of timers */
static LIST_HEAD(snd_timer_list);
/* list of slave instances */
static LIST_HEAD(snd_timer_slave_list);
/* lock for slave active lists */
static DEFINE_SPINLOCK(slave_active_lock);
static DEFINE_MUTEX(register_mutex);
static int snd_timer_free(struct snd_timer *timer);
static int snd_timer_dev_free(struct snd_device *device);
static int snd_timer_dev_register(struct snd_device *device);
static int snd_timer_dev_disconnect(struct snd_device *device);
static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_left);
/*
* create a timer instance with the given owner string.
* when timer is not NULL, increments the module counter
*/
static struct snd_timer_instance *snd_timer_instance_new(char *owner,
struct snd_timer *timer)
{
struct snd_timer_instance *timeri;
timeri = kzalloc(sizeof(*timeri), GFP_KERNEL);
if (timeri == NULL)
return NULL;
timeri->owner = kstrdup(owner, GFP_KERNEL);
if (! timeri->owner) {
kfree(timeri);
return NULL;
}
INIT_LIST_HEAD(&timeri->open_list);
INIT_LIST_HEAD(&timeri->active_list);
INIT_LIST_HEAD(&timeri->ack_list);
INIT_LIST_HEAD(&timeri->slave_list_head);
INIT_LIST_HEAD(&timeri->slave_active_head);
timeri->timer = timer;
if (timer && !try_module_get(timer->module)) {
kfree(timeri->owner);
kfree(timeri);
return NULL;
}
return timeri;
}
/*
* find a timer instance from the given timer id
*/
static struct snd_timer *snd_timer_find(struct snd_timer_id *tid)
{
struct snd_timer *timer = NULL;
list_for_each_entry(timer, &snd_timer_list, device_list) {
if (timer->tmr_class != tid->dev_class)
continue;
if ((timer->tmr_class == SNDRV_TIMER_CLASS_CARD ||
timer->tmr_class == SNDRV_TIMER_CLASS_PCM) &&
(timer->card == NULL ||
timer->card->number != tid->card))
continue;
if (timer->tmr_device != tid->device)
continue;
if (timer->tmr_subdevice != tid->subdevice)
continue;
return timer;
}
return NULL;
}
#ifdef CONFIG_MODULES
static void snd_timer_request(struct snd_timer_id *tid)
{
switch (tid->dev_class) {
case SNDRV_TIMER_CLASS_GLOBAL:
if (tid->device < timer_limit)
request_module("snd-timer-%i", tid->device);
break;
case SNDRV_TIMER_CLASS_CARD:
case SNDRV_TIMER_CLASS_PCM:
if (tid->card < snd_ecards_limit)
request_module("snd-card-%i", tid->card);
break;
default:
break;
}
}
#endif
/*
* look for a master instance matching with the slave id of the given slave.
* when found, relink the open_link of the slave.
*
* call this with register_mutex down.
*/
static void snd_timer_check_slave(struct snd_timer_instance *slave)
{
struct snd_timer *timer;
struct snd_timer_instance *master;
/* FIXME: it's really dumb to look up all entries.. */
list_for_each_entry(timer, &snd_timer_list, device_list) {
list_for_each_entry(master, &timer->open_list_head, open_list) {
if (slave->slave_class == master->slave_class &&
slave->slave_id == master->slave_id) {
list_move_tail(&slave->open_list,
&master->slave_list_head);
spin_lock_irq(&slave_active_lock);
slave->master = master;
slave->timer = master->timer;
spin_unlock_irq(&slave_active_lock);
return;
}
}
}
}
/*
* look for slave instances matching with the slave id of the given master.
* when found, relink the open_link of slaves.
*
* call this with register_mutex down.
*/
static void snd_timer_check_master(struct snd_timer_instance *master)
{
struct snd_timer_instance *slave, *tmp;
/* check all pending slaves */
list_for_each_entry_safe(slave, tmp, &snd_timer_slave_list, open_list) {
if (slave->slave_class == master->slave_class &&
slave->slave_id == master->slave_id) {
list_move_tail(&slave->open_list, &master->slave_list_head);
spin_lock_irq(&slave_active_lock);
spin_lock(&master->timer->lock);
slave->master = master;
slave->timer = master->timer;
if (slave->flags & SNDRV_TIMER_IFLG_RUNNING)
list_add_tail(&slave->active_list,
&master->slave_active_head);
spin_unlock(&master->timer->lock);
spin_unlock_irq(&slave_active_lock);
}
}
}
/*
* open a timer instance
* when opening a master, the slave id must be here given.
*/
int snd_timer_open(struct snd_timer_instance **ti,
char *owner, struct snd_timer_id *tid,
unsigned int slave_id)
{
struct snd_timer *timer;
struct snd_timer_instance *timeri = NULL;
if (tid->dev_class == SNDRV_TIMER_CLASS_SLAVE) {
/* open a slave instance */
if (tid->dev_sclass <= SNDRV_TIMER_SCLASS_NONE ||
tid->dev_sclass > SNDRV_TIMER_SCLASS_OSS_SEQUENCER) {
pr_debug("ALSA: timer: invalid slave class %i\n",
tid->dev_sclass);
return -EINVAL;
}
mutex_lock(®ister_mutex);
timeri = snd_timer_instance_new(owner, NULL);
if (!timeri) {
mutex_unlock(®ister_mutex);
return -ENOMEM;
}
timeri->slave_class = tid->dev_sclass;
timeri->slave_id = tid->device;
timeri->flags |= SNDRV_TIMER_IFLG_SLAVE;
list_add_tail(&timeri->open_list, &snd_timer_slave_list);
snd_timer_check_slave(timeri);
mutex_unlock(®ister_mutex);
*ti = timeri;
return 0;
}
/* open a master instance */
mutex_lock(®ister_mutex);
timer = snd_timer_find(tid);
#ifdef CONFIG_MODULES
if (!timer) {
mutex_unlock(®ister_mutex);
snd_timer_request(tid);
mutex_lock(®ister_mutex);
timer = snd_timer_find(tid);
}
#endif
if (!timer) {
mutex_unlock(®ister_mutex);
return -ENODEV;
}
if (!list_empty(&timer->open_list_head)) {
timeri = list_entry(timer->open_list_head.next,
struct snd_timer_instance, open_list);
if (timeri->flags & SNDRV_TIMER_IFLG_EXCLUSIVE) {
mutex_unlock(®ister_mutex);
return -EBUSY;
}
}
timeri = snd_timer_instance_new(owner, timer);
if (!timeri) {
mutex_unlock(®ister_mutex);
return -ENOMEM;
}
/* take a card refcount for safe disconnection */
if (timer->card)
get_device(&timer->card->card_dev);
timeri->slave_class = tid->dev_sclass;
timeri->slave_id = slave_id;
if (list_empty(&timer->open_list_head) && timer->hw.open) {
int err = timer->hw.open(timer);
if (err) {
kfree(timeri->owner);
kfree(timeri);
if (timer->card)
put_device(&timer->card->card_dev);
module_put(timer->module);
mutex_unlock(®ister_mutex);
return err;
}
}
list_add_tail(&timeri->open_list, &timer->open_list_head);
snd_timer_check_master(timeri);
mutex_unlock(®ister_mutex);
*ti = timeri;
return 0;
}
/*
* close a timer instance
*/
int snd_timer_close(struct snd_timer_instance *timeri)
{
struct snd_timer *timer = NULL;
struct snd_timer_instance *slave, *tmp;
if (snd_BUG_ON(!timeri))
return -ENXIO;
mutex_lock(®ister_mutex);
list_del(&timeri->open_list);
/* force to stop the timer */
snd_timer_stop(timeri);
timer = timeri->timer;
if (timer) {
/* wait, until the active callback is finished */
spin_lock_irq(&timer->lock);
while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) {
spin_unlock_irq(&timer->lock);
udelay(10);
spin_lock_irq(&timer->lock);
}
spin_unlock_irq(&timer->lock);
/* remove slave links */
spin_lock_irq(&slave_active_lock);
spin_lock(&timer->lock);
list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head,
open_list) {
list_move_tail(&slave->open_list, &snd_timer_slave_list);
slave->master = NULL;
slave->timer = NULL;
list_del_init(&slave->ack_list);
list_del_init(&slave->active_list);
}
spin_unlock(&timer->lock);
spin_unlock_irq(&slave_active_lock);
/* slave doesn't need to release timer resources below */
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
timer = NULL;
}
if (timeri->private_free)
timeri->private_free(timeri);
kfree(timeri->owner);
kfree(timeri);
if (timer) {
if (list_empty(&timer->open_list_head) && timer->hw.close)
timer->hw.close(timer);
/* release a card refcount for safe disconnection */
if (timer->card)
put_device(&timer->card->card_dev);
module_put(timer->module);
}
mutex_unlock(®ister_mutex);
return 0;
}
unsigned long snd_timer_resolution(struct snd_timer_instance *timeri)
{
struct snd_timer * timer;
if (timeri == NULL)
return 0;
if ((timer = timeri->timer) != NULL) {
if (timer->hw.c_resolution)
return timer->hw.c_resolution(timer);
return timer->hw.resolution;
}
return 0;
}
static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
{
struct snd_timer *timer;
unsigned long resolution = 0;
struct snd_timer_instance *ts;
struct timespec tstamp;
if (timer_tstamp_monotonic)
ktime_get_ts(&tstamp);
else
getnstimeofday(&tstamp);
if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_START ||
event > SNDRV_TIMER_EVENT_PAUSE))
return;
if (event == SNDRV_TIMER_EVENT_START ||
event == SNDRV_TIMER_EVENT_CONTINUE)
resolution = snd_timer_resolution(ti);
if (ti->ccallback)
ti->ccallback(ti, event, &tstamp, resolution);
if (ti->flags & SNDRV_TIMER_IFLG_SLAVE)
return;
timer = ti->timer;
if (timer == NULL)
return;
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
return;
list_for_each_entry(ts, &ti->slave_active_head, active_list)
if (ts->ccallback)
ts->ccallback(ts, event + 100, &tstamp, resolution);
}
/* start/continue a master timer */
static int snd_timer_start1(struct snd_timer_instance *timeri,
bool start, unsigned long ticks)
{
struct snd_timer *timer;
int result;
unsigned long flags;
timer = timeri->timer;
if (!timer)
return -EINVAL;
spin_lock_irqsave(&timer->lock, flags);
if (timer->card && timer->card->shutdown) {
result = -ENODEV;
goto unlock;
}
if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
SNDRV_TIMER_IFLG_START)) {
result = -EBUSY;
goto unlock;
}
if (start)
timeri->ticks = timeri->cticks = ticks;
else if (!timeri->cticks)
timeri->cticks = 1;
timeri->pticks = 0;
list_move_tail(&timeri->active_list, &timer->active_list_head);
if (timer->running) {
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
goto __start_now;
timer->flags |= SNDRV_TIMER_FLG_RESCHED;
timeri->flags |= SNDRV_TIMER_IFLG_START;
result = 1; /* delayed start */
} else {
if (start)
timer->sticks = ticks;
timer->hw.start(timer);
__start_now:
timer->running++;
timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
result = 0;
}
snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START :
SNDRV_TIMER_EVENT_CONTINUE);
unlock:
spin_unlock_irqrestore(&timer->lock, flags);
return result;
}
/* start/continue a slave timer */
static int snd_timer_start_slave(struct snd_timer_instance *timeri,
bool start)
{
unsigned long flags;
spin_lock_irqsave(&slave_active_lock, flags);
if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
spin_unlock_irqrestore(&slave_active_lock, flags);
return -EBUSY;
}
timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
if (timeri->master && timeri->timer) {
spin_lock(&timeri->timer->lock);
list_add_tail(&timeri->active_list,
&timeri->master->slave_active_head);
snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START :
SNDRV_TIMER_EVENT_CONTINUE);
spin_unlock(&timeri->timer->lock);
}
spin_unlock_irqrestore(&slave_active_lock, flags);
return 1; /* delayed start */
}
/* stop/pause a master timer */
static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop)
{
struct snd_timer *timer;
int result = 0;
unsigned long flags;
timer = timeri->timer;
if (!timer)
return -EINVAL;
spin_lock_irqsave(&timer->lock, flags);
if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
SNDRV_TIMER_IFLG_START))) {
result = -EBUSY;
goto unlock;
}
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
if (timer->card && timer->card->shutdown)
goto unlock;
if (stop) {
timeri->cticks = timeri->ticks;
timeri->pticks = 0;
}
if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
!(--timer->running)) {
timer->hw.stop(timer);
if (timer->flags & SNDRV_TIMER_FLG_RESCHED) {
timer->flags &= ~SNDRV_TIMER_FLG_RESCHED;
snd_timer_reschedule(timer, 0);
if (timer->flags & SNDRV_TIMER_FLG_CHANGE) {
timer->flags &= ~SNDRV_TIMER_FLG_CHANGE;
timer->hw.start(timer);
}
}
}
timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
if (stop)
timeri->flags &= ~SNDRV_TIMER_IFLG_PAUSED;
else
timeri->flags |= SNDRV_TIMER_IFLG_PAUSED;
snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
SNDRV_TIMER_EVENT_CONTINUE);
unlock:
spin_unlock_irqrestore(&timer->lock, flags);
return result;
}
/* stop/pause a slave timer */
static int snd_timer_stop_slave(struct snd_timer_instance *timeri, bool stop)
{
unsigned long flags;
spin_lock_irqsave(&slave_active_lock, flags);
if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
spin_unlock_irqrestore(&slave_active_lock, flags);
return -EBUSY;
}
timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
if (timeri->timer) {
spin_lock(&timeri->timer->lock);
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
SNDRV_TIMER_EVENT_CONTINUE);
spin_unlock(&timeri->timer->lock);
}
spin_unlock_irqrestore(&slave_active_lock, flags);
return 0;
}
/*
* start the timer instance
*/
int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
{
if (timeri == NULL || ticks < 1)
return -EINVAL;
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_start_slave(timeri, true);
else
return snd_timer_start1(timeri, true, ticks);
}
/*
* stop the timer instance.
*
* do not call this from the timer callback!
*/
int snd_timer_stop(struct snd_timer_instance *timeri)
{
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_stop_slave(timeri, true);
else
return snd_timer_stop1(timeri, true);
}
/*
 * resume after a pause; the remaining ticks are kept.
 */
int snd_timer_continue(struct snd_timer_instance *timeri)
{
/* timer can continue only after pause */
if (!(timeri->flags & SNDRV_TIMER_IFLG_PAUSED))
return -EINVAL;
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_start_slave(timeri, false);
else
return snd_timer_start1(timeri, false, 0);
}
/*
 * pause the timer; remember the ticks left for snd_timer_continue()
 */
int snd_timer_pause(struct snd_timer_instance * timeri)
{
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_stop_slave(timeri, false);
else
return snd_timer_stop1(timeri, false);
}
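/*
 * Illustrative sketch (hypothetical caller, error paths trimmed) of the
 * instance API above: open, start, pause/continue, stop, close.
 */
static inline int example_run_timer(struct snd_timer_id *tid)
{
	struct snd_timer_instance *ti;
	int err;
	err = snd_timer_open(&ti, "example", tid, 0);
	if (err < 0)
		return err;
	err = snd_timer_start(ti, 10);	/* fire after 10 ticks */
	if (err < 0) {
		snd_timer_close(ti);
		return err;
	}
	snd_timer_pause(ti);	/* remaining ticks are kept */
	snd_timer_continue(ti);	/* resumes from the pause */
	snd_timer_stop(ti);
	return snd_timer_close(ti);
}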
/*
* reschedule the timer
*
* start pending instances and check the scheduling ticks.
* when the scheduling ticks is changed set CHANGE flag to reprogram the timer.
*/
static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_left)
{
struct snd_timer_instance *ti;
unsigned long ticks = ~0UL;
list_for_each_entry(ti, &timer->active_list_head, active_list) {
if (ti->flags & SNDRV_TIMER_IFLG_START) {
ti->flags &= ~SNDRV_TIMER_IFLG_START;
ti->flags |= SNDRV_TIMER_IFLG_RUNNING;
timer->running++;
}
if (ti->flags & SNDRV_TIMER_IFLG_RUNNING) {
if (ticks > ti->cticks)
ticks = ti->cticks;
}
}
if (ticks == ~0UL) {
timer->flags &= ~SNDRV_TIMER_FLG_RESCHED;
return;
}
if (ticks > timer->hw.ticks)
ticks = timer->hw.ticks;
if (ticks_left != ticks)
timer->flags |= SNDRV_TIMER_FLG_CHANGE;
timer->sticks = ticks;
}
/*
* timer tasklet
*
*/
static void snd_timer_tasklet(unsigned long arg)
{
struct snd_timer *timer = (struct snd_timer *) arg;
struct snd_timer_instance *ti;
struct list_head *p;
unsigned long resolution, ticks;
unsigned long flags;
if (timer->card && timer->card->shutdown)
return;
spin_lock_irqsave(&timer->lock, flags);
/* now process all callbacks */
while (!list_empty(&timer->sack_list_head)) {
p = timer->sack_list_head.next; /* get first item */
ti = list_entry(p, struct snd_timer_instance, ack_list);
/* remove from ack_list and make empty */
list_del_init(p);
ticks = ti->pticks;
ti->pticks = 0;
resolution = ti->resolution;
ti->flags |= SNDRV_TIMER_IFLG_CALLBACK;
spin_unlock(&timer->lock);
if (ti->callback)
ti->callback(ti, resolution, ticks);
spin_lock(&timer->lock);
ti->flags &= ~SNDRV_TIMER_IFLG_CALLBACK;
}
spin_unlock_irqrestore(&timer->lock, flags);
}
/*
* timer interrupt
*
* ticks_left is usually equal to timer->sticks.
*
*/
void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
{
struct snd_timer_instance *ti, *ts, *tmp;
unsigned long resolution, ticks;
struct list_head *p, *ack_list_head;
unsigned long flags;
int use_tasklet = 0;
if (timer == NULL)
return;
if (timer->card && timer->card->shutdown)
return;
spin_lock_irqsave(&timer->lock, flags);
/* remember the current resolution */
if (timer->hw.c_resolution)
resolution = timer->hw.c_resolution(timer);
else
resolution = timer->hw.resolution;
/* loop for all active instances
* Here we cannot use list_for_each_entry because the active_list of a
* processed instance is relinked to done_list_head before the callback
* is called.
*/
list_for_each_entry_safe(ti, tmp, &timer->active_list_head,
active_list) {
if (!(ti->flags & SNDRV_TIMER_IFLG_RUNNING))
continue;
ti->pticks += ticks_left;
ti->resolution = resolution;
if (ti->cticks < ticks_left)
ti->cticks = 0;
else
ti->cticks -= ticks_left;
if (ti->cticks) /* not expired */
continue;
if (ti->flags & SNDRV_TIMER_IFLG_AUTO) {
ti->cticks = ti->ticks;
} else {
ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
--timer->running;
list_del_init(&ti->active_list);
}
if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) ||
(ti->flags & SNDRV_TIMER_IFLG_FAST))
ack_list_head = &timer->ack_list_head;
else
ack_list_head = &timer->sack_list_head;
if (list_empty(&ti->ack_list))
list_add_tail(&ti->ack_list, ack_list_head);
list_for_each_entry(ts, &ti->slave_active_head, active_list) {
ts->pticks = ti->pticks;
ts->resolution = resolution;
if (list_empty(&ts->ack_list))
list_add_tail(&ts->ack_list, ack_list_head);
}
}
if (timer->flags & SNDRV_TIMER_FLG_RESCHED)
snd_timer_reschedule(timer, timer->sticks);
if (timer->running) {
if (timer->hw.flags & SNDRV_TIMER_HW_STOP) {
timer->hw.stop(timer);
timer->flags |= SNDRV_TIMER_FLG_CHANGE;
}
if (!(timer->hw.flags & SNDRV_TIMER_HW_AUTO) ||
(timer->flags & SNDRV_TIMER_FLG_CHANGE)) {
/* restart timer */
timer->flags &= ~SNDRV_TIMER_FLG_CHANGE;
timer->hw.start(timer);
}
} else {
timer->hw.stop(timer);
}
/* now process all fast callbacks */
while (!list_empty(&timer->ack_list_head)) {
p = timer->ack_list_head.next; /* get first item */
ti = list_entry(p, struct snd_timer_instance, ack_list);
/* remove from ack_list and make empty */
list_del_init(p);
ticks = ti->pticks;
ti->pticks = 0;
ti->flags |= SNDRV_TIMER_IFLG_CALLBACK;
spin_unlock(&timer->lock);
if (ti->callback)
ti->callback(ti, resolution, ticks);
spin_lock(&timer->lock);
ti->flags &= ~SNDRV_TIMER_IFLG_CALLBACK;
}
/* do we have any slow callbacks? */
use_tasklet = !list_empty(&timer->sack_list_head);
spin_unlock_irqrestore(&timer->lock, flags);
if (use_tasklet)
tasklet_schedule(&timer->task_queue);
}
/*
*/
int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid,
struct snd_timer **rtimer)
{
struct snd_timer *timer;
int err;
static struct snd_device_ops ops = {
.dev_free = snd_timer_dev_free,
.dev_register = snd_timer_dev_register,
.dev_disconnect = snd_timer_dev_disconnect,
};
if (snd_BUG_ON(!tid))
return -EINVAL;
if (rtimer)
*rtimer = NULL;
timer = kzalloc(sizeof(*timer), GFP_KERNEL);
if (!timer)
return -ENOMEM;
timer->tmr_class = tid->dev_class;
timer->card = card;
timer->tmr_device = tid->device;
timer->tmr_subdevice = tid->subdevice;
if (id)
strlcpy(timer->id, id, sizeof(timer->id));
timer->sticks = 1;
INIT_LIST_HEAD(&timer->device_list);
INIT_LIST_HEAD(&timer->open_list_head);
INIT_LIST_HEAD(&timer->active_list_head);
INIT_LIST_HEAD(&timer->ack_list_head);
INIT_LIST_HEAD(&timer->sack_list_head);
spin_lock_init(&timer->lock);
tasklet_init(&timer->task_queue, snd_timer_tasklet,
(unsigned long)timer);
if (card != NULL) {
timer->module = card->module;
err = snd_device_new(card, SNDRV_DEV_TIMER, timer, &ops);
if (err < 0) {
snd_timer_free(timer);
return err;
}
}
if (rtimer)
*rtimer = timer;
return 0;
}
static int snd_timer_free(struct snd_timer *timer)
{
if (!timer)
return 0;
mutex_lock(®ister_mutex);
if (! list_empty(&timer->open_list_head)) {
struct list_head *p, *n;
struct snd_timer_instance *ti;
pr_warn("ALSA: timer %p is busy?\n", timer);
list_for_each_safe(p, n, &timer->open_list_head) {
list_del_init(p);
ti = list_entry(p, struct snd_timer_instance, open_list);
ti->timer = NULL;
}
}
list_del(&timer->device_list);
mutex_unlock(®ister_mutex);
if (timer->private_free)
timer->private_free(timer);
kfree(timer);
return 0;
}
static int snd_timer_dev_free(struct snd_device *device)
{
struct snd_timer *timer = device->device_data;
return snd_timer_free(timer);
}
static int snd_timer_dev_register(struct snd_device *dev)
{
struct snd_timer *timer = dev->device_data;
struct snd_timer *timer1;
if (snd_BUG_ON(!timer || !timer->hw.start || !timer->hw.stop))
return -ENXIO;
if (!(timer->hw.flags & SNDRV_TIMER_HW_SLAVE) &&
!timer->hw.resolution && timer->hw.c_resolution == NULL)
return -EINVAL;
mutex_lock(®ister_mutex);
list_for_each_entry(timer1, &snd_timer_list, device_list) {
if (timer1->tmr_class > timer->tmr_class)
break;
if (timer1->tmr_class < timer->tmr_class)
continue;
if (timer1->card && timer->card) {
if (timer1->card->number > timer->card->number)
break;
if (timer1->card->number < timer->card->number)
continue;
}
if (timer1->tmr_device > timer->tmr_device)
break;
if (timer1->tmr_device < timer->tmr_device)
continue;
if (timer1->tmr_subdevice > timer->tmr_subdevice)
break;
if (timer1->tmr_subdevice < timer->tmr_subdevice)
continue;
/* conflicts.. */
mutex_unlock(®ister_mutex);
return -EBUSY;
}
list_add_tail(&timer->device_list, &timer1->device_list);
mutex_unlock(®ister_mutex);
return 0;
}
static int snd_timer_dev_disconnect(struct snd_device *device)
{
struct snd_timer *timer = device->device_data;
struct snd_timer_instance *ti;
mutex_lock(®ister_mutex);
list_del_init(&timer->device_list);
/* wake up pending sleepers */
list_for_each_entry(ti, &timer->open_list_head, open_list) {
if (ti->disconnect)
ti->disconnect(ti);
}
mutex_unlock(®ister_mutex);
return 0;
}
void snd_timer_notify(struct snd_timer *timer, int event, struct timespec *tstamp)
{
unsigned long flags;
unsigned long resolution = 0;
struct snd_timer_instance *ti, *ts;
if (timer->card && timer->card->shutdown)
return;
if (! (timer->hw.flags & SNDRV_TIMER_HW_SLAVE))
return;
if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_MSTART ||
event > SNDRV_TIMER_EVENT_MRESUME))
return;
spin_lock_irqsave(&timer->lock, flags);
if (event == SNDRV_TIMER_EVENT_MSTART ||
event == SNDRV_TIMER_EVENT_MCONTINUE ||
event == SNDRV_TIMER_EVENT_MRESUME) {
if (timer->hw.c_resolution)
resolution = timer->hw.c_resolution(timer);
else
resolution = timer->hw.resolution;
}
list_for_each_entry(ti, &timer->active_list_head, active_list) {
if (ti->ccallback)
ti->ccallback(ti, event, tstamp, resolution);
list_for_each_entry(ts, &ti->slave_active_head, active_list)
if (ts->ccallback)
ts->ccallback(ts, event, tstamp, resolution);
}
spin_unlock_irqrestore(&timer->lock, flags);
}
/*
* exported functions for global timers
*/
int snd_timer_global_new(char *id, int device, struct snd_timer **rtimer)
{
struct snd_timer_id tid;
tid.dev_class = SNDRV_TIMER_CLASS_GLOBAL;
tid.dev_sclass = SNDRV_TIMER_SCLASS_NONE;
tid.card = -1;
tid.device = device;
tid.subdevice = 0;
return snd_timer_new(NULL, id, &tid, rtimer);
}
int snd_timer_global_free(struct snd_timer *timer)
{
return snd_timer_free(timer);
}
int snd_timer_global_register(struct snd_timer *timer)
{
struct snd_device dev;
memset(&dev, 0, sizeof(dev));
dev.device_data = timer;
return snd_timer_dev_register(&dev);
}
/*
* System timer
*/
struct snd_timer_system_private {
struct timer_list tlist;
unsigned long last_expires;
unsigned long last_jiffies;
unsigned long correction;
};
static void snd_timer_s_function(unsigned long data)
{
struct snd_timer *timer = (struct snd_timer *)data;
struct snd_timer_system_private *priv = timer->private_data;
unsigned long jiff = jiffies;
if (time_after(jiff, priv->last_expires))
priv->correction += (long)jiff - (long)priv->last_expires;
snd_timer_interrupt(timer, (long)jiff - (long)priv->last_jiffies);
}
static int snd_timer_s_start(struct snd_timer * timer)
{
struct snd_timer_system_private *priv;
unsigned long njiff;
priv = (struct snd_timer_system_private *) timer->private_data;
njiff = (priv->last_jiffies = jiffies);
if (priv->correction > timer->sticks - 1) {
priv->correction -= timer->sticks - 1;
njiff++;
} else {
njiff += timer->sticks - priv->correction;
priv->correction = 0;
}
priv->last_expires = njiff;
mod_timer(&priv->tlist, njiff);
return 0;
}
static int snd_timer_s_stop(struct snd_timer * timer)
{
struct snd_timer_system_private *priv;
unsigned long jiff;
priv = (struct snd_timer_system_private *) timer->private_data;
del_timer(&priv->tlist);
jiff = jiffies;
if (time_before(jiff, priv->last_expires))
timer->sticks = priv->last_expires - jiff;
else
timer->sticks = 1;
priv->correction = 0;
return 0;
}
static int snd_timer_s_close(struct snd_timer *timer)
{
struct snd_timer_system_private *priv;
priv = (struct snd_timer_system_private *)timer->private_data;
del_timer_sync(&priv->tlist);
return 0;
}
static struct snd_timer_hardware snd_timer_system =
{
.flags = SNDRV_TIMER_HW_FIRST | SNDRV_TIMER_HW_TASKLET,
.resolution = 1000000000L / HZ,
.ticks = 10000000L,
.close = snd_timer_s_close,
.start = snd_timer_s_start,
.stop = snd_timer_s_stop
};
static void snd_timer_free_system(struct snd_timer *timer)
{
kfree(timer->private_data);
}
static int snd_timer_register_system(void)
{
struct snd_timer *timer;
struct snd_timer_system_private *priv;
int err;
err = snd_timer_global_new("system", SNDRV_TIMER_GLOBAL_SYSTEM, &timer);
if (err < 0)
return err;
strcpy(timer->name, "system timer");
timer->hw = snd_timer_system;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (priv == NULL) {
snd_timer_free(timer);
return -ENOMEM;
}
setup_timer(&priv->tlist, snd_timer_s_function, (unsigned long) timer);
timer->private_data = priv;
timer->private_free = snd_timer_free_system;
return snd_timer_global_register(timer);
}
#ifdef CONFIG_SND_PROC_FS
/*
* Info interface
*/
static void snd_timer_proc_read(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_timer *timer;
struct snd_timer_instance *ti;
mutex_lock(®ister_mutex);
list_for_each_entry(timer, &snd_timer_list, device_list) {
if (timer->card && timer->card->shutdown)
continue;
switch (timer->tmr_class) {
case SNDRV_TIMER_CLASS_GLOBAL:
snd_iprintf(buffer, "G%i: ", timer->tmr_device);
break;
case SNDRV_TIMER_CLASS_CARD:
snd_iprintf(buffer, "C%i-%i: ",
timer->card->number, timer->tmr_device);
break;
case SNDRV_TIMER_CLASS_PCM:
snd_iprintf(buffer, "P%i-%i-%i: ", timer->card->number,
timer->tmr_device, timer->tmr_subdevice);
break;
default:
snd_iprintf(buffer, "?%i-%i-%i-%i: ", timer->tmr_class,
timer->card ? timer->card->number : -1,
timer->tmr_device, timer->tmr_subdevice);
}
snd_iprintf(buffer, "%s :", timer->name);
if (timer->hw.resolution)
snd_iprintf(buffer, " %lu.%03luus (%lu ticks)",
timer->hw.resolution / 1000,
timer->hw.resolution % 1000,
timer->hw.ticks);
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
snd_iprintf(buffer, " SLAVE");
snd_iprintf(buffer, "\n");
list_for_each_entry(ti, &timer->open_list_head, open_list)
snd_iprintf(buffer, " Client %s : %s\n",
ti->owner ? ti->owner : "unknown",
ti->flags & (SNDRV_TIMER_IFLG_START |
SNDRV_TIMER_IFLG_RUNNING)
? "running" : "stopped");
}
mutex_unlock(®ister_mutex);
}
static struct snd_info_entry *snd_timer_proc_entry;
static void __init snd_timer_proc_init(void)
{
struct snd_info_entry *entry;
entry = snd_info_create_module_entry(THIS_MODULE, "timers", NULL);
if (entry != NULL) {
entry->c.text.read = snd_timer_proc_read;
if (snd_info_register(entry) < 0) {
snd_info_free_entry(entry);
entry = NULL;
}
}
snd_timer_proc_entry = entry;
}
static void __exit snd_timer_proc_done(void)
{
snd_info_free_entry(snd_timer_proc_entry);
}
#else /* !CONFIG_SND_PROC_FS */
#define snd_timer_proc_init()
#define snd_timer_proc_done()
#endif
/*
* USER SPACE interface
*/
static void snd_timer_user_interrupt(struct snd_timer_instance *timeri,
unsigned long resolution,
unsigned long ticks)
{
struct snd_timer_user *tu = timeri->callback_data;
struct snd_timer_read *r;
int prev;
spin_lock(&tu->qlock);
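/* If the newest queued event has the same resolution, merge the new
 * ticks into it instead of consuming another queue slot.
 */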
if (tu->qused > 0) {
prev = tu->qtail == 0 ? tu->queue_size - 1 : tu->qtail - 1;
r = &tu->queue[prev];
if (r->resolution == resolution) {
r->ticks += ticks;
goto __wake;
}
}
if (tu->qused >= tu->queue_size) {
tu->overrun++;
} else {
r = &tu->queue[tu->qtail++];
tu->qtail %= tu->queue_size;
r->resolution = resolution;
r->ticks = ticks;
tu->qused++;
}
__wake:
spin_unlock(&tu->qlock);
kill_fasync(&tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
static void snd_timer_user_append_to_tqueue(struct snd_timer_user *tu,
struct snd_timer_tread *tread)
{
if (tu->qused >= tu->queue_size) {
tu->overrun++;
} else {
memcpy(&tu->tqueue[tu->qtail++], tread, sizeof(*tread));
tu->qtail %= tu->queue_size;
tu->qused++;
}
}
static void snd_timer_user_ccallback(struct snd_timer_instance *timeri,
int event,
struct timespec *tstamp,
unsigned long resolution)
{
struct snd_timer_user *tu = timeri->callback_data;
struct snd_timer_tread r1;
unsigned long flags;
if (event >= SNDRV_TIMER_EVENT_START &&
event <= SNDRV_TIMER_EVENT_PAUSE)
tu->tstamp = *tstamp;
if ((tu->filter & (1 << event)) == 0 || !tu->tread)
return;
memset(&r1, 0, sizeof(r1));
r1.event = event;
r1.tstamp = *tstamp;
r1.val = resolution;
spin_lock_irqsave(&tu->qlock, flags);
snd_timer_user_append_to_tqueue(tu, &r1);
spin_unlock_irqrestore(&tu->qlock, flags);
kill_fasync(&tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
static void snd_timer_user_disconnect(struct snd_timer_instance *timeri)
{
struct snd_timer_user *tu = timeri->callback_data;
tu->disconnected = true;
wake_up(&tu->qchange_sleep);
}
static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri,
unsigned long resolution,
unsigned long ticks)
{
struct snd_timer_user *tu = timeri->callback_data;
struct snd_timer_tread *r, r1;
struct timespec tstamp;
int prev, append = 0;
memset(&r1, 0, sizeof(r1));
memset(&tstamp, 0, sizeof(tstamp));
spin_lock(&tu->qlock);
if ((tu->filter & ((1 << SNDRV_TIMER_EVENT_RESOLUTION) |
(1 << SNDRV_TIMER_EVENT_TICK))) == 0) {
spin_unlock(&tu->qlock);
return;
}
if (tu->last_resolution != resolution || ticks > 0) {
if (timer_tstamp_monotonic)
ktime_get_ts(&tstamp);
else
getnstimeofday(&tstamp);
}
if ((tu->filter & (1 << SNDRV_TIMER_EVENT_RESOLUTION)) &&
tu->last_resolution != resolution) {
r1.event = SNDRV_TIMER_EVENT_RESOLUTION;
r1.tstamp = tstamp;
r1.val = resolution;
snd_timer_user_append_to_tqueue(tu, &r1);
tu->last_resolution = resolution;
append++;
}
if ((tu->filter & (1 << SNDRV_TIMER_EVENT_TICK)) == 0)
goto __wake;
if (ticks == 0)
goto __wake;
if (tu->qused > 0) {
prev = tu->qtail == 0 ? tu->queue_size - 1 : tu->qtail - 1;
r = &tu->tqueue[prev];
if (r->event == SNDRV_TIMER_EVENT_TICK) {
r->tstamp = tstamp;
r->val += ticks;
append++;
goto __wake;
}
}
r1.event = SNDRV_TIMER_EVENT_TICK;
r1.tstamp = tstamp;
r1.val = ticks;
snd_timer_user_append_to_tqueue(tu, &r1);
append++;
__wake:
spin_unlock(&tu->qlock);
if (append == 0)
return;
kill_fasync(&tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
static int snd_timer_user_open(struct inode *inode, struct file *file)
{
struct snd_timer_user *tu;
int err;
err = nonseekable_open(inode, file);
if (err < 0)
return err;
tu = kzalloc(sizeof(*tu), GFP_KERNEL);
if (tu == NULL)
return -ENOMEM;
spin_lock_init(&tu->qlock);
init_waitqueue_head(&tu->qchange_sleep);
mutex_init(&tu->ioctl_lock);
tu->ticks = 1;
tu->queue_size = 128;
tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
GFP_KERNEL);
if (tu->queue == NULL) {
kfree(tu);
return -ENOMEM;
}
file->private_data = tu;
return 0;
}
static int snd_timer_user_release(struct inode *inode, struct file *file)
{
struct snd_timer_user *tu;
if (file->private_data) {
tu = file->private_data;
file->private_data = NULL;
mutex_lock(&tu->ioctl_lock);
if (tu->timeri)
snd_timer_close(tu->timeri);
mutex_unlock(&tu->ioctl_lock);
kfree(tu->queue);
kfree(tu->tqueue);
kfree(tu);
}
return 0;
}
static void snd_timer_user_zero_id(struct snd_timer_id *id)
{
id->dev_class = SNDRV_TIMER_CLASS_NONE;
id->dev_sclass = SNDRV_TIMER_SCLASS_NONE;
id->card = -1;
id->device = -1;
id->subdevice = -1;
}
static void snd_timer_user_copy_id(struct snd_timer_id *id, struct snd_timer *timer)
{
id->dev_class = timer->tmr_class;
id->dev_sclass = SNDRV_TIMER_SCLASS_NONE;
id->card = timer->card ? timer->card->number : -1;
id->device = timer->tmr_device;
id->subdevice = timer->tmr_subdevice;
}
static int snd_timer_user_next_device(struct snd_timer_id __user *_tid)
{
struct snd_timer_id id;
struct snd_timer *timer;
struct list_head *p;
if (copy_from_user(&id, _tid, sizeof(id)))
return -EFAULT;
mutex_lock(&register_mutex);
if (id.dev_class < 0) { /* first item */
if (list_empty(&snd_timer_list))
snd_timer_user_zero_id(&id);
else {
timer = list_entry(snd_timer_list.next,
struct snd_timer, device_list);
snd_timer_user_copy_id(&id, timer);
}
} else {
switch (id.dev_class) {
case SNDRV_TIMER_CLASS_GLOBAL:
id.device = id.device < 0 ? 0 : id.device + 1;
list_for_each(p, &snd_timer_list) {
timer = list_entry(p, struct snd_timer, device_list);
if (timer->tmr_class > SNDRV_TIMER_CLASS_GLOBAL) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_device >= id.device) {
snd_timer_user_copy_id(&id, timer);
break;
}
}
if (p == &snd_timer_list)
snd_timer_user_zero_id(&id);
break;
case SNDRV_TIMER_CLASS_CARD:
case SNDRV_TIMER_CLASS_PCM:
if (id.card < 0) {
id.card = 0;
} else {
if (id.device < 0) {
id.device = 0;
} else {
if (id.subdevice < 0)
id.subdevice = 0;
else
id.subdevice++;
}
}
list_for_each(p, &snd_timer_list) {
timer = list_entry(p, struct snd_timer, device_list);
if (timer->tmr_class > id.dev_class) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_class < id.dev_class)
continue;
if (timer->card->number > id.card) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->card->number < id.card)
continue;
if (timer->tmr_device > id.device) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_device < id.device)
continue;
if (timer->tmr_subdevice > id.subdevice) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_subdevice < id.subdevice)
continue;
snd_timer_user_copy_id(&id, timer);
break;
}
if (p == &snd_timer_list)
snd_timer_user_zero_id(&id);
break;
default:
snd_timer_user_zero_id(&id);
}
}
mutex_unlock(&register_mutex);
if (copy_to_user(_tid, &id, sizeof(*_tid)))
return -EFAULT;
return 0;
}
static int snd_timer_user_ginfo(struct file *file,
struct snd_timer_ginfo __user *_ginfo)
{
struct snd_timer_ginfo *ginfo;
struct snd_timer_id tid;
struct snd_timer *t;
struct list_head *p;
int err = 0;
ginfo = memdup_user(_ginfo, sizeof(*ginfo));
if (IS_ERR(ginfo))
return PTR_ERR(ginfo);
tid = ginfo->tid;
memset(ginfo, 0, sizeof(*ginfo));
ginfo->tid = tid;
mutex_lock(&register_mutex);
t = snd_timer_find(&tid);
if (t != NULL) {
ginfo->card = t->card ? t->card->number : -1;
if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
ginfo->flags |= SNDRV_TIMER_FLG_SLAVE;
strlcpy(ginfo->id, t->id, sizeof(ginfo->id));
strlcpy(ginfo->name, t->name, sizeof(ginfo->name));
ginfo->resolution = t->hw.resolution;
if (t->hw.resolution_min > 0) {
ginfo->resolution_min = t->hw.resolution_min;
ginfo->resolution_max = t->hw.resolution_max;
}
list_for_each(p, &t->open_list_head) {
ginfo->clients++;
}
} else {
err = -ENODEV;
}
mutex_unlock(&register_mutex);
if (err >= 0 && copy_to_user(_ginfo, ginfo, sizeof(*ginfo)))
err = -EFAULT;
kfree(ginfo);
return err;
}
static int timer_set_gparams(struct snd_timer_gparams *gparams)
{
struct snd_timer *t;
int err;
mutex_lock(&register_mutex);
t = snd_timer_find(&gparams->tid);
if (!t) {
err = -ENODEV;
goto _error;
}
if (!list_empty(&t->open_list_head)) {
err = -EBUSY;
goto _error;
}
if (!t->hw.set_period) {
err = -ENOSYS;
goto _error;
}
err = t->hw.set_period(t, gparams->period_num, gparams->period_den);
_error:
mutex_unlock(&register_mutex);
return err;
}
static int snd_timer_user_gparams(struct file *file,
struct snd_timer_gparams __user *_gparams)
{
struct snd_timer_gparams gparams;
if (copy_from_user(&gparams, _gparams, sizeof(gparams)))
return -EFAULT;
return timer_set_gparams(&gparams);
}
static int snd_timer_user_gstatus(struct file *file,
struct snd_timer_gstatus __user *_gstatus)
{
struct snd_timer_gstatus gstatus;
struct snd_timer_id tid;
struct snd_timer *t;
int err = 0;
if (copy_from_user(&gstatus, _gstatus, sizeof(gstatus)))
return -EFAULT;
tid = gstatus.tid;
memset(&gstatus, 0, sizeof(gstatus));
gstatus.tid = tid;
mutex_lock(&register_mutex);
t = snd_timer_find(&tid);
if (t != NULL) {
if (t->hw.c_resolution)
gstatus.resolution = t->hw.c_resolution(t);
else
gstatus.resolution = t->hw.resolution;
if (t->hw.precise_resolution) {
t->hw.precise_resolution(t, &gstatus.resolution_num,
&gstatus.resolution_den);
} else {
gstatus.resolution_num = gstatus.resolution;
gstatus.resolution_den = 1000000000uL;
}
} else {
err = -ENODEV;
}
mutex_unlock(&register_mutex);
if (err >= 0 && copy_to_user(_gstatus, &gstatus, sizeof(gstatus)))
err = -EFAULT;
return err;
}
static int snd_timer_user_tselect(struct file *file,
struct snd_timer_select __user *_tselect)
{
struct snd_timer_user *tu;
struct snd_timer_select tselect;
char str[32];
int err = 0;
tu = file->private_data;
if (tu->timeri) {
snd_timer_close(tu->timeri);
tu->timeri = NULL;
}
if (copy_from_user(&tselect, _tselect, sizeof(tselect))) {
err = -EFAULT;
goto __err;
}
sprintf(str, "application %i", current->pid);
if (tselect.id.dev_class != SNDRV_TIMER_CLASS_SLAVE)
tselect.id.dev_sclass = SNDRV_TIMER_SCLASS_APPLICATION;
err = snd_timer_open(&tu->timeri, str, &tselect.id, current->pid);
if (err < 0)
goto __err;
kfree(tu->queue);
tu->queue = NULL;
kfree(tu->tqueue);
tu->tqueue = NULL;
if (tu->tread) {
tu->tqueue = kmalloc(tu->queue_size * sizeof(struct snd_timer_tread),
GFP_KERNEL);
if (tu->tqueue == NULL)
err = -ENOMEM;
} else {
tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
GFP_KERNEL);
if (tu->queue == NULL)
err = -ENOMEM;
}
if (err < 0) {
snd_timer_close(tu->timeri);
tu->timeri = NULL;
} else {
tu->timeri->flags |= SNDRV_TIMER_IFLG_FAST;
tu->timeri->callback = tu->tread
? snd_timer_user_tinterrupt : snd_timer_user_interrupt;
tu->timeri->ccallback = snd_timer_user_ccallback;
tu->timeri->callback_data = (void *)tu;
tu->timeri->disconnect = snd_timer_user_disconnect;
}
__err:
return err;
}
static int snd_timer_user_info(struct file *file,
struct snd_timer_info __user *_info)
{
struct snd_timer_user *tu;
struct snd_timer_info *info;
struct snd_timer *t;
int err = 0;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
t = tu->timeri->timer;
if (!t)
return -EBADFD;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->card = t->card ? t->card->number : -1;
if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
info->flags |= SNDRV_TIMER_FLG_SLAVE;
strlcpy(info->id, t->id, sizeof(info->id));
strlcpy(info->name, t->name, sizeof(info->name));
info->resolution = t->hw.resolution;
if (copy_to_user(_info, info, sizeof(*_info)))
err = -EFAULT;
kfree(info);
return err;
}
static int snd_timer_user_params(struct file *file,
struct snd_timer_params __user *_params)
{
struct snd_timer_user *tu;
struct snd_timer_params params;
struct snd_timer *t;
struct snd_timer_read *tr;
struct snd_timer_tread *ttr;
int err;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
t = tu->timeri->timer;
if (!t)
return -EBADFD;
if (copy_from_user(&params, _params, sizeof(params)))
return -EFAULT;
if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE)) {
u64 resolution;
if (params.ticks < 1) {
err = -EINVAL;
goto _end;
}
/* Don't allow an effective period (resolution * ticks) below 1ms */
resolution = snd_timer_resolution(tu->timeri);
resolution *= params.ticks;
if (resolution < 1000000) {
err = -EINVAL;
goto _end;
}
}
if (params.queue_size > 0 &&
(params.queue_size < 32 || params.queue_size > 1024)) {
err = -EINVAL;
goto _end;
}
if (params.filter & ~((1<<SNDRV_TIMER_EVENT_RESOLUTION)|
(1<<SNDRV_TIMER_EVENT_TICK)|
(1<<SNDRV_TIMER_EVENT_START)|
(1<<SNDRV_TIMER_EVENT_STOP)|
(1<<SNDRV_TIMER_EVENT_CONTINUE)|
(1<<SNDRV_TIMER_EVENT_PAUSE)|
(1<<SNDRV_TIMER_EVENT_SUSPEND)|
(1<<SNDRV_TIMER_EVENT_RESUME)|
(1<<SNDRV_TIMER_EVENT_MSTART)|
(1<<SNDRV_TIMER_EVENT_MSTOP)|
(1<<SNDRV_TIMER_EVENT_MCONTINUE)|
(1<<SNDRV_TIMER_EVENT_MPAUSE)|
(1<<SNDRV_TIMER_EVENT_MSUSPEND)|
(1<<SNDRV_TIMER_EVENT_MRESUME))) {
err = -EINVAL;
goto _end;
}
snd_timer_stop(tu->timeri);
spin_lock_irq(&t->lock);
tu->timeri->flags &= ~(SNDRV_TIMER_IFLG_AUTO|
SNDRV_TIMER_IFLG_EXCLUSIVE|
SNDRV_TIMER_IFLG_EARLY_EVENT);
if (params.flags & SNDRV_TIMER_PSFLG_AUTO)
tu->timeri->flags |= SNDRV_TIMER_IFLG_AUTO;
if (params.flags & SNDRV_TIMER_PSFLG_EXCLUSIVE)
tu->timeri->flags |= SNDRV_TIMER_IFLG_EXCLUSIVE;
if (params.flags & SNDRV_TIMER_PSFLG_EARLY_EVENT)
tu->timeri->flags |= SNDRV_TIMER_IFLG_EARLY_EVENT;
spin_unlock_irq(&t->lock);
if (params.queue_size > 0 &&
(unsigned int)tu->queue_size != params.queue_size) {
if (tu->tread) {
ttr = kmalloc(params.queue_size * sizeof(*ttr),
GFP_KERNEL);
if (ttr) {
kfree(tu->tqueue);
tu->queue_size = params.queue_size;
tu->tqueue = ttr;
}
} else {
tr = kmalloc(params.queue_size * sizeof(*tr),
GFP_KERNEL);
if (tr) {
kfree(tu->queue);
tu->queue_size = params.queue_size;
tu->queue = tr;
}
}
}
tu->qhead = tu->qtail = tu->qused = 0;
if (tu->timeri->flags & SNDRV_TIMER_IFLG_EARLY_EVENT) {
if (tu->tread) {
struct snd_timer_tread tread;
memset(&tread, 0, sizeof(tread));
tread.event = SNDRV_TIMER_EVENT_EARLY;
tread.tstamp.tv_sec = 0;
tread.tstamp.tv_nsec = 0;
tread.val = 0;
snd_timer_user_append_to_tqueue(tu, &tread);
} else {
struct snd_timer_read *r = &tu->queue[0];
r->resolution = 0;
r->ticks = 0;
tu->qused++;
tu->qtail++;
}
}
tu->filter = params.filter;
tu->ticks = params.ticks;
err = 0;
_end:
if (copy_to_user(_params, &params, sizeof(params)))
return -EFAULT;
return err;
}
static int snd_timer_user_status(struct file *file,
struct snd_timer_status __user *_status)
{
struct snd_timer_user *tu;
struct snd_timer_status status;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
memset(&status, 0, sizeof(status));
status.tstamp = tu->tstamp;
status.resolution = snd_timer_resolution(tu->timeri);
status.lost = tu->timeri->lost;
status.overrun = tu->overrun;
spin_lock_irq(&tu->qlock);
status.queue = tu->qused;
spin_unlock_irq(&tu->qlock);
if (copy_to_user(_status, &status, sizeof(status)))
return -EFAULT;
return 0;
}
static int snd_timer_user_start(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
snd_timer_stop(tu->timeri);
tu->timeri->lost = 0;
tu->last_resolution = 0;
return (err = snd_timer_start(tu->timeri, tu->ticks)) < 0 ? err : 0;
}
static int snd_timer_user_stop(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
return (err = snd_timer_stop(tu->timeri)) < 0 ? err : 0;
}
static int snd_timer_user_continue(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
/* start the timer instead of continuing if it has never been paused */
if (!(tu->timeri->flags & SNDRV_TIMER_IFLG_PAUSED))
return snd_timer_user_start(file);
tu->timeri->lost = 0;
return (err = snd_timer_continue(tu->timeri)) < 0 ? err : 0;
}
static int snd_timer_user_pause(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
return (err = snd_timer_pause(tu->timeri)) < 0 ? err : 0;
}
enum {
SNDRV_TIMER_IOCTL_START_OLD = _IO('T', 0x20),
SNDRV_TIMER_IOCTL_STOP_OLD = _IO('T', 0x21),
SNDRV_TIMER_IOCTL_CONTINUE_OLD = _IO('T', 0x22),
SNDRV_TIMER_IOCTL_PAUSE_OLD = _IO('T', 0x23),
};
static long __snd_timer_user_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct snd_timer_user *tu;
void __user *argp = (void __user *)arg;
int __user *p = argp;
tu = file->private_data;
switch (cmd) {
case SNDRV_TIMER_IOCTL_PVERSION:
return put_user(SNDRV_TIMER_VERSION, p) ? -EFAULT : 0;
case SNDRV_TIMER_IOCTL_NEXT_DEVICE:
return snd_timer_user_next_device(argp);
case SNDRV_TIMER_IOCTL_TREAD:
{
int xarg;
if (tu->timeri) /* too late */
return -EBUSY;
if (get_user(xarg, p))
return -EFAULT;
tu->tread = xarg ? 1 : 0;
return 0;
}
case SNDRV_TIMER_IOCTL_GINFO:
return snd_timer_user_ginfo(file, argp);
case SNDRV_TIMER_IOCTL_GPARAMS:
return snd_timer_user_gparams(file, argp);
case SNDRV_TIMER_IOCTL_GSTATUS:
return snd_timer_user_gstatus(file, argp);
case SNDRV_TIMER_IOCTL_SELECT:
return snd_timer_user_tselect(file, argp);
case SNDRV_TIMER_IOCTL_INFO:
return snd_timer_user_info(file, argp);
case SNDRV_TIMER_IOCTL_PARAMS:
return snd_timer_user_params(file, argp);
case SNDRV_TIMER_IOCTL_STATUS:
return snd_timer_user_status(file, argp);
case SNDRV_TIMER_IOCTL_START:
case SNDRV_TIMER_IOCTL_START_OLD:
return snd_timer_user_start(file);
case SNDRV_TIMER_IOCTL_STOP:
case SNDRV_TIMER_IOCTL_STOP_OLD:
return snd_timer_user_stop(file);
case SNDRV_TIMER_IOCTL_CONTINUE:
case SNDRV_TIMER_IOCTL_CONTINUE_OLD:
return snd_timer_user_continue(file);
case SNDRV_TIMER_IOCTL_PAUSE:
case SNDRV_TIMER_IOCTL_PAUSE_OLD:
return snd_timer_user_pause(file);
}
return -ENOTTY;
}
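/*
 * Rough user-space sketch of driving the ioctl interface dispatched above,
 * assuming the usual /dev/snd/timer node and the UAPI definitions from
 * <sound/asound.h>; the id values are illustrative and error handling is
 * trimmed for brevity:
 */
#if 0 /* standalone example, build separately */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sound/asound.h>

static int demo_timer_once(void)
{
	struct snd_timer_select sel;
	struct snd_timer_params params;
	struct snd_timer_read ev;
	int fd = open("/dev/snd/timer", O_RDONLY);

	if (fd < 0)
		return -1;
	memset(&sel, 0, sizeof(sel));
	sel.id.dev_class = SNDRV_TIMER_CLASS_GLOBAL;
	sel.id.device = SNDRV_TIMER_GLOBAL_SYSTEM;	/* system timer */
	ioctl(fd, SNDRV_TIMER_IOCTL_SELECT, &sel);
	memset(&params, 0, sizeof(params));
	params.ticks = 1;				/* one tick per event */
	ioctl(fd, SNDRV_TIMER_IOCTL_PARAMS, &params);
	ioctl(fd, SNDRV_TIMER_IOCTL_START);
	read(fd, &ev, sizeof(ev));	/* blocks until a tick is queued */
	close(fd);
	return 0;
}
#endif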
static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct snd_timer_user *tu = file->private_data;
long ret;
mutex_lock(&tu->ioctl_lock);
ret = __snd_timer_user_ioctl(file, cmd, arg);
mutex_unlock(&tu->ioctl_lock);
return ret;
}
static int snd_timer_user_fasync(int fd, struct file * file, int on)
{
struct snd_timer_user *tu;
tu = file->private_data;
return fasync_helper(fd, file, on, &tu->fasync);
}
static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
size_t count, loff_t *offset)
{
struct snd_timer_user *tu;
long result = 0, unit;
int qhead;
int err = 0;
tu = file->private_data;
unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read);
mutex_lock(&tu->ioctl_lock);
spin_lock_irq(&tu->qlock);
while ((long)count - result >= unit) {
while (!tu->qused) {
wait_queue_t wait;
if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
err = -EAGAIN;
goto _error;
}
set_current_state(TASK_INTERRUPTIBLE);
init_waitqueue_entry(&wait, current);
add_wait_queue(&tu->qchange_sleep, &wait);
spin_unlock_irq(&tu->qlock);
mutex_unlock(&tu->ioctl_lock);
schedule();
mutex_lock(&tu->ioctl_lock);
spin_lock_irq(&tu->qlock);
remove_wait_queue(&tu->qchange_sleep, &wait);
if (tu->disconnected) {
err = -ENODEV;
goto _error;
}
if (signal_pending(current)) {
err = -ERESTARTSYS;
goto _error;
}
}
qhead = tu->qhead++;
tu->qhead %= tu->queue_size;
tu->qused--;
spin_unlock_irq(&tu->qlock);
if (tu->tread) {
if (copy_to_user(buffer, &tu->tqueue[qhead],
sizeof(struct snd_timer_tread)))
err = -EFAULT;
} else {
if (copy_to_user(buffer, &tu->queue[qhead],
sizeof(struct snd_timer_read)))
err = -EFAULT;
}
spin_lock_irq(&tu->qlock);
if (err < 0)
goto _error;
result += unit;
buffer += unit;
}
_error:
spin_unlock_irq(&tu->qlock);
mutex_unlock(&tu->ioctl_lock);
return result > 0 ? result : err;
}
static unsigned int snd_timer_user_poll(struct file *file, poll_table * wait)
{
unsigned int mask;
struct snd_timer_user *tu;
tu = file->private_data;
poll_wait(file, &tu->qchange_sleep, wait);
mask = 0;
if (tu->qused)
mask |= POLLIN | POLLRDNORM;
if (tu->disconnected)
mask |= POLLERR;
return mask;
}
#ifdef CONFIG_COMPAT
#include "timer_compat.c"
#else
#define snd_timer_user_ioctl_compat NULL
#endif
static const struct file_operations snd_timer_f_ops =
{
.owner = THIS_MODULE,
.read = snd_timer_user_read,
.open = snd_timer_user_open,
.release = snd_timer_user_release,
.llseek = no_llseek,
.poll = snd_timer_user_poll,
.unlocked_ioctl = snd_timer_user_ioctl,
.compat_ioctl = snd_timer_user_ioctl_compat,
.fasync = snd_timer_user_fasync,
};
/* unregister the system timer */
static void snd_timer_free_all(void)
{
struct snd_timer *timer, *n;
list_for_each_entry_safe(timer, n, &snd_timer_list, device_list)
snd_timer_free(timer);
}
static struct device timer_dev;
/*
* ENTRY functions
*/
static int __init alsa_timer_init(void)
{
int err;
snd_device_initialize(&timer_dev, NULL);
dev_set_name(&timer_dev, "timer");
#ifdef SNDRV_OSS_INFO_DEV_TIMERS
snd_oss_info_register(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1,
"system timer");
#endif
err = snd_timer_register_system();
if (err < 0) {
pr_err("ALSA: unable to register system timer (%i)\n", err);
put_device(&timer_dev);
return err;
}
err = snd_register_device(SNDRV_DEVICE_TYPE_TIMER, NULL, 0,
&snd_timer_f_ops, NULL, &timer_dev);
if (err < 0) {
pr_err("ALSA: unable to register timer device (%i)\n", err);
snd_timer_free_all();
put_device(&timer_dev);
return err;
}
snd_timer_proc_init();
return 0;
}
static void __exit alsa_timer_exit(void)
{
snd_unregister_device(&timer_dev);
snd_timer_free_all();
put_device(&timer_dev);
snd_timer_proc_done();
#ifdef SNDRV_OSS_INFO_DEV_TIMERS
snd_oss_info_unregister(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1);
#endif
}
module_init(alsa_timer_init)
module_exit(alsa_timer_exit)
EXPORT_SYMBOL(snd_timer_open);
EXPORT_SYMBOL(snd_timer_close);
EXPORT_SYMBOL(snd_timer_resolution);
EXPORT_SYMBOL(snd_timer_start);
EXPORT_SYMBOL(snd_timer_stop);
EXPORT_SYMBOL(snd_timer_continue);
EXPORT_SYMBOL(snd_timer_pause);
EXPORT_SYMBOL(snd_timer_new);
EXPORT_SYMBOL(snd_timer_notify);
EXPORT_SYMBOL(snd_timer_global_new);
EXPORT_SYMBOL(snd_timer_global_free);
EXPORT_SYMBOL(snd_timer_global_register);
EXPORT_SYMBOL(snd_timer_interrupt);
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_2527_0 |
crossvul-cpp_data_bad_5694_0 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
* Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
* Copyright (C) Terry Dawson VK2KTJ (terry@animats.net)
* Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi)
*/
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/stat.h>
#include <net/net_namespace.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/rose.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/tcp_states.h>
#include <net/ip.h>
#include <net/arp.h>
static int rose_ndevs = 10;
int sysctl_rose_restart_request_timeout = ROSE_DEFAULT_T0;
int sysctl_rose_call_request_timeout = ROSE_DEFAULT_T1;
int sysctl_rose_reset_request_timeout = ROSE_DEFAULT_T2;
int sysctl_rose_clear_request_timeout = ROSE_DEFAULT_T3;
int sysctl_rose_no_activity_timeout = ROSE_DEFAULT_IDLE;
int sysctl_rose_ack_hold_back_timeout = ROSE_DEFAULT_HB;
int sysctl_rose_routing_control = ROSE_DEFAULT_ROUTING;
int sysctl_rose_link_fail_timeout = ROSE_DEFAULT_FAIL_TIMEOUT;
int sysctl_rose_maximum_vcs = ROSE_DEFAULT_MAXVC;
int sysctl_rose_window_size = ROSE_DEFAULT_WINDOW_SIZE;
static HLIST_HEAD(rose_list);
static DEFINE_SPINLOCK(rose_list_lock);
static const struct proto_ops rose_proto_ops;
ax25_address rose_callsign;
/*
* ROSE network devices are virtual network devices encapsulating ROSE
* frames into AX.25 which will be sent through an AX.25 device, so form a
* special "super class" of normal net devices; split their locks off into a
* separate class since they always nest.
*/
static struct lock_class_key rose_netdev_xmit_lock_key;
static struct lock_class_key rose_netdev_addr_lock_key;
static void rose_set_lockdep_one(struct net_device *dev,
struct netdev_queue *txq,
void *_unused)
{
lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key);
}
static void rose_set_lockdep_key(struct net_device *dev)
{
lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key);
netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
}
/*
* Convert a ROSE address into text.
*/
char *rose2asc(char *buf, const rose_address *addr)
{
if (addr->rose_addr[0] == 0x00 && addr->rose_addr[1] == 0x00 &&
addr->rose_addr[2] == 0x00 && addr->rose_addr[3] == 0x00 &&
addr->rose_addr[4] == 0x00) {
strcpy(buf, "*");
} else {
sprintf(buf, "%02X%02X%02X%02X%02X", addr->rose_addr[0] & 0xFF,
addr->rose_addr[1] & 0xFF,
addr->rose_addr[2] & 0xFF,
addr->rose_addr[3] & 0xFF,
addr->rose_addr[4] & 0xFF);
}
return buf;
}
/*
* Compare two ROSE addresses, 0 == equal.
*/
int rosecmp(rose_address *addr1, rose_address *addr2)
{
int i;
for (i = 0; i < 5; i++)
if (addr1->rose_addr[i] != addr2->rose_addr[i])
return 1;
return 0;
}
/*
* Compare two ROSE addresses for only mask digits, 0 == equal.
*/
int rosecmpm(rose_address *addr1, rose_address *addr2, unsigned short mask)
{
unsigned int i, j;
if (mask > 10)
return 1;
for (i = 0; i < mask; i++) {
j = i / 2;
if ((i % 2) != 0) {
if ((addr1->rose_addr[j] & 0x0F) != (addr2->rose_addr[j] & 0x0F))
return 1;
} else {
if ((addr1->rose_addr[j] & 0xF0) != (addr2->rose_addr[j] & 0xF0))
return 1;
}
}
return 0;
}
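/*
 * Illustrative sketch: rosecmpm() above compares only the first `mask`
 * BCD digits of two 10-digit ROSE addresses, packed two digits per byte
 * with the high nibble first.  A standalone model using a hypothetical
 * address type:
 */
#if 0 /* standalone example, build separately */
struct demo_addr { unsigned char d[5]; };	/* 10 BCD digits */

static int demo_cmpm(const struct demo_addr *a, const struct demo_addr *b,
		     unsigned short mask)
{
	unsigned int i;

	if (mask > 10)
		return 1;			/* at most 10 digits */
	for (i = 0; i < mask; i++) {
		/* even digit index -> high nibble, odd -> low nibble */
		unsigned char m = (i % 2) ? 0x0F : 0xF0;

		if ((a->d[i / 2] & m) != (b->d[i / 2] & m))
			return 1;
	}
	return 0;				/* first `mask` digits match */
}
#endif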
/*
* Socket removal during an interrupt is now safe.
*/
static void rose_remove_socket(struct sock *sk)
{
spin_lock_bh(&rose_list_lock);
sk_del_node_init(sk);
spin_unlock_bh(&rose_list_lock);
}
/*
* Kill all bound sockets on a broken link layer connection to a
* particular neighbour.
*/
void rose_kill_by_neigh(struct rose_neigh *neigh)
{
struct sock *s;
spin_lock_bh(&rose_list_lock);
sk_for_each(s, &rose_list) {
struct rose_sock *rose = rose_sk(s);
if (rose->neighbour == neigh) {
rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
rose->neighbour->use--;
rose->neighbour = NULL;
}
}
spin_unlock_bh(&rose_list_lock);
}
/*
* Kill all bound sockets on a dropped device.
*/
static void rose_kill_by_device(struct net_device *dev)
{
struct sock *s;
spin_lock_bh(&rose_list_lock);
sk_for_each(s, &rose_list) {
struct rose_sock *rose = rose_sk(s);
if (rose->device == dev) {
rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
rose->neighbour->use--;
rose->device = NULL;
}
}
spin_unlock_bh(&rose_list_lock);
}
/*
* Handle device status changes.
*/
static int rose_device_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *dev = (struct net_device *)ptr;
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
if (event != NETDEV_DOWN)
return NOTIFY_DONE;
switch (dev->type) {
case ARPHRD_ROSE:
rose_kill_by_device(dev);
break;
case ARPHRD_AX25:
rose_link_device_down(dev);
rose_rt_device_down(dev);
break;
}
return NOTIFY_DONE;
}
/*
* Add a socket to the bound sockets list.
*/
static void rose_insert_socket(struct sock *sk)
{
spin_lock_bh(&rose_list_lock);
sk_add_node(sk, &rose_list);
spin_unlock_bh(&rose_list_lock);
}
/*
* Find a socket that wants to accept the Call Request we just
* received.
*/
static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
{
struct sock *s;
spin_lock_bh(&rose_list_lock);
sk_for_each(s, &rose_list) {
struct rose_sock *rose = rose_sk(s);
if (!rosecmp(&rose->source_addr, addr) &&
!ax25cmp(&rose->source_call, call) &&
!rose->source_ndigis && s->sk_state == TCP_LISTEN)
goto found;
}
sk_for_each(s, &rose_list) {
struct rose_sock *rose = rose_sk(s);
if (!rosecmp(&rose->source_addr, addr) &&
!ax25cmp(&rose->source_call, &null_ax25_address) &&
s->sk_state == TCP_LISTEN)
goto found;
}
s = NULL;
found:
spin_unlock_bh(&rose_list_lock);
return s;
}
/*
* Find a connected ROSE socket given my LCI and device.
*/
struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh)
{
struct sock *s;
spin_lock_bh(&rose_list_lock);
sk_for_each(s, &rose_list) {
struct rose_sock *rose = rose_sk(s);
if (rose->lci == lci && rose->neighbour == neigh)
goto found;
}
s = NULL;
found:
spin_unlock_bh(&rose_list_lock);
return s;
}
/*
* Find a unique LCI for a given device.
*/
unsigned int rose_new_lci(struct rose_neigh *neigh)
{
int lci;
if (neigh->dce_mode) {
for (lci = 1; lci <= sysctl_rose_maximum_vcs; lci++)
if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
return lci;
} else {
for (lci = sysctl_rose_maximum_vcs; lci > 0; lci--)
if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
return lci;
}
return 0;
}
/*
* Deferred destroy.
*/
void rose_destroy_socket(struct sock *);
/*
* Handler for deferred kills.
*/
static void rose_destroy_timer(unsigned long data)
{
rose_destroy_socket((struct sock *)data);
}
/*
* This is called from user mode and the timers. Thus it protects itself
* against interrupt users but doesn't worry about being called during
* work. Once it is removed from the queue no interrupt or bottom half
* will touch it and we are (fairly 8-) ) safe.
*/
void rose_destroy_socket(struct sock *sk)
{
struct sk_buff *skb;
rose_remove_socket(sk);
rose_stop_heartbeat(sk);
rose_stop_idletimer(sk);
rose_stop_timer(sk);
rose_clear_queues(sk); /* Flush the queues */
while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if (skb->sk != sk) { /* A pending connection */
/* Queue the unaccepted socket for death */
sock_set_flag(skb->sk, SOCK_DEAD);
rose_start_heartbeat(skb->sk);
rose_sk(skb->sk)->state = ROSE_STATE_0;
}
kfree_skb(skb);
}
if (sk_has_allocations(sk)) {
/* Defer: outstanding buffers */
setup_timer(&sk->sk_timer, rose_destroy_timer,
(unsigned long)sk);
sk->sk_timer.expires = jiffies + 10 * HZ;
add_timer(&sk->sk_timer);
} else
sock_put(sk);
}
/*
* Handling for system calls applied via the various interfaces to a
* ROSE socket object.
*/
static int rose_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
int opt;
if (level != SOL_ROSE)
return -ENOPROTOOPT;
if (optlen < sizeof(int))
return -EINVAL;
if (get_user(opt, (int __user *)optval))
return -EFAULT;
switch (optname) {
case ROSE_DEFER:
rose->defer = opt ? 1 : 0;
return 0;
case ROSE_T1:
if (opt < 1)
return -EINVAL;
rose->t1 = opt * HZ;
return 0;
case ROSE_T2:
if (opt < 1)
return -EINVAL;
rose->t2 = opt * HZ;
return 0;
case ROSE_T3:
if (opt < 1)
return -EINVAL;
rose->t3 = opt * HZ;
return 0;
case ROSE_HOLDBACK:
if (opt < 1)
return -EINVAL;
rose->hb = opt * HZ;
return 0;
case ROSE_IDLE:
if (opt < 0)
return -EINVAL;
rose->idle = opt * 60 * HZ;
return 0;
case ROSE_QBITINCL:
rose->qbitincl = opt ? 1 : 0;
return 0;
default:
return -ENOPROTOOPT;
}
}
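/*
 * User-space sketch of tuning per-socket ROSE timers through the
 * setsockopt() handler above.  SOL_ROSE and the option names come from
 * the kernel socket/ROSE headers; the values are illustrative only:
 */
#if 0 /* standalone example, build separately */
#include <sys/socket.h>
#include <linux/rose.h>

static int demo_tune(int s)
{
	int t1 = 200;	/* seconds; the kernel multiplies by HZ */
	int defer = 1;

	if (setsockopt(s, SOL_ROSE, ROSE_T1, &t1, sizeof(t1)) < 0)
		return -1;
	/* defer the Call Accepted until SIOCRSACCEPT (see rose_ioctl below) */
	return setsockopt(s, SOL_ROSE, ROSE_DEFER, &defer, sizeof(defer));
}
#endif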
static int rose_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
int val = 0;
int len;
if (level != SOL_ROSE)
return -ENOPROTOOPT;
if (get_user(len, optlen))
return -EFAULT;
if (len < 0)
return -EINVAL;
switch (optname) {
case ROSE_DEFER:
val = rose->defer;
break;
case ROSE_T1:
val = rose->t1 / HZ;
break;
case ROSE_T2:
val = rose->t2 / HZ;
break;
case ROSE_T3:
val = rose->t3 / HZ;
break;
case ROSE_HOLDBACK:
val = rose->hb / HZ;
break;
case ROSE_IDLE:
val = rose->idle / (60 * HZ);
break;
case ROSE_QBITINCL:
val = rose->qbitincl;
break;
default:
return -ENOPROTOOPT;
}
len = min_t(unsigned int, len, sizeof(int));
if (put_user(len, optlen))
return -EFAULT;
return copy_to_user(optval, &val, len) ? -EFAULT : 0;
}
static int rose_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
if (sk->sk_state != TCP_LISTEN) {
struct rose_sock *rose = rose_sk(sk);
rose->dest_ndigis = 0;
memset(&rose->dest_addr, 0, ROSE_ADDR_LEN);
memset(&rose->dest_call, 0, AX25_ADDR_LEN);
memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS);
sk->sk_max_ack_backlog = backlog;
sk->sk_state = TCP_LISTEN;
return 0;
}
return -EOPNOTSUPP;
}
static struct proto rose_proto = {
.name = "ROSE",
.owner = THIS_MODULE,
.obj_size = sizeof(struct rose_sock),
};
static int rose_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
struct rose_sock *rose;
if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
if (sock->type != SOCK_SEQPACKET || protocol != 0)
return -ESOCKTNOSUPPORT;
sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto);
if (sk == NULL)
return -ENOMEM;
rose = rose_sk(sk);
sock_init_data(sock, sk);
skb_queue_head_init(&rose->ack_queue);
#ifdef M_BIT
skb_queue_head_init(&rose->frag_queue);
rose->fraglen = 0;
#endif
sock->ops = &rose_proto_ops;
sk->sk_protocol = protocol;
init_timer(&rose->timer);
init_timer(&rose->idletimer);
rose->t1 = msecs_to_jiffies(sysctl_rose_call_request_timeout);
rose->t2 = msecs_to_jiffies(sysctl_rose_reset_request_timeout);
rose->t3 = msecs_to_jiffies(sysctl_rose_clear_request_timeout);
rose->hb = msecs_to_jiffies(sysctl_rose_ack_hold_back_timeout);
rose->idle = msecs_to_jiffies(sysctl_rose_no_activity_timeout);
rose->state = ROSE_STATE_0;
return 0;
}
static struct sock *rose_make_new(struct sock *osk)
{
struct sock *sk;
struct rose_sock *rose, *orose;
if (osk->sk_type != SOCK_SEQPACKET)
return NULL;
sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto);
if (sk == NULL)
return NULL;
rose = rose_sk(sk);
sock_init_data(NULL, sk);
skb_queue_head_init(&rose->ack_queue);
#ifdef M_BIT
skb_queue_head_init(&rose->frag_queue);
rose->fraglen = 0;
#endif
sk->sk_type = osk->sk_type;
sk->sk_priority = osk->sk_priority;
sk->sk_protocol = osk->sk_protocol;
sk->sk_rcvbuf = osk->sk_rcvbuf;
sk->sk_sndbuf = osk->sk_sndbuf;
sk->sk_state = TCP_ESTABLISHED;
sock_copy_flags(sk, osk);
init_timer(&rose->timer);
init_timer(&rose->idletimer);
orose = rose_sk(osk);
rose->t1 = orose->t1;
rose->t2 = orose->t2;
rose->t3 = orose->t3;
rose->hb = orose->hb;
rose->idle = orose->idle;
rose->defer = orose->defer;
rose->device = orose->device;
rose->qbitincl = orose->qbitincl;
return sk;
}
static int rose_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct rose_sock *rose;
if (sk == NULL) return 0;
sock_hold(sk);
sock_orphan(sk);
lock_sock(sk);
rose = rose_sk(sk);
switch (rose->state) {
case ROSE_STATE_0:
release_sock(sk);
rose_disconnect(sk, 0, -1, -1);
lock_sock(sk);
rose_destroy_socket(sk);
break;
case ROSE_STATE_2:
rose->neighbour->use--;
release_sock(sk);
rose_disconnect(sk, 0, -1, -1);
lock_sock(sk);
rose_destroy_socket(sk);
break;
case ROSE_STATE_1:
case ROSE_STATE_3:
case ROSE_STATE_4:
case ROSE_STATE_5:
rose_clear_queues(sk);
rose_stop_idletimer(sk);
rose_write_internal(sk, ROSE_CLEAR_REQUEST);
rose_start_t3timer(sk);
rose->state = ROSE_STATE_2;
sk->sk_state = TCP_CLOSE;
sk->sk_shutdown |= SEND_SHUTDOWN;
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
sock_set_flag(sk, SOCK_DESTROY);
break;
default:
break;
}
sock->sk = NULL;
release_sock(sk);
sock_put(sk);
return 0;
}
static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
struct net_device *dev;
ax25_address *source;
ax25_uid_assoc *user;
int n;
if (!sock_flag(sk, SOCK_ZAPPED))
return -EINVAL;
if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
return -EINVAL;
if (addr->srose_family != AF_ROSE)
return -EINVAL;
if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
return -EINVAL;
if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
return -EINVAL;
if ((dev = rose_dev_get(&addr->srose_addr)) == NULL)
return -EADDRNOTAVAIL;
source = &addr->srose_call;
user = ax25_findbyuid(current_euid());
if (user) {
rose->source_call = user->call;
ax25_uid_put(user);
} else {
if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE))
return -EACCES;
rose->source_call = *source;
}
rose->source_addr = addr->srose_addr;
rose->device = dev;
rose->source_ndigis = addr->srose_ndigis;
if (addr_len == sizeof(struct full_sockaddr_rose)) {
struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
for (n = 0 ; n < addr->srose_ndigis ; n++)
rose->source_digis[n] = full_addr->srose_digis[n];
} else {
if (rose->source_ndigis == 1) {
rose->source_digis[0] = addr->srose_digi;
}
}
rose_insert_socket(sk);
sock_reset_flag(sk, SOCK_ZAPPED);
return 0;
}
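/*
 * User-space sketch of the bind path above: fill a struct sockaddr_rose
 * (from <linux/rose.h>) with a local ROSE address and callsign, then
 * bind.  Address and callsign encoding helpers are assumed, not shown:
 */
#if 0 /* standalone example, build separately */
#include <string.h>
#include <sys/socket.h>
#include <linux/rose.h>

static int demo_bind(int s, const rose_address *addr,
		     const ax25_address *call)
{
	struct sockaddr_rose sr;

	memset(&sr, 0, sizeof(sr));
	sr.srose_family = AF_ROSE;
	sr.srose_addr = *addr;	/* must match a local rose device */
	sr.srose_call = *call;	/* subject to the ax25 uid policy above */
	sr.srose_ndigis = 0;	/* no digipeaters in this sketch */
	return bind(s, (struct sockaddr *)&sr, sizeof(sr));
}
#endif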
static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
unsigned char cause, diagnostic;
struct net_device *dev;
ax25_uid_assoc *user;
int n, err = 0;
if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
return -EINVAL;
if (addr->srose_family != AF_ROSE)
return -EINVAL;
if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
return -EINVAL;
if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
return -EINVAL;
/* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
if ((rose->source_ndigis + addr->srose_ndigis) > ROSE_MAX_DIGIS)
return -EINVAL;
lock_sock(sk);
if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
/* Connect completed during a ERESTARTSYS event */
sock->state = SS_CONNECTED;
goto out_release;
}
if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
sock->state = SS_UNCONNECTED;
err = -ECONNREFUSED;
goto out_release;
}
if (sk->sk_state == TCP_ESTABLISHED) {
/* No reconnect on a seqpacket socket */
err = -EISCONN;
goto out_release;
}
sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause,
&diagnostic, 0);
if (!rose->neighbour) {
err = -ENETUNREACH;
goto out_release;
}
rose->lci = rose_new_lci(rose->neighbour);
if (!rose->lci) {
err = -ENETUNREACH;
goto out_release;
}
if (sock_flag(sk, SOCK_ZAPPED)) { /* Must bind first - autobinding in this may or may not work */
sock_reset_flag(sk, SOCK_ZAPPED);
if ((dev = rose_dev_first()) == NULL) {
err = -ENETUNREACH;
goto out_release;
}
user = ax25_findbyuid(current_euid());
if (!user) {
err = -EINVAL;
goto out_release;
}
memcpy(&rose->source_addr, dev->dev_addr, ROSE_ADDR_LEN);
rose->source_call = user->call;
rose->device = dev;
ax25_uid_put(user);
rose_insert_socket(sk); /* Finish the bind */
}
rose->dest_addr = addr->srose_addr;
rose->dest_call = addr->srose_call;
rose->rand = ((long)rose & 0xFFFF) + rose->lci;
rose->dest_ndigis = addr->srose_ndigis;
if (addr_len == sizeof(struct full_sockaddr_rose)) {
struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
for (n = 0 ; n < addr->srose_ndigis ; n++)
rose->dest_digis[n] = full_addr->srose_digis[n];
} else {
if (rose->dest_ndigis == 1) {
rose->dest_digis[0] = addr->srose_digi;
}
}
/* Move to connecting socket, start sending Connect Requests */
sock->state = SS_CONNECTING;
sk->sk_state = TCP_SYN_SENT;
rose->state = ROSE_STATE_1;
rose->neighbour->use++;
rose_write_internal(sk, ROSE_CALL_REQUEST);
rose_start_heartbeat(sk);
rose_start_t1timer(sk);
/* Now the loop */
if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
err = -EINPROGRESS;
goto out_release;
}
/*
* A Connect Ack with Choke or timeout or failed routing will go to
* closed.
*/
if (sk->sk_state == TCP_SYN_SENT) {
DEFINE_WAIT(wait);
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
if (sk->sk_state != TCP_SYN_SENT)
break;
if (!signal_pending(current)) {
release_sock(sk);
schedule();
lock_sock(sk);
continue;
}
err = -ERESTARTSYS;
break;
}
finish_wait(sk_sleep(sk), &wait);
if (err)
goto out_release;
}
if (sk->sk_state != TCP_ESTABLISHED) {
sock->state = SS_UNCONNECTED;
err = sock_error(sk); /* Always set at this point */
goto out_release;
}
sock->state = SS_CONNECTED;
out_release:
release_sock(sk);
return err;
}
static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
{
struct sk_buff *skb;
struct sock *newsk;
DEFINE_WAIT(wait);
struct sock *sk;
int err = 0;
if ((sk = sock->sk) == NULL)
return -EINVAL;
lock_sock(sk);
if (sk->sk_type != SOCK_SEQPACKET) {
err = -EOPNOTSUPP;
goto out_release;
}
if (sk->sk_state != TCP_LISTEN) {
err = -EINVAL;
goto out_release;
}
/*
* The receive queue at this point holds the new sockets we created,
* one per Call Request we saved, ready to be accepted
*/
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
skb = skb_dequeue(&sk->sk_receive_queue);
if (skb)
break;
if (flags & O_NONBLOCK) {
err = -EWOULDBLOCK;
break;
}
if (!signal_pending(current)) {
release_sock(sk);
schedule();
lock_sock(sk);
continue;
}
err = -ERESTARTSYS;
break;
}
finish_wait(sk_sleep(sk), &wait);
if (err)
goto out_release;
newsk = skb->sk;
sock_graft(newsk, newsock);
/* Now attach up the new socket */
skb->sk = NULL;
kfree_skb(skb);
sk->sk_ack_backlog--;
out_release:
release_sock(sk);
return err;
}
static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
struct full_sockaddr_rose *srose = (struct full_sockaddr_rose *)uaddr;
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
int n;
memset(srose, 0, sizeof(*srose));
if (peer != 0) {
if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
srose->srose_family = AF_ROSE;
srose->srose_addr = rose->dest_addr;
srose->srose_call = rose->dest_call;
srose->srose_ndigis = rose->dest_ndigis;
for (n = 0; n < rose->dest_ndigis; n++)
srose->srose_digis[n] = rose->dest_digis[n];
} else {
srose->srose_family = AF_ROSE;
srose->srose_addr = rose->source_addr;
srose->srose_call = rose->source_call;
srose->srose_ndigis = rose->source_ndigis;
for (n = 0; n < rose->source_ndigis; n++)
srose->srose_digis[n] = rose->source_digis[n];
}
*uaddr_len = sizeof(struct full_sockaddr_rose);
return 0;
}
int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci)
{
struct sock *sk;
struct sock *make;
struct rose_sock *make_rose;
struct rose_facilities_struct facilities;
int n;
skb->sk = NULL; /* Initially we don't know who it's for */
/*
* skb->data points to the rose frame start
*/
memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));
if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF,
skb->len - ROSE_CALL_REQ_FACILITIES_OFF,
&facilities)) {
rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76);
return 0;
}
sk = rose_find_listener(&facilities.source_addr, &facilities.source_call);
/*
* We can't accept the Call Request.
*/
if (sk == NULL || sk_acceptq_is_full(sk) ||
(make = rose_make_new(sk)) == NULL) {
rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120);
return 0;
}
skb->sk = make;
make->sk_state = TCP_ESTABLISHED;
make_rose = rose_sk(make);
make_rose->lci = lci;
make_rose->dest_addr = facilities.dest_addr;
make_rose->dest_call = facilities.dest_call;
make_rose->dest_ndigis = facilities.dest_ndigis;
for (n = 0 ; n < facilities.dest_ndigis ; n++)
make_rose->dest_digis[n] = facilities.dest_digis[n];
make_rose->source_addr = facilities.source_addr;
make_rose->source_call = facilities.source_call;
make_rose->source_ndigis = facilities.source_ndigis;
for (n = 0 ; n < facilities.source_ndigis ; n++)
make_rose->source_digis[n]= facilities.source_digis[n];
make_rose->neighbour = neigh;
make_rose->device = dev;
make_rose->facilities = facilities;
make_rose->neighbour->use++;
if (rose_sk(sk)->defer) {
make_rose->state = ROSE_STATE_5;
} else {
rose_write_internal(make, ROSE_CALL_ACCEPTED);
make_rose->state = ROSE_STATE_3;
rose_start_idletimer(make);
}
make_rose->condition = 0x00;
make_rose->vs = 0;
make_rose->va = 0;
make_rose->vr = 0;
make_rose->vl = 0;
sk->sk_ack_backlog++;
rose_insert_socket(make);
skb_queue_head(&sk->sk_receive_queue, skb);
rose_start_heartbeat(make);
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk, skb->len);
return 1;
}
static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
struct sockaddr_rose *usrose = (struct sockaddr_rose *)msg->msg_name;
int err;
struct full_sockaddr_rose srose;
struct sk_buff *skb;
unsigned char *asmptr;
int n, size, qbit = 0;
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
return -EINVAL;
if (sock_flag(sk, SOCK_ZAPPED))
return -EADDRNOTAVAIL;
if (sk->sk_shutdown & SEND_SHUTDOWN) {
send_sig(SIGPIPE, current, 0);
return -EPIPE;
}
if (rose->neighbour == NULL || rose->device == NULL)
return -ENETUNREACH;
if (usrose != NULL) {
if (msg->msg_namelen != sizeof(struct sockaddr_rose) && msg->msg_namelen != sizeof(struct full_sockaddr_rose))
return -EINVAL;
memset(&srose, 0, sizeof(struct full_sockaddr_rose));
memcpy(&srose, usrose, msg->msg_namelen);
if (rosecmp(&rose->dest_addr, &srose.srose_addr) != 0 ||
ax25cmp(&rose->dest_call, &srose.srose_call) != 0)
return -EISCONN;
if (srose.srose_ndigis != rose->dest_ndigis)
return -EISCONN;
if (srose.srose_ndigis == rose->dest_ndigis) {
for (n = 0 ; n < srose.srose_ndigis ; n++)
if (ax25cmp(&rose->dest_digis[n],
&srose.srose_digis[n]))
return -EISCONN;
}
if (srose.srose_family != AF_ROSE)
return -EINVAL;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
srose.srose_family = AF_ROSE;
srose.srose_addr = rose->dest_addr;
srose.srose_call = rose->dest_call;
srose.srose_ndigis = rose->dest_ndigis;
for (n = 0 ; n < rose->dest_ndigis ; n++)
srose.srose_digis[n] = rose->dest_digis[n];
}
/* Build a packet */
/* Sanity check the packet size */
if (len > 65535)
return -EMSGSIZE;
size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;
if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
return err;
skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN);
/*
* Put the data on the end
*/
skb_reset_transport_header(skb);
skb_put(skb, len);
err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
if (err) {
kfree_skb(skb);
return err;
}
/*
* If the Q BIT Include socket option is in force, the first
* byte of the user data is the logical value of the Q Bit.
*/
if (rose->qbitincl) {
qbit = skb->data[0];
skb_pull(skb, 1);
}
/*
* Push down the ROSE header
*/
asmptr = skb_push(skb, ROSE_MIN_LEN);
/* Build a ROSE Network header */
asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI;
asmptr[1] = (rose->lci >> 0) & 0xFF;
asmptr[2] = ROSE_DATA;
if (qbit)
asmptr[0] |= ROSE_Q_BIT;
if (sk->sk_state != TCP_ESTABLISHED) {
kfree_skb(skb);
return -ENOTCONN;
}
#ifdef M_BIT
#define ROSE_PACLEN (256-ROSE_MIN_LEN)
if (skb->len - ROSE_MIN_LEN > ROSE_PACLEN) {
unsigned char header[ROSE_MIN_LEN];
struct sk_buff *skbn;
int frontlen;
int lg;
/* Save a copy of the Header */
skb_copy_from_linear_data(skb, header, ROSE_MIN_LEN);
skb_pull(skb, ROSE_MIN_LEN);
frontlen = skb_headroom(skb);
while (skb->len > 0) {
if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) {
kfree_skb(skb);
return err;
}
skbn->sk = sk;
skbn->free = 1;
skbn->arp = 1;
skb_reserve(skbn, frontlen);
lg = (ROSE_PACLEN > skb->len) ? skb->len : ROSE_PACLEN;
/* Copy the user data */
skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg);
skb_pull(skb, lg);
/* Duplicate the Header */
skb_push(skbn, ROSE_MIN_LEN);
skb_copy_to_linear_data(skbn, header, ROSE_MIN_LEN);
if (skb->len > 0)
skbn->data[2] |= M_BIT;
skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
}
skb->free = 1;
kfree_skb(skb);
} else {
skb_queue_tail(&sk->sk_write_queue, skb); /* Throw it on the queue */
}
#else
skb_queue_tail(&sk->sk_write_queue, skb); /* Shove it onto the queue */
#endif
rose_kick(sk);
return len;
}
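/*
 * User-space sketch of the Q-bit convention handled above: with
 * ROSE_QBITINCL enabled on the socket, the first byte of each write
 * carries the logical Q bit and is stripped before transmission:
 */
#if 0 /* standalone example, build separately */
#include <string.h>
#include <sys/socket.h>

static ssize_t demo_send_qbit(int s, const void *data, size_t len, int qbit)
{
	unsigned char buf[512];

	if (len + 1 > sizeof(buf))
		return -1;
	buf[0] = qbit ? 1 : 0;		/* consumed by rose_sendmsg() */
	memcpy(buf + 1, data, len);
	return send(s, buf, len + 1, 0);
}
#endif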
static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
struct sockaddr_rose *srose = (struct sockaddr_rose *)msg->msg_name;
size_t copied;
unsigned char *asmptr;
struct sk_buff *skb;
int n, er, qbit;
/*
* This works for seqpacket too. The receiver has ordered the queue for
* us! We do one quick check first though
*/
if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
/* Now we can treat all alike */
if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL)
return er;
qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT;
skb_pull(skb, ROSE_MIN_LEN);
if (rose->qbitincl) {
asmptr = skb_push(skb, 1);
*asmptr = qbit;
}
skb_reset_transport_header(skb);
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (srose != NULL) {
srose->srose_family = AF_ROSE;
srose->srose_addr = rose->dest_addr;
srose->srose_call = rose->dest_call;
srose->srose_ndigis = rose->dest_ndigis;
if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) {
struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name;
for (n = 0 ; n < rose->dest_ndigis ; n++)
full_srose->srose_digis[n] = rose->dest_digis[n];
msg->msg_namelen = sizeof(struct full_sockaddr_rose);
} else {
if (rose->dest_ndigis >= 1) {
srose->srose_ndigis = 1;
srose->srose_digi = rose->dest_digis[0];
}
msg->msg_namelen = sizeof(struct sockaddr_rose);
}
}
skb_free_datagram(sk, skb);
return copied;
}
static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
void __user *argp = (void __user *)arg;
switch (cmd) {
case TIOCOUTQ: {
long amount;
amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
if (amount < 0)
amount = 0;
return put_user(amount, (unsigned int __user *) argp);
}
case TIOCINQ: {
struct sk_buff *skb;
long amount = 0L;
/* These two are safe on a single CPU system as only user tasks fiddle here */
if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
amount = skb->len;
return put_user(amount, (unsigned int __user *) argp);
}
case SIOCGSTAMP:
return sock_get_timestamp(sk, (struct timeval __user *) argp);
case SIOCGSTAMPNS:
return sock_get_timestampns(sk, (struct timespec __user *) argp);
case SIOCGIFADDR:
case SIOCSIFADDR:
case SIOCGIFDSTADDR:
case SIOCSIFDSTADDR:
case SIOCGIFBRDADDR:
case SIOCSIFBRDADDR:
case SIOCGIFNETMASK:
case SIOCSIFNETMASK:
case SIOCGIFMETRIC:
case SIOCSIFMETRIC:
return -EINVAL;
case SIOCADDRT:
case SIOCDELRT:
case SIOCRSCLRRT:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
return rose_rt_ioctl(cmd, argp);
case SIOCRSGCAUSE: {
struct rose_cause_struct rose_cause;
rose_cause.cause = rose->cause;
rose_cause.diagnostic = rose->diagnostic;
return copy_to_user(argp, &rose_cause, sizeof(struct rose_cause_struct)) ? -EFAULT : 0;
}
case SIOCRSSCAUSE: {
struct rose_cause_struct rose_cause;
if (copy_from_user(&rose_cause, argp, sizeof(struct rose_cause_struct)))
return -EFAULT;
rose->cause = rose_cause.cause;
rose->diagnostic = rose_cause.diagnostic;
return 0;
}
case SIOCRSSL2CALL:
if (!capable(CAP_NET_ADMIN)) return -EPERM;
if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
ax25_listen_release(&rose_callsign, NULL);
if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address)))
return -EFAULT;
if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
return ax25_listen_register(&rose_callsign, NULL);
return 0;
case SIOCRSGL2CALL:
return copy_to_user(argp, &rose_callsign, sizeof(ax25_address)) ? -EFAULT : 0;
case SIOCRSACCEPT:
if (rose->state == ROSE_STATE_5) {
rose_write_internal(sk, ROSE_CALL_ACCEPTED);
rose_start_idletimer(sk);
rose->condition = 0x00;
rose->vs = 0;
rose->va = 0;
rose->vr = 0;
rose->vl = 0;
rose->state = ROSE_STATE_3;
}
return 0;
default:
return -ENOIOCTLCMD;
}
return 0;
}
#ifdef CONFIG_PROC_FS
static void *rose_info_start(struct seq_file *seq, loff_t *pos)
__acquires(rose_list_lock)
{
spin_lock_bh(&rose_list_lock);
return seq_hlist_start_head(&rose_list, *pos);
}
static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
return seq_hlist_next(v, &rose_list, pos);
}
static void rose_info_stop(struct seq_file *seq, void *v)
__releases(rose_list_lock)
{
spin_unlock_bh(&rose_list_lock);
}
static int rose_info_show(struct seq_file *seq, void *v)
{
char buf[11], rsbuf[11];
if (v == SEQ_START_TOKEN)
seq_puts(seq,
"dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n");
else {
struct sock *s = sk_entry(v);
struct rose_sock *rose = rose_sk(s);
const char *devname, *callsign;
const struct net_device *dev = rose->device;
if (!dev)
devname = "???";
else
devname = dev->name;
seq_printf(seq, "%-10s %-9s ",
rose2asc(rsbuf, &rose->dest_addr),
ax2asc(buf, &rose->dest_call));
if (ax25cmp(&rose->source_call, &null_ax25_address) == 0)
callsign = "??????-?";
else
callsign = ax2asc(buf, &rose->source_call);
seq_printf(seq,
"%-10s %-9s %-5s %3.3X %05d %d %d %d %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n",
rose2asc(rsbuf, &rose->source_addr),
callsign,
devname,
rose->lci & 0x0FFF,
(rose->neighbour) ? rose->neighbour->number : 0,
rose->state,
rose->vs,
rose->vr,
rose->va,
ax25_display_timer(&rose->timer) / HZ,
rose->t1 / HZ,
rose->t2 / HZ,
rose->t3 / HZ,
rose->hb / HZ,
ax25_display_timer(&rose->idletimer) / (60 * HZ),
rose->idle / (60 * HZ),
sk_wmem_alloc_get(s),
sk_rmem_alloc_get(s),
s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
}
return 0;
}
static const struct seq_operations rose_info_seqops = {
.start = rose_info_start,
.next = rose_info_next,
.stop = rose_info_stop,
.show = rose_info_show,
};
static int rose_info_open(struct inode *inode, struct file *file)
{
return seq_open(file, &rose_info_seqops);
}
static const struct file_operations rose_info_fops = {
.owner = THIS_MODULE,
.open = rose_info_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#endif /* CONFIG_PROC_FS */
static const struct net_proto_family rose_family_ops = {
.family = PF_ROSE,
.create = rose_create,
.owner = THIS_MODULE,
};
static const struct proto_ops rose_proto_ops = {
.family = PF_ROSE,
.owner = THIS_MODULE,
.release = rose_release,
.bind = rose_bind,
.connect = rose_connect,
.socketpair = sock_no_socketpair,
.accept = rose_accept,
.getname = rose_getname,
.poll = datagram_poll,
.ioctl = rose_ioctl,
.listen = rose_listen,
.shutdown = sock_no_shutdown,
.setsockopt = rose_setsockopt,
.getsockopt = rose_getsockopt,
.sendmsg = rose_sendmsg,
.recvmsg = rose_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
};
static struct notifier_block rose_dev_notifier = {
.notifier_call = rose_device_event,
};
static struct net_device **dev_rose;
static struct ax25_protocol rose_pid = {
.pid = AX25_P_ROSE,
.func = rose_route_frame
};
static struct ax25_linkfail rose_linkfail_notifier = {
.func = rose_link_failed
};
static int __init rose_proto_init(void)
{
int i;
int rc;
if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) {
printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter to large\n");
rc = -EINVAL;
goto out;
}
rc = proto_register(&rose_proto, 0);
if (rc != 0)
goto out;
rose_callsign = null_ax25_address;
dev_rose = kzalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL);
if (dev_rose == NULL) {
printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n");
rc = -ENOMEM;
goto out_proto_unregister;
}
for (i = 0; i < rose_ndevs; i++) {
struct net_device *dev;
char name[IFNAMSIZ];
sprintf(name, "rose%d", i);
dev = alloc_netdev(0, name, rose_setup);
if (!dev) {
printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n");
rc = -ENOMEM;
goto fail;
}
rc = register_netdev(dev);
if (rc) {
printk(KERN_ERR "ROSE: netdevice registration failed\n");
free_netdev(dev);
goto fail;
}
rose_set_lockdep_key(dev);
dev_rose[i] = dev;
}
sock_register(&rose_family_ops);
register_netdevice_notifier(&rose_dev_notifier);
ax25_register_pid(&rose_pid);
ax25_linkfail_register(&rose_linkfail_notifier);
#ifdef CONFIG_SYSCTL
rose_register_sysctl();
#endif
rose_loopback_init();
rose_add_loopback_neigh();
proc_create("rose", S_IRUGO, init_net.proc_net, &rose_info_fops);
proc_create("rose_neigh", S_IRUGO, init_net.proc_net,
&rose_neigh_fops);
proc_create("rose_nodes", S_IRUGO, init_net.proc_net,
&rose_nodes_fops);
proc_create("rose_routes", S_IRUGO, init_net.proc_net,
&rose_routes_fops);
out:
return rc;
fail:
while (--i >= 0) {
unregister_netdev(dev_rose[i]);
free_netdev(dev_rose[i]);
}
kfree(dev_rose);
out_proto_unregister:
proto_unregister(&rose_proto);
goto out;
}
module_init(rose_proto_init);
module_param(rose_ndevs, int, 0);
MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices");
MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
MODULE_DESCRIPTION("The amateur radio ROSE network layer protocol");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_ROSE);
static void __exit rose_exit(void)
{
int i;
remove_proc_entry("rose", init_net.proc_net);
remove_proc_entry("rose_neigh", init_net.proc_net);
remove_proc_entry("rose_nodes", init_net.proc_net);
remove_proc_entry("rose_routes", init_net.proc_net);
rose_loopback_clear();
rose_rt_free();
ax25_protocol_release(AX25_P_ROSE);
ax25_linkfail_release(&rose_linkfail_notifier);
if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
ax25_listen_release(&rose_callsign, NULL);
#ifdef CONFIG_SYSCTL
rose_unregister_sysctl();
#endif
unregister_netdevice_notifier(&rose_dev_notifier);
sock_unregister(PF_ROSE);
for (i = 0; i < rose_ndevs; i++) {
struct net_device *dev = dev_rose[i];
if (dev) {
unregister_netdev(dev);
free_netdev(dev);
}
}
kfree(dev_rose);
proto_unregister(&rose_proto);
}
module_exit(rose_exit);
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_5694_0 |
crossvul-cpp_data_good_2864_0 | /*
* FPU register's regset abstraction, for ptrace, core dumps, etc.
*/
#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/xstate.h>
#include <linux/sched/task_stack.h>
/*
* xstateregs_active() behaves the same as regset_fpregs_active(), since
* "regset->n" for the xstate regset is updated to match the feature
* capabilities supported by xsave.
*/
int regset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
struct fpu *target_fpu = &target->thread.fpu;
return target_fpu->fpstate_active ? regset->n : 0;
}
int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
struct fpu *target_fpu = &target->thread.fpu;
if (boot_cpu_has(X86_FEATURE_FXSR) && target_fpu->fpstate_active)
return regset->n;
else
return 0;
}
int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
struct fpu *fpu = &target->thread.fpu;
if (!boot_cpu_has(X86_FEATURE_FXSR))
return -ENODEV;
fpu__activate_fpstate_read(fpu);
fpstate_sanitize_xstate(fpu);
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&fpu->state.fxsave, 0, -1);
}
int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct fpu *fpu = &target->thread.fpu;
int ret;
if (!boot_cpu_has(X86_FEATURE_FXSR))
return -ENODEV;
fpu__activate_fpstate_write(fpu);
fpstate_sanitize_xstate(fpu);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&fpu->state.fxsave, 0, -1);
/*
* mxcsr reserved bits must be masked to zero for security reasons.
*/
fpu->state.fxsave.mxcsr &= mxcsr_feature_mask;
/*
* update the header bits in the xsave header, indicating the
* presence of FP and SSE state.
*/
if (boot_cpu_has(X86_FEATURE_XSAVE))
fpu->state.xsave.header.xfeatures |= XFEATURE_MASK_FPSSE;
return ret;
}
int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
struct fpu *fpu = &target->thread.fpu;
struct xregs_state *xsave;
int ret;
if (!boot_cpu_has(X86_FEATURE_XSAVE))
return -ENODEV;
xsave = &fpu->state.xsave;
fpu__activate_fpstate_read(fpu);
if (using_compacted_format()) {
if (kbuf)
ret = copy_xstate_to_kernel(kbuf, xsave, pos, count);
else
ret = copy_xstate_to_user(ubuf, xsave, pos, count);
} else {
fpstate_sanitize_xstate(fpu);
/*
* Copy the 48 software-defined bytes (xstate_fx_sw_bytes) into the
* sw_reserved field of the xsave area in the thread struct, so that
* we can copy the whole area to user space using one
* user_regset_copyout().
*/
memcpy(&xsave->i387.sw_reserved, xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
/*
* Copy the xstate memory layout.
*/
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
}
return ret;
}
int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct fpu *fpu = &target->thread.fpu;
struct xregs_state *xsave;
int ret;
if (!boot_cpu_has(X86_FEATURE_XSAVE))
return -ENODEV;
/*
* A whole standard-format XSAVE buffer is needed:
*/
if ((pos != 0) || (count < fpu_user_xstate_size))
return -EFAULT;
xsave = &fpu->state.xsave;
fpu__activate_fpstate_write(fpu);
if (boot_cpu_has(X86_FEATURE_XSAVES)) {
if (kbuf)
ret = copy_kernel_to_xstate(xsave, kbuf);
else
ret = copy_user_to_xstate(xsave, ubuf);
} else {
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
/* xcomp_bv must be 0 when using uncompacted format */
if (!ret && xsave->header.xcomp_bv)
ret = -EINVAL;
}
/*
* In case of failure, mark all states as init:
*/
if (ret)
fpstate_init(&fpu->state);
/*
* mxcsr reserved bits must be masked to zero for security reasons.
*/
xsave->i387.mxcsr &= mxcsr_feature_mask;
xsave->header.xfeatures &= xfeatures_mask;
/*
* These bits must be zero.
*/
memset(&xsave->header.reserved, 0, 48);
return ret;
}
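/*
* Note (added commentary, not in the original source): with XSAVES the
* kernel keeps its xstate buffer in compacted format, so a user-supplied
* standard-format buffer must be converted via copy_user_to_xstate()
* above; without XSAVES the standard format is copied in verbatim, which
* is why xcomp_bv must then be 0.
*/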
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
/*
* FPU tag word conversions.
*/
static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
unsigned int tmp; /* to avoid 16 bit prefixes in the code */
/* Transform each pair of bits into 01 (valid) or 00 (empty) */
tmp = ~twd;
tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
/* and move the valid bits to the lower byte. */
tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
return tmp;
}
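/*
* Worked example (added for illustration): an i387 tag word of 0xfff0
* marks ST(0) and ST(1) as valid (tag 00) and ST(2)..ST(7) as empty
* (tag 11). Then ~twd == 0x000f, the first fold gives 0x0005, the
* second gives 0x0003, and the remaining steps leave it unchanged, so
* the FXSR tag byte is 0x03: exactly the two non-empty registers.
*/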
#define FPREG_ADDR(f, n) ((void *)&(f)->st_space + (n) * 16)
#define FP_EXP_TAG_VALID 0
#define FP_EXP_TAG_ZERO 1
#define FP_EXP_TAG_SPECIAL 2
#define FP_EXP_TAG_EMPTY 3
static inline u32 twd_fxsr_to_i387(struct fxregs_state *fxsave)
{
struct _fpxreg *st;
u32 tos = (fxsave->swd >> 11) & 7;
u32 twd = (unsigned long) fxsave->twd;
u32 tag;
u32 ret = 0xffff0000u;
int i;
for (i = 0; i < 8; i++, twd >>= 1) {
if (twd & 0x1) {
st = FPREG_ADDR(fxsave, (i - tos) & 7);
switch (st->exponent & 0x7fff) {
case 0x7fff:
tag = FP_EXP_TAG_SPECIAL;
break;
case 0x0000:
if (!st->significand[0] &&
!st->significand[1] &&
!st->significand[2] &&
!st->significand[3])
tag = FP_EXP_TAG_ZERO;
else
tag = FP_EXP_TAG_SPECIAL;
break;
default:
if (st->significand[3] & 0x8000)
tag = FP_EXP_TAG_VALID;
else
tag = FP_EXP_TAG_SPECIAL;
break;
}
} else {
tag = FP_EXP_TAG_EMPTY;
}
ret |= tag << (2 * i);
}
return ret;
}
/*
* FXSR floating point environment conversions.
*/
void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
struct fxregs_state *fxsave = &tsk->thread.fpu.state.fxsave;
struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
int i;
env->cwd = fxsave->cwd | 0xffff0000u;
env->swd = fxsave->swd | 0xffff0000u;
env->twd = twd_fxsr_to_i387(fxsave);
#ifdef CONFIG_X86_64
env->fip = fxsave->rip;
env->foo = fxsave->rdp;
/*
* should be actually ds/cs at fpu exception time, but
* that information is not available in 64bit mode.
*/
env->fcs = task_pt_regs(tsk)->cs;
if (tsk == current) {
savesegment(ds, env->fos);
} else {
env->fos = tsk->thread.ds;
}
env->fos |= 0xffff0000;
#else
env->fip = fxsave->fip;
env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
env->foo = fxsave->foo;
env->fos = fxsave->fos;
#endif
for (i = 0; i < 8; ++i)
memcpy(&to[i], &from[i], sizeof(to[0]));
}
void convert_to_fxsr(struct task_struct *tsk,
const struct user_i387_ia32_struct *env)
{
struct fxregs_state *fxsave = &tsk->thread.fpu.state.fxsave;
struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
int i;
fxsave->cwd = env->cwd;
fxsave->swd = env->swd;
fxsave->twd = twd_i387_to_fxsr(env->twd);
fxsave->fop = (u16) ((u32) env->fcs >> 16);
#ifdef CONFIG_X86_64
fxsave->rip = env->fip;
fxsave->rdp = env->foo;
/* cs and ds ignored */
#else
fxsave->fip = env->fip;
fxsave->fcs = (env->fcs & 0xffff);
fxsave->foo = env->foo;
fxsave->fos = env->fos;
#endif
for (i = 0; i < 8; ++i)
memcpy(&to[i], &from[i], sizeof(from[0]));
}
int fpregs_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
struct fpu *fpu = &target->thread.fpu;
struct user_i387_ia32_struct env;
fpu__activate_fpstate_read(fpu);
if (!boot_cpu_has(X86_FEATURE_FPU))
return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
if (!boot_cpu_has(X86_FEATURE_FXSR))
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&fpu->state.fsave, 0,
-1);
fpstate_sanitize_xstate(fpu);
if (kbuf && pos == 0 && count == sizeof(env)) {
convert_from_fxsr(kbuf, target);
return 0;
}
convert_from_fxsr(&env, target);
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
}
int fpregs_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct fpu *fpu = &target->thread.fpu;
struct user_i387_ia32_struct env;
int ret;
fpu__activate_fpstate_write(fpu);
fpstate_sanitize_xstate(fpu);
if (!boot_cpu_has(X86_FEATURE_FPU))
return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
if (!boot_cpu_has(X86_FEATURE_FXSR))
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&fpu->state.fsave, 0,
-1);
if (pos > 0 || count < sizeof(env))
convert_from_fxsr(&env, target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
if (!ret)
convert_to_fxsr(target, &env);
/*
* update the header bit in the xsave header, indicating the
* presence of FP.
*/
if (boot_cpu_has(X86_FEATURE_XSAVE))
fpu->state.xsave.header.xfeatures |= XFEATURE_MASK_FP;
return ret;
}
/*
* FPU state for core dumps.
* This is only used for a.out dumps now.
* It is declared generically using elf_fpregset_t (which is
* struct user_i387_struct) but is in fact only used for 32-bit
* dumps, so on 64-bit it is really struct user_i387_ia32_struct.
*/
int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
{
struct task_struct *tsk = current;
struct fpu *fpu = &tsk->thread.fpu;
int fpvalid;
fpvalid = fpu->fpstate_active;
if (fpvalid)
fpvalid = !fpregs_get(tsk, NULL,
0, sizeof(struct user_i387_ia32_struct),
ufpu, NULL);
return fpvalid;
}
EXPORT_SYMBOL(dump_fpu);
#endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_2864_0 |
crossvul-cpp_data_good_1694_0 | /*
md.c : Multiple Devices driver for Linux
Copyright (C) 1998, 1999, 2000 Ingo Molnar
completely rewritten, based on the MD driver code from Marc Zyngier
Changes:
- RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
- RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
- boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
- kerneld support by Boris Tobotras <boris@xtalk.msk.su>
- kmod support by: Cyrus Durgin
- RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
- Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
- lots of fixes and improvements to the RAID1/RAID5 and generic
RAID code (such as request based resynchronization):
Neil Brown <neilb@cse.unsw.edu.au>.
- persistent bitmap code
Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
You should have received a copy of the GNU General Public License
(for example /usr/src/linux/COPYING); if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
#include "md.h"
#include "bitmap.h"
#include "md-cluster.h"
#ifndef MODULE
static void autostart_arrays(int part);
#endif
/* pers_list is a list of registered personalities protected
* by pers_lock.
* pers_lock also protects accesses to
* mddev->thread when the reconfig mutex cannot be held.
*/
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);
struct md_cluster_operations *md_cluster_ops;
EXPORT_SYMBOL(md_cluster_ops);
struct module *md_cluster_mod;
EXPORT_SYMBOL(md_cluster_mod);
static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;
static int remove_and_add_spares(struct mddev *mddev,
struct md_rdev *this);
static void mddev_detach(struct mddev *mddev);
/*
* Default number of read corrections we'll attempt on an rdev
* before ejecting it from the array. We divide the read error
* count by 2 for every hour elapsed between read errors.
*/
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
/*
* Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
* is 1000 KB/sec, so the extra system load does not show up that much.
* Increase it if you want to have more _guaranteed_ speed. Note that
* the RAID driver will use the maximum available bandwidth if the IO
* subsystem is idle. There is also an 'absolute maximum' reconstruction
* speed limit - in case reconstruction slows down your system despite
* idle IO detection.
*
* you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
* or /sys/block/mdX/md/sync_speed_{min,max}
*/
static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(struct mddev *mddev)
{
return mddev->sync_speed_min ?
mddev->sync_speed_min : sysctl_speed_limit_min;
}
static inline int speed_max(struct mddev *mddev)
{
return mddev->sync_speed_max ?
mddev->sync_speed_max : sysctl_speed_limit_max;
}
static struct ctl_table_header *raid_table_header;
static struct ctl_table raid_table[] = {
{
.procname = "speed_limit_min",
.data = &sysctl_speed_limit_min,
.maxlen = sizeof(int),
.mode = S_IRUGO|S_IWUSR,
.proc_handler = proc_dointvec,
},
{
.procname = "speed_limit_max",
.data = &sysctl_speed_limit_max,
.maxlen = sizeof(int),
.mode = S_IRUGO|S_IWUSR,
.proc_handler = proc_dointvec,
},
{ }
};
static struct ctl_table raid_dir_table[] = {
{
.procname = "raid",
.maxlen = 0,
.mode = S_IRUGO|S_IXUGO,
.child = raid_table,
},
{ }
};
static struct ctl_table raid_root_table[] = {
{
.procname = "dev",
.maxlen = 0,
.mode = 0555,
.child = raid_dir_table,
},
{ }
};
static const struct block_device_operations md_fops;
static int start_readonly;
/* bio_alloc_mddev / bio_clone_mddev
* like bio_alloc / bio_clone, but using the mddev's local bio set
* when one exists
*/
struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
struct mddev *mddev)
{
struct bio *b;
if (!mddev || !mddev->bio_set)
return bio_alloc(gfp_mask, nr_iovecs);
b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set);
if (!b)
return NULL;
return b;
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);
struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
struct mddev *mddev)
{
if (!mddev || !mddev->bio_set)
return bio_clone(bio, gfp_mask);
return bio_clone_bioset(bio, gfp_mask, mddev->bio_set);
}
EXPORT_SYMBOL_GPL(bio_clone_mddev);
/*
* We have a system wide 'event count' that is incremented
* on any 'interesting' event, and readers of /proc/mdstat
* can use 'poll' or 'select' to find out when the event
* count increases.
*
* Events are:
* start array, stop array, error, add device, remove device,
* start build, activate spare
*/
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(struct mddev *mddev)
{
atomic_inc(&md_event_count);
wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);
/* Alternate version that can be called from interrupts
* when calling sysfs_notify isn't needed.
*/
static void md_new_event_inintr(struct mddev *mddev)
{
atomic_inc(&md_event_count);
wake_up(&md_event_waiters);
}
/*
* Enables to iterate over all existing md arrays
* all_mddevs_lock protects this list.
*/
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);
/*
* iterates through all used mddevs in the system.
* We take care to grab the all_mddevs_lock whenever navigating
* the list, and to always hold a refcount when unlocked.
* Any code which breaks out of this loop still owns
* a reference to the current mddev and must mddev_put it.
*/
#define for_each_mddev(_mddev,_tmp) \
\
for (({ spin_lock(&all_mddevs_lock); \
_tmp = all_mddevs.next; \
_mddev = NULL;}); \
({ if (_tmp != &all_mddevs) \
mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
spin_unlock(&all_mddevs_lock); \
if (_mddev) mddev_put(_mddev); \
_mddev = list_entry(_tmp, struct mddev, all_mddevs); \
_tmp != &all_mddevs;}); \
({ spin_lock(&all_mddevs_lock); \
_tmp = _tmp->next;}) \
)
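/*
* Usage sketch (illustrative, not part of the original file): the macro
* holds a reference on _mddev while the body runs and drops it on the
* next iteration, so a body that does not break out needs no explicit
* mddev_put():
*
*	struct mddev *mddev;
*	struct list_head *tmp;
*
*	for_each_mddev(mddev, tmp)
*		pr_info("md: found %s\n", mdname(mddev));
*/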
/* Rather than calling directly into the personality make_request function,
* IO requests come here first so that we can check if the device is
* being suspended pending a reconfiguration.
* We hold a refcount over the call to ->make_request. By the time that
* call has finished, the bio has been linked into some internal structure
* and so is visible to ->quiesce(), so we don't need the refcount any more.
*/
static void md_make_request(struct request_queue *q, struct bio *bio)
{
const int rw = bio_data_dir(bio);
struct mddev *mddev = q->queuedata;
unsigned int sectors;
int cpu;
if (mddev == NULL || mddev->pers == NULL
|| !mddev->ready) {
bio_io_error(bio);
return;
}
if (mddev->ro == 1 && unlikely(rw == WRITE)) {
bio_endio(bio, bio_sectors(bio) == 0 ? 0 : -EROFS);
return;
}
smp_rmb(); /* Ensure implications of 'active' are visible */
rcu_read_lock();
if (mddev->suspended) {
DEFINE_WAIT(__wait);
for (;;) {
prepare_to_wait(&mddev->sb_wait, &__wait,
TASK_UNINTERRUPTIBLE);
if (!mddev->suspended)
break;
rcu_read_unlock();
schedule();
rcu_read_lock();
}
finish_wait(&mddev->sb_wait, &__wait);
}
atomic_inc(&mddev->active_io);
rcu_read_unlock();
/*
* save the sectors now since our bio can
* go away inside make_request
*/
sectors = bio_sectors(bio);
mddev->pers->make_request(mddev, bio);
cpu = part_stat_lock();
part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
part_stat_unlock();
if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
wake_up(&mddev->sb_wait);
}
/* mddev_suspend makes sure no new requests are submitted
* to the device, and that any requests that have been submitted
* are completely handled.
* Once mddev_detach() is called and completes, the module will be
* completely unused.
*/
void mddev_suspend(struct mddev *mddev)
{
BUG_ON(mddev->suspended);
mddev->suspended = 1;
synchronize_rcu();
wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
mddev->pers->quiesce(mddev, 1);
del_timer_sync(&mddev->safemode_timer);
}
EXPORT_SYMBOL_GPL(mddev_suspend);
void mddev_resume(struct mddev *mddev)
{
mddev->suspended = 0;
wake_up(&mddev->sb_wait);
mddev->pers->quiesce(mddev, 0);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
EXPORT_SYMBOL_GPL(mddev_resume);
int mddev_congested(struct mddev *mddev, int bits)
{
struct md_personality *pers = mddev->pers;
int ret = 0;
rcu_read_lock();
if (mddev->suspended)
ret = 1;
else if (pers && pers->congested)
ret = pers->congested(mddev, bits);
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(mddev_congested);
static int md_congested(void *data, int bits)
{
struct mddev *mddev = data;
return mddev_congested(mddev, bits);
}
static int md_mergeable_bvec(struct request_queue *q,
struct bvec_merge_data *bvm,
struct bio_vec *biovec)
{
struct mddev *mddev = q->queuedata;
int ret;
rcu_read_lock();
if (mddev->suspended) {
/* Must always allow one vec */
if (bvm->bi_size == 0)
ret = biovec->bv_len;
else
ret = 0;
} else {
struct md_personality *pers = mddev->pers;
if (pers && pers->mergeable_bvec)
ret = pers->mergeable_bvec(mddev, bvm, biovec);
else
ret = biovec->bv_len;
}
rcu_read_unlock();
return ret;
}
/*
* Generic flush handling for md
*/
static void md_end_flush(struct bio *bio, int err)
{
struct md_rdev *rdev = bio->bi_private;
struct mddev *mddev = rdev->mddev;
rdev_dec_pending(rdev, mddev);
if (atomic_dec_and_test(&mddev->flush_pending)) {
/* The pre-request flush has finished */
queue_work(md_wq, &mddev->flush_work);
}
bio_put(bio);
}
static void md_submit_flush_data(struct work_struct *ws);
static void submit_flushes(struct work_struct *ws)
{
struct mddev *mddev = container_of(ws, struct mddev, flush_work);
struct md_rdev *rdev;
INIT_WORK(&mddev->flush_work, md_submit_flush_data);
atomic_set(&mddev->flush_pending, 1);
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev)
if (rdev->raid_disk >= 0 &&
!test_bit(Faulty, &rdev->flags)) {
/* Take two references, one is dropped
* when request finishes, one after
* we re-acquire the rcu_read_lock
*/
struct bio *bi;
atomic_inc(&rdev->nr_pending);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
bi->bi_end_io = md_end_flush;
bi->bi_private = rdev;
bi->bi_bdev = rdev->bdev;
atomic_inc(&mddev->flush_pending);
submit_bio(WRITE_FLUSH, bi);
rcu_read_lock();
rdev_dec_pending(rdev, mddev);
}
rcu_read_unlock();
if (atomic_dec_and_test(&mddev->flush_pending))
queue_work(md_wq, &mddev->flush_work);
}
static void md_submit_flush_data(struct work_struct *ws)
{
struct mddev *mddev = container_of(ws, struct mddev, flush_work);
struct bio *bio = mddev->flush_bio;
if (bio->bi_iter.bi_size == 0)
/* an empty barrier - all done */
bio_endio(bio, 0);
else {
bio->bi_rw &= ~REQ_FLUSH;
mddev->pers->make_request(mddev, bio);
}
mddev->flush_bio = NULL;
wake_up(&mddev->sb_wait);
}
void md_flush_request(struct mddev *mddev, struct bio *bio)
{
spin_lock_irq(&mddev->lock);
wait_event_lock_irq(mddev->sb_wait,
!mddev->flush_bio,
mddev->lock);
mddev->flush_bio = bio;
spin_unlock_irq(&mddev->lock);
INIT_WORK(&mddev->flush_work, submit_flushes);
queue_work(md_wq, &mddev->flush_work);
}
EXPORT_SYMBOL(md_flush_request);
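/*
* Flush flow summary (added commentary): md_flush_request() parks the
* incoming REQ_FLUSH bio in mddev->flush_bio and queues submit_flushes(),
* which sends an empty flush bio to every active rdev; when the last of
* those completes, md_submit_flush_data() either ends an empty barrier
* bio or resubmits the data portion with REQ_FLUSH cleared.
*/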
void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
struct mddev *mddev = cb->data;
md_wakeup_thread(mddev->thread);
kfree(cb);
}
EXPORT_SYMBOL(md_unplug);
static inline struct mddev *mddev_get(struct mddev *mddev)
{
atomic_inc(&mddev->active);
return mddev;
}
static void mddev_delayed_delete(struct work_struct *ws);
static void mddev_put(struct mddev *mddev)
{
struct bio_set *bs = NULL;
if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
return;
if (!mddev->raid_disks && list_empty(&mddev->disks) &&
mddev->ctime == 0 && !mddev->hold_active) {
/* Array is not configured at all, and not held active,
* so destroy it */
list_del_init(&mddev->all_mddevs);
bs = mddev->bio_set;
mddev->bio_set = NULL;
if (mddev->gendisk) {
/* We did a probe so need to clean up. Call
* queue_work inside the spinlock so that
* flush_workqueue() after mddev_find will
* succeed in waiting for the work to be done.
*/
INIT_WORK(&mddev->del_work, mddev_delayed_delete);
queue_work(md_misc_wq, &mddev->del_work);
} else
kfree(mddev);
}
spin_unlock(&all_mddevs_lock);
if (bs)
bioset_free(bs);
}
void mddev_init(struct mddev *mddev)
{
mutex_init(&mddev->open_mutex);
mutex_init(&mddev->reconfig_mutex);
mutex_init(&mddev->bitmap_info.mutex);
INIT_LIST_HEAD(&mddev->disks);
INIT_LIST_HEAD(&mddev->all_mddevs);
init_timer(&mddev->safemode_timer);
atomic_set(&mddev->active, 1);
atomic_set(&mddev->openers, 0);
atomic_set(&mddev->active_io, 0);
spin_lock_init(&mddev->lock);
atomic_set(&mddev->flush_pending, 0);
init_waitqueue_head(&mddev->sb_wait);
init_waitqueue_head(&mddev->recovery_wait);
mddev->reshape_position = MaxSector;
mddev->reshape_backwards = 0;
mddev->last_sync_action = "none";
mddev->resync_min = 0;
mddev->resync_max = MaxSector;
mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);
static struct mddev *mddev_find(dev_t unit)
{
struct mddev *mddev, *new = NULL;
if (unit && MAJOR(unit) != MD_MAJOR)
unit &= ~((1<<MdpMinorShift)-1);
retry:
spin_lock(&all_mddevs_lock);
if (unit) {
list_for_each_entry(mddev, &all_mddevs, all_mddevs)
if (mddev->unit == unit) {
mddev_get(mddev);
spin_unlock(&all_mddevs_lock);
kfree(new);
return mddev;
}
if (new) {
list_add(&new->all_mddevs, &all_mddevs);
spin_unlock(&all_mddevs_lock);
new->hold_active = UNTIL_IOCTL;
return new;
}
} else if (new) {
/* find an unused unit number */
static int next_minor = 512;
int start = next_minor;
int is_free = 0;
int dev = 0;
while (!is_free) {
dev = MKDEV(MD_MAJOR, next_minor);
next_minor++;
if (next_minor > MINORMASK)
next_minor = 0;
if (next_minor == start) {
/* Oh dear, all in use. */
spin_unlock(&all_mddevs_lock);
kfree(new);
return NULL;
}
is_free = 1;
list_for_each_entry(mddev, &all_mddevs, all_mddevs)
if (mddev->unit == dev) {
is_free = 0;
break;
}
}
new->unit = dev;
new->md_minor = MINOR(dev);
new->hold_active = UNTIL_STOP;
list_add(&new->all_mddevs, &all_mddevs);
spin_unlock(&all_mddevs_lock);
return new;
}
spin_unlock(&all_mddevs_lock);
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new)
return NULL;
new->unit = unit;
if (MAJOR(unit) == MD_MAJOR)
new->md_minor = MINOR(unit);
else
new->md_minor = MINOR(unit) >> MdpMinorShift;
mddev_init(new);
goto retry;
}
static struct attribute_group md_redundancy_group;
void mddev_unlock(struct mddev *mddev)
{
if (mddev->to_remove) {
/* These cannot be removed under reconfig_mutex as
* an access to the files will try to take reconfig_mutex
* while holding the file unremovable, which leads to
* a deadlock.
* So we set sysfs_active while the remove is happening,
* and anything else which might set ->to_remove or may
* otherwise change the sysfs namespace will fail with
* -EBUSY if sysfs_active is still set.
* We set sysfs_active under reconfig_mutex and elsewhere
* test it under the same mutex to ensure its correct value
* is seen.
*/
struct attribute_group *to_remove = mddev->to_remove;
mddev->to_remove = NULL;
mddev->sysfs_active = 1;
mutex_unlock(&mddev->reconfig_mutex);
if (mddev->kobj.sd) {
if (to_remove != &md_redundancy_group)
sysfs_remove_group(&mddev->kobj, to_remove);
if (mddev->pers == NULL ||
mddev->pers->sync_request == NULL) {
sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
if (mddev->sysfs_action)
sysfs_put(mddev->sysfs_action);
mddev->sysfs_action = NULL;
}
}
mddev->sysfs_active = 0;
} else
mutex_unlock(&mddev->reconfig_mutex);
/* As we've dropped the mutex we need a spinlock to
* make sure the thread doesn't disappear
*/
spin_lock(&pers_lock);
md_wakeup_thread(mddev->thread);
spin_unlock(&pers_lock);
}
EXPORT_SYMBOL_GPL(mddev_unlock);
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
{
struct md_rdev *rdev;
rdev_for_each_rcu(rdev, mddev)
if (rdev->desc_nr == nr)
return rdev;
return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);
static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
{
struct md_rdev *rdev;
rdev_for_each(rdev, mddev)
if (rdev->bdev->bd_dev == dev)
return rdev;
return NULL;
}
static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
{
struct md_rdev *rdev;
rdev_for_each_rcu(rdev, mddev)
if (rdev->bdev->bd_dev == dev)
return rdev;
return NULL;
}
static struct md_personality *find_pers(int level, char *clevel)
{
struct md_personality *pers;
list_for_each_entry(pers, &pers_list, list) {
if (level != LEVEL_NONE && pers->level == level)
return pers;
if (strcmp(pers->name, clevel)==0)
return pers;
}
return NULL;
}
/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
{
sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
return MD_NEW_SIZE_SECTORS(num_sectors);
}
static int alloc_disk_sb(struct md_rdev *rdev)
{
rdev->sb_page = alloc_page(GFP_KERNEL);
if (!rdev->sb_page) {
printk(KERN_ALERT "md: out of memory.\n");
return -ENOMEM;
}
return 0;
}
void md_rdev_clear(struct md_rdev *rdev)
{
if (rdev->sb_page) {
put_page(rdev->sb_page);
rdev->sb_loaded = 0;
rdev->sb_page = NULL;
rdev->sb_start = 0;
rdev->sectors = 0;
}
if (rdev->bb_page) {
put_page(rdev->bb_page);
rdev->bb_page = NULL;
}
kfree(rdev->badblocks.page);
rdev->badblocks.page = NULL;
}
EXPORT_SYMBOL_GPL(md_rdev_clear);
static void super_written(struct bio *bio, int error)
{
struct md_rdev *rdev = bio->bi_private;
struct mddev *mddev = rdev->mddev;
if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
printk("md: super_written gets error=%d, uptodate=%d\n",
error, test_bit(BIO_UPTODATE, &bio->bi_flags));
WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
md_error(mddev, rdev);
}
if (atomic_dec_and_test(&mddev->pending_writes))
wake_up(&mddev->sb_wait);
bio_put(bio);
}
void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
sector_t sector, int size, struct page *page)
{
/* write first size bytes of page to sector of rdev
* Increment mddev->pending_writes before returning
* and decrement it on completion, waking up sb_wait
* if zero is reached.
* If an error occurred, call md_error
*/
struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
bio->bi_iter.bi_sector = sector;
bio_add_page(bio, page, size, 0);
bio->bi_private = rdev;
bio->bi_end_io = super_written;
atomic_inc(&mddev->pending_writes);
submit_bio(WRITE_FLUSH_FUA, bio);
}
void md_super_wait(struct mddev *mddev)
{
/* wait for all superblock writes that were scheduled to complete */
wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
}
int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct page *page, int rw, bool metadata_op)
{
struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
int ret;
bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
rdev->meta_bdev : rdev->bdev;
if (metadata_op)
bio->bi_iter.bi_sector = sector + rdev->sb_start;
else if (rdev->mddev->reshape_position != MaxSector &&
(rdev->mddev->reshape_backwards ==
(sector >= rdev->mddev->reshape_position)))
bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
else
bio->bi_iter.bi_sector = sector + rdev->data_offset;
bio_add_page(bio, page, size, 0);
submit_bio_wait(rw, bio);
ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
bio_put(bio);
return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);
static int read_disk_sb(struct md_rdev *rdev, int size)
{
char b[BDEVNAME_SIZE];
if (rdev->sb_loaded)
return 0;
if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
goto fail;
rdev->sb_loaded = 1;
return 0;
fail:
printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
bdevname(rdev->bdev,b));
return -EINVAL;
}
static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
return sb1->set_uuid0 == sb2->set_uuid0 &&
sb1->set_uuid1 == sb2->set_uuid1 &&
sb1->set_uuid2 == sb2->set_uuid2 &&
sb1->set_uuid3 == sb2->set_uuid3;
}
static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
int ret;
mdp_super_t *tmp1, *tmp2;
tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
if (!tmp1 || !tmp2) {
ret = 0;
printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
goto abort;
}
*tmp1 = *sb1;
*tmp2 = *sb2;
/*
* nr_disks is not constant
*/
tmp1->nr_disks = 0;
tmp2->nr_disks = 0;
ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
kfree(tmp1);
kfree(tmp2);
return ret;
}
static u32 md_csum_fold(u32 csum)
{
csum = (csum & 0xffff) + (csum >> 16);
return (csum & 0xffff) + (csum >> 16);
}
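/*
* Example (illustrative): md_csum_fold(0x12345678) first folds to
* 0x5678 + 0x1234 == 0x68ac, and the second fold leaves that unchanged;
* the double fold guarantees a 16-bit result even when the first
* addition carries into bit 16.
*/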
static unsigned int calc_sb_csum(mdp_super_t *sb)
{
u64 newcsum = 0;
u32 *sb32 = (u32*)sb;
int i;
unsigned int disk_csum, csum;
disk_csum = sb->sb_csum;
sb->sb_csum = 0;
for (i = 0; i < MD_SB_BYTES/4 ; i++)
newcsum += sb32[i];
csum = (newcsum & 0xffffffff) + (newcsum>>32);
#ifdef CONFIG_ALPHA
/* This used to use csum_partial, which was wrong for several
* reasons including that different results are returned on
* different architectures. It isn't critical that we get exactly
* the same return value as before (we always csum_fold before
* testing, and that removes any differences). However as we
* know that csum_partial always returned a 16bit value on
* alphas, do a fold to maximise conformity to previous behaviour.
*/
sb->sb_csum = md_csum_fold(disk_csum);
#else
sb->sb_csum = disk_csum;
#endif
return csum;
}
/*
* Handle superblock details.
* We want to be able to handle multiple superblock formats
* so we have a common interface to them all, and an array of
* different handlers.
* We rely on user-space to write the initial superblock, and support
* reading and updating of superblocks.
* Interface methods are:
* int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
* loads and validates a superblock on dev.
* if refdev != NULL, compare superblocks on both devices
* Return:
* 0 - dev has a superblock that is compatible with refdev
* 1 - dev has a superblock that is compatible and newer than refdev
* so dev should be used as the refdev in future
* -EINVAL superblock incompatible or invalid
* -othererror e.g. -EIO
*
* int validate_super(struct mddev *mddev, struct md_rdev *dev)
* Verify that dev is acceptable into mddev.
* The first time, mddev->raid_disks will be 0, and data from
* dev should be merged in. Subsequent calls check that dev
* is new enough. Return 0 or -EINVAL
*
* void sync_super(struct mddev *mddev, struct md_rdev *dev)
* Update the superblock for rdev with data in mddev
* This does not write to disc.
*
*/
struct super_type {
char *name;
struct module *owner;
int (*load_super)(struct md_rdev *rdev,
struct md_rdev *refdev,
int minor_version);
int (*validate_super)(struct mddev *mddev,
struct md_rdev *rdev);
void (*sync_super)(struct mddev *mddev,
struct md_rdev *rdev);
unsigned long long (*rdev_size_change)(struct md_rdev *rdev,
sector_t num_sectors);
int (*allow_new_offset)(struct md_rdev *rdev,
unsigned long long new_offset);
};
/*
* Check that the given mddev has no bitmap.
*
* This function is called from the run method of all personalities that do not
* support bitmaps. It prints an error message and returns non-zero if mddev
* has a bitmap. Otherwise, it returns 0.
*
*/
int md_check_no_bitmap(struct mddev *mddev)
{
if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
return 0;
printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
mdname(mddev), mddev->pers->name);
return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);
/*
* load_super for 0.90.0
*/
static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
mdp_super_t *sb;
int ret;
/*
* Calculate the position of the superblock (512byte sectors),
* it's at the end of the disk.
*
* It also happens to be a multiple of 4Kb.
*/
rdev->sb_start = calc_dev_sboffset(rdev);
ret = read_disk_sb(rdev, MD_SB_BYTES);
if (ret) return ret;
ret = -EINVAL;
bdevname(rdev->bdev, b);
sb = page_address(rdev->sb_page);
if (sb->md_magic != MD_SB_MAGIC) {
printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
b);
goto abort;
}
if (sb->major_version != 0 ||
sb->minor_version < 90 ||
sb->minor_version > 91) {
printk(KERN_WARNING "Bad version number %d.%d on %s\n",
sb->major_version, sb->minor_version,
b);
goto abort;
}
if (sb->raid_disks <= 0)
goto abort;
if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
b);
goto abort;
}
rdev->preferred_minor = sb->md_minor;
rdev->data_offset = 0;
rdev->new_data_offset = 0;
rdev->sb_size = MD_SB_BYTES;
rdev->badblocks.shift = -1;
if (sb->level == LEVEL_MULTIPATH)
rdev->desc_nr = -1;
else
rdev->desc_nr = sb->this_disk.number;
if (!refdev) {
ret = 1;
} else {
__u64 ev1, ev2;
mdp_super_t *refsb = page_address(refdev->sb_page);
if (!uuid_equal(refsb, sb)) {
printk(KERN_WARNING "md: %s has different UUID to %s\n",
b, bdevname(refdev->bdev,b2));
goto abort;
}
if (!sb_equal(refsb, sb)) {
printk(KERN_WARNING "md: %s has same UUID"
" but different superblock to %s\n",
b, bdevname(refdev->bdev, b2));
goto abort;
}
ev1 = md_event(sb);
ev2 = md_event(refsb);
if (ev1 > ev2)
ret = 1;
else
ret = 0;
}
rdev->sectors = rdev->sb_start;
/* Limit to 4TB as metadata cannot record more than that.
* (not needed for Linear and RAID0 as metadata doesn't
* record this size)
*/
if (rdev->sectors >= (2ULL << 32) && sb->level >= 1)
rdev->sectors = (2ULL << 32) - 2;
if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
/* "this cannot possibly happen" ... */
ret = -EINVAL;
abort:
return ret;
}
/*
* validate_super for 0.90.0
*/
static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
{
mdp_disk_t *desc;
mdp_super_t *sb = page_address(rdev->sb_page);
__u64 ev1 = md_event(sb);
rdev->raid_disk = -1;
clear_bit(Faulty, &rdev->flags);
clear_bit(In_sync, &rdev->flags);
clear_bit(Bitmap_sync, &rdev->flags);
clear_bit(WriteMostly, &rdev->flags);
if (mddev->raid_disks == 0) {
mddev->major_version = 0;
mddev->minor_version = sb->minor_version;
mddev->patch_version = sb->patch_version;
mddev->external = 0;
mddev->chunk_sectors = sb->chunk_size >> 9;
mddev->ctime = sb->ctime;
mddev->utime = sb->utime;
mddev->level = sb->level;
mddev->clevel[0] = 0;
mddev->layout = sb->layout;
mddev->raid_disks = sb->raid_disks;
mddev->dev_sectors = ((sector_t)sb->size) * 2;
mddev->events = ev1;
mddev->bitmap_info.offset = 0;
mddev->bitmap_info.space = 0;
/* bitmap can use 60 K after the 4K superblocks */
mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
mddev->reshape_backwards = 0;
if (mddev->minor_version >= 91) {
mddev->reshape_position = sb->reshape_position;
mddev->delta_disks = sb->delta_disks;
mddev->new_level = sb->new_level;
mddev->new_layout = sb->new_layout;
mddev->new_chunk_sectors = sb->new_chunk >> 9;
if (mddev->delta_disks < 0)
mddev->reshape_backwards = 1;
} else {
mddev->reshape_position = MaxSector;
mddev->delta_disks = 0;
mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout;
mddev->new_chunk_sectors = mddev->chunk_sectors;
}
if (sb->state & (1<<MD_SB_CLEAN))
mddev->recovery_cp = MaxSector;
else {
if (sb->events_hi == sb->cp_events_hi &&
sb->events_lo == sb->cp_events_lo) {
mddev->recovery_cp = sb->recovery_cp;
} else
mddev->recovery_cp = 0;
}
memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
mddev->max_disks = MD_SB_DISKS;
if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
mddev->bitmap_info.file == NULL) {
mddev->bitmap_info.offset =
mddev->bitmap_info.default_offset;
mddev->bitmap_info.space =
mddev->bitmap_info.default_space;
}
} else if (mddev->pers == NULL) {
/* Insist on a good event counter while assembling, except
* for spares (which don't need an event count) */
++ev1;
if (sb->disks[rdev->desc_nr].state & (
(1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
if (ev1 < mddev->events)
return -EINVAL;
} else if (mddev->bitmap) {
/* if adding to array with a bitmap, then we can accept an
* older device ... but not too old.
*/
if (ev1 < mddev->bitmap->events_cleared)
return 0;
if (ev1 < mddev->events)
set_bit(Bitmap_sync, &rdev->flags);
} else {
if (ev1 < mddev->events)
/* just a hot-add of a new device, leave raid_disk at -1 */
return 0;
}
if (mddev->level != LEVEL_MULTIPATH) {
desc = sb->disks + rdev->desc_nr;
if (desc->state & (1<<MD_DISK_FAULTY))
set_bit(Faulty, &rdev->flags);
else if (desc->state & (1<<MD_DISK_SYNC) /* &&
desc->raid_disk < mddev->raid_disks */) {
set_bit(In_sync, &rdev->flags);
rdev->raid_disk = desc->raid_disk;
rdev->saved_raid_disk = desc->raid_disk;
} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
/* active but not in sync implies recovery up to
* reshape position. We don't know exactly where
* that is, so set to zero for now */
if (mddev->minor_version >= 91) {
rdev->recovery_offset = 0;
rdev->raid_disk = desc->raid_disk;
}
}
if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
set_bit(WriteMostly, &rdev->flags);
} else /* MULTIPATH are always insync */
set_bit(In_sync, &rdev->flags);
return 0;
}
/*
* sync_super for 0.90.0
*/
static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
{
mdp_super_t *sb;
struct md_rdev *rdev2;
int next_spare = mddev->raid_disks;
/* make rdev->sb match mddev data..
*
* 1/ zero out disks
* 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
* 3/ any empty disks < next_spare become removed
*
* disks[0] gets initialised to REMOVED because
* we cannot be sure from other fields if it has
* been initialised or not.
*/
int i;
int active=0, working=0,failed=0,spare=0,nr_disks=0;
rdev->sb_size = MD_SB_BYTES;
sb = page_address(rdev->sb_page);
memset(sb, 0, sizeof(*sb));
sb->md_magic = MD_SB_MAGIC;
sb->major_version = mddev->major_version;
sb->patch_version = mddev->patch_version;
sb->gvalid_words = 0; /* ignored */
memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
memcpy(&sb->set_uuid3, mddev->uuid+12,4);
sb->ctime = mddev->ctime;
sb->level = mddev->level;
sb->size = mddev->dev_sectors / 2;
sb->raid_disks = mddev->raid_disks;
sb->md_minor = mddev->md_minor;
sb->not_persistent = 0;
sb->utime = mddev->utime;
sb->state = 0;
sb->events_hi = (mddev->events>>32);
sb->events_lo = (u32)mddev->events;
if (mddev->reshape_position == MaxSector)
sb->minor_version = 90;
else {
sb->minor_version = 91;
sb->reshape_position = mddev->reshape_position;
sb->new_level = mddev->new_level;
sb->delta_disks = mddev->delta_disks;
sb->new_layout = mddev->new_layout;
sb->new_chunk = mddev->new_chunk_sectors << 9;
}
mddev->minor_version = sb->minor_version;
if (mddev->in_sync)
{
sb->recovery_cp = mddev->recovery_cp;
sb->cp_events_hi = (mddev->events>>32);
sb->cp_events_lo = (u32)mddev->events;
if (mddev->recovery_cp == MaxSector)
sb->state = (1<< MD_SB_CLEAN);
} else
sb->recovery_cp = 0;
sb->layout = mddev->layout;
sb->chunk_size = mddev->chunk_sectors << 9;
if (mddev->bitmap && mddev->bitmap_info.file == NULL)
sb->state |= (1<<MD_SB_BITMAP_PRESENT);
sb->disks[0].state = (1<<MD_DISK_REMOVED);
rdev_for_each(rdev2, mddev) {
mdp_disk_t *d;
int desc_nr;
int is_active = test_bit(In_sync, &rdev2->flags);
if (rdev2->raid_disk >= 0 &&
sb->minor_version >= 91)
/* we have nowhere to store the recovery_offset,
* but if it is not below the reshape_position,
* we can piggy-back on that.
*/
is_active = 1;
if (rdev2->raid_disk < 0 ||
test_bit(Faulty, &rdev2->flags))
is_active = 0;
if (is_active)
desc_nr = rdev2->raid_disk;
else
desc_nr = next_spare++;
rdev2->desc_nr = desc_nr;
d = &sb->disks[rdev2->desc_nr];
nr_disks++;
d->number = rdev2->desc_nr;
d->major = MAJOR(rdev2->bdev->bd_dev);
d->minor = MINOR(rdev2->bdev->bd_dev);
if (is_active)
d->raid_disk = rdev2->raid_disk;
else
d->raid_disk = rdev2->desc_nr; /* compatibility */
if (test_bit(Faulty, &rdev2->flags))
d->state = (1<<MD_DISK_FAULTY);
else if (is_active) {
d->state = (1<<MD_DISK_ACTIVE);
if (test_bit(In_sync, &rdev2->flags))
d->state |= (1<<MD_DISK_SYNC);
active++;
working++;
} else {
d->state = 0;
spare++;
working++;
}
if (test_bit(WriteMostly, &rdev2->flags))
d->state |= (1<<MD_DISK_WRITEMOSTLY);
}
/* now set the "removed" and "faulty" bits on any missing devices */
for (i=0 ; i < mddev->raid_disks ; i++) {
mdp_disk_t *d = &sb->disks[i];
if (d->state == 0 && d->number == 0) {
d->number = i;
d->raid_disk = i;
d->state = (1<<MD_DISK_REMOVED);
d->state |= (1<<MD_DISK_FAULTY);
failed++;
}
}
sb->nr_disks = nr_disks;
sb->active_disks = active;
sb->working_disks = working;
sb->failed_disks = failed;
sb->spare_disks = spare;
sb->this_disk = sb->disks[rdev->desc_nr];
sb->sb_csum = calc_sb_csum(sb);
}
/*
* rdev_size_change for 0.90.0
*/
static unsigned long long
super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
return 0; /* component must fit device */
if (rdev->mddev->bitmap_info.offset)
return 0; /* can't move bitmap */
rdev->sb_start = calc_dev_sboffset(rdev);
if (!num_sectors || num_sectors > rdev->sb_start)
num_sectors = rdev->sb_start;
/* Limit to 4TB as metadata cannot record more than that.
* 4TB == 2^32 KB, or 2*2^32 sectors.
*/
if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
num_sectors = (2ULL << 32) - 2;
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
rdev->sb_page);
md_super_wait(rdev->mddev);
return num_sectors;
}
static int
super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
{
/* non-zero offset changes not possible with v0.90 */
return new_offset == 0;
}
/*
* version 1 superblock
*/
static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
{
__le32 disk_csum;
u32 csum;
unsigned long long newcsum;
int size = 256 + le32_to_cpu(sb->max_dev)*2;
__le32 *isuper = (__le32*)sb;
disk_csum = sb->sb_csum;
sb->sb_csum = 0;
newcsum = 0;
for (; size >= 4; size -= 4)
newcsum += le32_to_cpu(*isuper++);
if (size == 2)
newcsum += le16_to_cpu(*(__le16*) isuper);
csum = (newcsum & 0xffffffff) + (newcsum >> 32);
sb->sb_csum = disk_csum;
return cpu_to_le32(csum);
}
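/*
* Example (illustrative): with max_dev == 384 the checksummed area is
* 256 + 384*2 == 1024 bytes, i.e. 256 whole __le32 words; an odd
* max_dev such as 383 leaves size == 2 after the word loop, which is
* why the trailing __le16 is folded in separately.
*/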
static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
int acknowledged);
static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
struct mdp_superblock_1 *sb;
int ret;
sector_t sb_start;
sector_t sectors;
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
int bmask;
/*
* Calculate the position of the superblock in 512byte sectors.
* It is always aligned to a 4K boundary and
* depending on minor_version, it can be:
* 0: At least 8K, but less than 12K, from end of device
* 1: At start of device
* 2: 4K from start of device.
*/
switch(minor_version) {
case 0:
sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
sb_start -= 8*2;
sb_start &= ~(sector_t)(4*2-1);
break;
case 1:
sb_start = 0;
break;
case 2:
sb_start = 8;
break;
default:
return -EINVAL;
}
rdev->sb_start = sb_start;
/* superblock is rarely larger than 1K, but it can be larger,
* and it is safe to read 4k, so we do that
*/
ret = read_disk_sb(rdev, 4096);
if (ret) return ret;
sb = page_address(rdev->sb_page);
if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
sb->major_version != cpu_to_le32(1) ||
le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
le64_to_cpu(sb->super_offset) != rdev->sb_start ||
(le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
return -EINVAL;
if (calc_sb_1_csum(sb) != sb->sb_csum) {
printk("md: invalid superblock checksum on %s\n",
bdevname(rdev->bdev,b));
return -EINVAL;
}
if (le64_to_cpu(sb->data_size) < 10) {
printk("md: data_size too small on %s\n",
bdevname(rdev->bdev,b));
return -EINVAL;
}
if (sb->pad0 ||
sb->pad3[0] ||
memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
/* Some padding is non-zero, might be a new feature */
return -EINVAL;
rdev->preferred_minor = 0xffff;
rdev->data_offset = le64_to_cpu(sb->data_offset);
rdev->new_data_offset = rdev->data_offset;
if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
(le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
if (rdev->sb_size & bmask)
rdev->sb_size = (rdev->sb_size | bmask) + 1;
if (minor_version
&& rdev->data_offset < sb_start + (rdev->sb_size/512))
return -EINVAL;
if (minor_version
&& rdev->new_data_offset < sb_start + (rdev->sb_size/512))
return -EINVAL;
if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
rdev->desc_nr = -1;
else
rdev->desc_nr = le32_to_cpu(sb->dev_number);
if (!rdev->bb_page) {
rdev->bb_page = alloc_page(GFP_KERNEL);
if (!rdev->bb_page)
return -ENOMEM;
}
if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
rdev->badblocks.count == 0) {
/* need to load the bad block list.
* Currently we limit it to one page.
*/
s32 offset;
sector_t bb_sector;
u64 *bbp;
int i;
int sectors = le16_to_cpu(sb->bblog_size);
if (sectors > (PAGE_SIZE / 512))
return -EINVAL;
offset = le32_to_cpu(sb->bblog_offset);
if (offset == 0)
return -EINVAL;
bb_sector = (long long)offset;
if (!sync_page_io(rdev, bb_sector, sectors << 9,
rdev->bb_page, READ, true))
return -EIO;
bbp = (u64 *)page_address(rdev->bb_page);
rdev->badblocks.shift = sb->bblog_shift;
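/*
* On-disk encoding (added commentary): each 64-bit entry packs the
* start sector in the upper 54 bits and the length in the low 10 bits,
* both pre-scaled by bblog_shift; an all-ones entry terminates the
* list. E.g. with bblog_shift == 0, the entry (1000 << 10) | 8 records
* 8 bad sectors starting at sector 1000.
*/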
for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
u64 bb = le64_to_cpu(*bbp);
int count = bb & (0x3ff);
u64 sector = bb >> 10;
sector <<= sb->bblog_shift;
count <<= sb->bblog_shift;
if (bb + 1 == 0)
break;
if (md_set_badblocks(&rdev->badblocks,
sector, count, 1) == 0)
return -EINVAL;
}
} else if (sb->bblog_offset != 0)
rdev->badblocks.shift = 0;
if (!refdev) {
ret = 1;
} else {
__u64 ev1, ev2;
struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
sb->level != refsb->level ||
sb->layout != refsb->layout ||
sb->chunksize != refsb->chunksize) {
printk(KERN_WARNING "md: %s has strangely different"
" superblock to %s\n",
bdevname(rdev->bdev,b),
bdevname(refdev->bdev,b2));
return -EINVAL;
}
ev1 = le64_to_cpu(sb->events);
ev2 = le64_to_cpu(refsb->events);
if (ev1 > ev2)
ret = 1;
else
ret = 0;
}
if (minor_version) {
sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
sectors -= rdev->data_offset;
} else
sectors = rdev->sb_start;
if (sectors < le64_to_cpu(sb->data_size))
return -EINVAL;
rdev->sectors = le64_to_cpu(sb->data_size);
return ret;
}
static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
{
struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
__u64 ev1 = le64_to_cpu(sb->events);
rdev->raid_disk = -1;
clear_bit(Faulty, &rdev->flags);
clear_bit(In_sync, &rdev->flags);
clear_bit(Bitmap_sync, &rdev->flags);
clear_bit(WriteMostly, &rdev->flags);
if (mddev->raid_disks == 0) {
mddev->major_version = 1;
mddev->patch_version = 0;
mddev->external = 0;
mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
mddev->level = le32_to_cpu(sb->level);
mddev->clevel[0] = 0;
mddev->layout = le32_to_cpu(sb->layout);
mddev->raid_disks = le32_to_cpu(sb->raid_disks);
mddev->dev_sectors = le64_to_cpu(sb->size);
mddev->events = ev1;
mddev->bitmap_info.offset = 0;
mddev->bitmap_info.space = 0;
/* Default location for bitmap is 1K after superblock
* using 3K - total of 4K
*/
mddev->bitmap_info.default_offset = 1024 >> 9;
mddev->bitmap_info.default_space = (4096-1024) >> 9;
mddev->reshape_backwards = 0;
mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
memcpy(mddev->uuid, sb->set_uuid, 16);
mddev->max_disks = (4096-256)/2;
if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
mddev->bitmap_info.file == NULL) {
mddev->bitmap_info.offset =
(__s32)le32_to_cpu(sb->bitmap_offset);
/* Metadata doesn't record how much space is available.
* For 1.0, we assume we can use up to the superblock
* if before, else to 4K beyond superblock.
* For others, assume no change is possible.
*/
if (mddev->minor_version > 0)
mddev->bitmap_info.space = 0;
else if (mddev->bitmap_info.offset > 0)
mddev->bitmap_info.space =
8 - mddev->bitmap_info.offset;
else
mddev->bitmap_info.space =
-mddev->bitmap_info.offset;
}
if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
mddev->reshape_position = le64_to_cpu(sb->reshape_position);
mddev->delta_disks = le32_to_cpu(sb->delta_disks);
mddev->new_level = le32_to_cpu(sb->new_level);
mddev->new_layout = le32_to_cpu(sb->new_layout);
mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
if (mddev->delta_disks < 0 ||
(mddev->delta_disks == 0 &&
(le32_to_cpu(sb->feature_map)
& MD_FEATURE_RESHAPE_BACKWARDS)))
mddev->reshape_backwards = 1;
} else {
mddev->reshape_position = MaxSector;
mddev->delta_disks = 0;
mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout;
mddev->new_chunk_sectors = mddev->chunk_sectors;
}
} else if (mddev->pers == NULL) {
/* Insist on a good event counter while assembling, except for
* spares (which don't need an event count) */
++ev1;
if (rdev->desc_nr >= 0 &&
rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
if (ev1 < mddev->events)
return -EINVAL;
} else if (mddev->bitmap) {
/* If adding to array with a bitmap, then we can accept an
* older device, but not too old.
*/
if (ev1 < mddev->bitmap->events_cleared)
return 0;
if (ev1 < mddev->events)
set_bit(Bitmap_sync, &rdev->flags);
} else {
if (ev1 < mddev->events)
/* just a hot-add of a new device, leave raid_disk at -1 */
return 0;
}
if (mddev->level != LEVEL_MULTIPATH) {
int role;
if (rdev->desc_nr < 0 ||
rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
role = 0xffff;
rdev->desc_nr = -1;
} else
role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
switch(role) {
case 0xffff: /* spare */
break;
case 0xfffe: /* faulty */
set_bit(Faulty, &rdev->flags);
break;
default:
rdev->saved_raid_disk = role;
if ((le32_to_cpu(sb->feature_map) &
MD_FEATURE_RECOVERY_OFFSET)) {
rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
if (!(le32_to_cpu(sb->feature_map) &
MD_FEATURE_RECOVERY_BITMAP))
rdev->saved_raid_disk = -1;
} else
set_bit(In_sync, &rdev->flags);
rdev->raid_disk = role;
break;
}
if (sb->devflags & WriteMostly1)
set_bit(WriteMostly, &rdev->flags);
if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
set_bit(Replacement, &rdev->flags);
} else /* MULTIPATH are always insync */
set_bit(In_sync, &rdev->flags);
return 0;
}
static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
{
struct mdp_superblock_1 *sb;
struct md_rdev *rdev2;
int max_dev, i;
/* make rdev->sb match mddev and rdev data. */
sb = page_address(rdev->sb_page);
sb->feature_map = 0;
sb->pad0 = 0;
sb->recovery_offset = cpu_to_le64(0);
memset(sb->pad3, 0, sizeof(sb->pad3));
sb->utime = cpu_to_le64((__u64)mddev->utime);
sb->events = cpu_to_le64(mddev->events);
if (mddev->in_sync)
sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
else
sb->resync_offset = cpu_to_le64(0);
sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
sb->raid_disks = cpu_to_le32(mddev->raid_disks);
sb->size = cpu_to_le64(mddev->dev_sectors);
sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
sb->level = cpu_to_le32(mddev->level);
sb->layout = cpu_to_le32(mddev->layout);
if (test_bit(WriteMostly, &rdev->flags))
sb->devflags |= WriteMostly1;
else
sb->devflags &= ~WriteMostly1;
sb->data_offset = cpu_to_le64(rdev->data_offset);
sb->data_size = cpu_to_le64(rdev->sectors);
if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
}
if (rdev->raid_disk >= 0 &&
!test_bit(In_sync, &rdev->flags)) {
sb->feature_map |=
cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
sb->recovery_offset =
cpu_to_le64(rdev->recovery_offset);
if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
sb->feature_map |=
cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
}
if (test_bit(Replacement, &rdev->flags))
sb->feature_map |=
cpu_to_le32(MD_FEATURE_REPLACEMENT);
if (mddev->reshape_position != MaxSector) {
sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
sb->reshape_position = cpu_to_le64(mddev->reshape_position);
sb->new_layout = cpu_to_le32(mddev->new_layout);
sb->delta_disks = cpu_to_le32(mddev->delta_disks);
sb->new_level = cpu_to_le32(mddev->new_level);
sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
if (mddev->delta_disks == 0 &&
mddev->reshape_backwards)
sb->feature_map
|= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
if (rdev->new_data_offset != rdev->data_offset) {
sb->feature_map
|= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
- rdev->data_offset));
}
}
if (rdev->badblocks.count == 0)
/* Nothing to do for bad blocks */ ;
else if (sb->bblog_offset == 0)
/* Cannot record bad blocks on this device */
md_error(mddev, rdev);
else {
struct badblocks *bb = &rdev->badblocks;
u64 *bbp = (u64 *)page_address(rdev->bb_page);
u64 *p = bb->page;
sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
if (bb->changed) {
unsigned seq;
retry:
seq = read_seqbegin(&bb->lock);
memset(bbp, 0xff, PAGE_SIZE);
for (i = 0 ; i < bb->count ; i++) {
u64 internal_bb = p[i];
u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
| BB_LEN(internal_bb));
bbp[i] = cpu_to_le64(store_bb);
}
bb->changed = 0;
if (read_seqretry(&bb->lock, seq))
goto retry;
bb->sector = (rdev->sb_start +
(int)le32_to_cpu(sb->bblog_offset));
bb->size = le16_to_cpu(sb->bblog_size);
}
}
max_dev = 0;
rdev_for_each(rdev2, mddev)
if (rdev2->desc_nr+1 > max_dev)
max_dev = rdev2->desc_nr+1;
if (max_dev > le32_to_cpu(sb->max_dev)) {
int bmask;
sb->max_dev = cpu_to_le32(max_dev);
rdev->sb_size = max_dev * 2 + 256;
bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
if (rdev->sb_size & bmask)
rdev->sb_size = (rdev->sb_size | bmask) + 1;
} else
max_dev = le32_to_cpu(sb->max_dev);
for (i=0; i<max_dev;i++)
sb->dev_roles[i] = cpu_to_le16(0xfffe);
rdev_for_each(rdev2, mddev) {
i = rdev2->desc_nr;
if (test_bit(Faulty, &rdev2->flags))
sb->dev_roles[i] = cpu_to_le16(0xfffe);
else if (test_bit(In_sync, &rdev2->flags))
sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
else if (rdev2->raid_disk >= 0)
sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
else
sb->dev_roles[i] = cpu_to_le16(0xffff);
}
sb->sb_csum = calc_sb_1_csum(sb);
}
static unsigned long long
super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
struct mdp_superblock_1 *sb;
sector_t max_sectors;
if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
return 0; /* component must fit device */
if (rdev->data_offset != rdev->new_data_offset)
return 0; /* too confusing */
if (rdev->sb_start < rdev->data_offset) {
/* minor versions 1 and 2; superblock before data */
max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
max_sectors -= rdev->data_offset;
if (!num_sectors || num_sectors > max_sectors)
num_sectors = max_sectors;
} else if (rdev->mddev->bitmap_info.offset) {
/* minor version 0 with bitmap we can't move */
return 0;
} else {
/* minor version 0; superblock after data */
sector_t sb_start;
sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
sb_start &= ~(sector_t)(4*2 - 1);
max_sectors = rdev->sectors + sb_start - rdev->sb_start;
if (!num_sectors || num_sectors > max_sectors)
num_sectors = max_sectors;
rdev->sb_start = sb_start;
}
sb = page_address(rdev->sb_page);
sb->data_size = cpu_to_le64(num_sectors);
sb->super_offset = rdev->sb_start;
sb->sb_csum = calc_sb_1_csum(sb);
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
rdev->sb_page);
md_super_wait(rdev->mddev);
return num_sectors;
}
static int
super_1_allow_new_offset(struct md_rdev *rdev,
unsigned long long new_offset)
{
/* All necessary checks on new >= old have been done */
struct bitmap *bitmap;
if (new_offset >= rdev->data_offset)
return 1;
/* with 1.0 metadata, there is no metadata to tread on
* so we can always move back */
if (rdev->mddev->minor_version == 0)
return 1;
/* otherwise we must be sure not to step on
* any metadata, so stay:
* 36K beyond start of superblock
* beyond end of badblocks
* beyond write-intent bitmap
*/
if (rdev->sb_start + (32+4)*2 > new_offset)
return 0;
bitmap = rdev->mddev->bitmap;
if (bitmap && !rdev->mddev->bitmap_info.file &&
rdev->sb_start + rdev->mddev->bitmap_info.offset +
bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
return 0;
if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
return 0;
return 1;
}
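/*
 * Illustrative arithmetic for the 36K check above (added, not from the
 * original source): sb_start + (32+4)*2 is (32 + 4) KiB expressed in
 * 512-byte sectors, i.e. 72 sectors = 36 KiB past sb_start. Any
 * new_offset inside that window would let data tread on the superblock
 * or bad-block log and is rejected.
 */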
static struct super_type super_types[] = {
[0] = {
.name = "0.90.0",
.owner = THIS_MODULE,
.load_super = super_90_load,
.validate_super = super_90_validate,
.sync_super = super_90_sync,
.rdev_size_change = super_90_rdev_size_change,
.allow_new_offset = super_90_allow_new_offset,
},
[1] = {
.name = "md-1",
.owner = THIS_MODULE,
.load_super = super_1_load,
.validate_super = super_1_validate,
.sync_super = super_1_sync,
.rdev_size_change = super_1_rdev_size_change,
.allow_new_offset = super_1_allow_new_offset,
},
};
static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
{
if (mddev->sync_super) {
mddev->sync_super(mddev, rdev);
return;
}
BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
super_types[mddev->major_version].sync_super(mddev, rdev);
}
static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
{
struct md_rdev *rdev, *rdev2;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev1)
rdev_for_each_rcu(rdev2, mddev2)
if (rdev->bdev->bd_contains ==
rdev2->bdev->bd_contains) {
rcu_read_unlock();
return 1;
}
rcu_read_unlock();
return 0;
}
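/*
 * Note added for clarity: bd_contains points at the whole-disk
 * block_device, so two arrays "match" here as soon as any of their
 * members are partitions of the same physical disk, e.g. sda1 in
 * mddev1 and sda2 in mddev2.
 */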
static LIST_HEAD(pending_raid_disks);
/*
* Try to register data integrity profile for an mddev
*
* This is called when an array is started and after a disk has been kicked
* from the array. It only succeeds if all working and active component devices
* are integrity capable with matching profiles.
*/
int md_integrity_register(struct mddev *mddev)
{
struct md_rdev *rdev, *reference = NULL;
if (list_empty(&mddev->disks))
return 0; /* nothing to do */
if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
return 0; /* shouldn't register, or already is */
rdev_for_each(rdev, mddev) {
/* skip spares and non-functional disks */
if (test_bit(Faulty, &rdev->flags))
continue;
if (rdev->raid_disk < 0)
continue;
if (!reference) {
/* Use the first rdev as the reference */
reference = rdev;
continue;
}
/* does this rdev's profile match the reference profile? */
if (blk_integrity_compare(reference->bdev->bd_disk,
rdev->bdev->bd_disk) < 0)
return -EINVAL;
}
if (!reference || !bdev_get_integrity(reference->bdev))
return 0;
/*
* All component devices are integrity capable and have matching
* profiles, register the common profile for the md device.
*/
if (blk_integrity_register(mddev->gendisk,
bdev_get_integrity(reference->bdev)) != 0) {
printk(KERN_ERR "md: failed to register integrity for %s\n",
mdname(mddev));
return -EINVAL;
}
printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
printk(KERN_ERR "md: failed to create integrity pool for %s\n",
mdname(mddev));
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL(md_integrity_register);
/* Disable data integrity if non-capable/non-matching disk is being added */
void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
struct blk_integrity *bi_rdev;
struct blk_integrity *bi_mddev;
if (!mddev->gendisk)
return;
bi_rdev = bdev_get_integrity(rdev->bdev);
bi_mddev = blk_get_integrity(mddev->gendisk);
if (!bi_mddev) /* nothing to do */
return;
if (rdev->raid_disk < 0) /* skip spares */
return;
if (bi_rdev && blk_integrity_compare(mddev->gendisk,
rdev->bdev->bd_disk) >= 0)
return;
printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
blk_integrity_unregister(mddev->gendisk);
}
EXPORT_SYMBOL(md_integrity_add_rdev);
static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
{
char b[BDEVNAME_SIZE];
struct kobject *ko;
int err;
/* prevent duplicates */
if (find_rdev(mddev, rdev->bdev->bd_dev))
return -EEXIST;
/* make sure rdev->sectors is at least mddev->dev_sectors */
if (rdev->sectors && (mddev->dev_sectors == 0 ||
rdev->sectors < mddev->dev_sectors)) {
if (mddev->pers) {
/* Cannot change size, so fail
* If mddev->level <= 0, then we don't care
* about aligning sizes (e.g. linear)
*/
if (mddev->level > 0)
return -ENOSPC;
} else
mddev->dev_sectors = rdev->sectors;
}
/* Verify rdev->desc_nr is unique.
* If it is -1, assign a free number, else
* check number is not in use
*/
rcu_read_lock();
if (rdev->desc_nr < 0) {
int choice = 0;
if (mddev->pers)
choice = mddev->raid_disks;
while (md_find_rdev_nr_rcu(mddev, choice))
choice++;
rdev->desc_nr = choice;
} else {
if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
rcu_read_unlock();
return -EBUSY;
}
}
rcu_read_unlock();
if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
mdname(mddev), mddev->max_disks);
return -EBUSY;
}
bdevname(rdev->bdev,b);
strreplace(b, '/', '!');
rdev->mddev = mddev;
printk(KERN_INFO "md: bind<%s>\n", b);
if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
goto fail;
ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
if (sysfs_create_link(&rdev->kobj, ko, "block"))
/* failure here is OK */;
rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
list_add_rcu(&rdev->same_set, &mddev->disks);
bd_link_disk_holder(rdev->bdev, mddev->gendisk);
/* May as well allow recovery to be retried once */
mddev->recovery_disabled++;
return 0;
fail:
printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
b, mdname(mddev));
return err;
}
static void md_delayed_delete(struct work_struct *ws)
{
struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
kobject_del(&rdev->kobj);
kobject_put(&rdev->kobj);
}
static void unbind_rdev_from_array(struct md_rdev *rdev)
{
char b[BDEVNAME_SIZE];
bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
list_del_rcu(&rdev->same_set);
printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
rdev->mddev = NULL;
sysfs_remove_link(&rdev->kobj, "block");
sysfs_put(rdev->sysfs_state);
rdev->sysfs_state = NULL;
rdev->badblocks.count = 0;
/* We need to delay this, otherwise we can deadlock when
 * writing 'remove' to "dev/state". We also need
 * to delay it due to rcu usage.
 */
synchronize_rcu();
INIT_WORK(&rdev->del_work, md_delayed_delete);
kobject_get(&rdev->kobj);
queue_work(md_misc_wq, &rdev->del_work);
}
/*
* prevent the device from being mounted, repartitioned or
* otherwise reused by a RAID array (or any other kernel
* subsystem), by bd_claiming the device.
*/
static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
{
int err = 0;
struct block_device *bdev;
char b[BDEVNAME_SIZE];
bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
shared ? (struct md_rdev *)lock_rdev : rdev);
if (IS_ERR(bdev)) {
printk(KERN_ERR "md: could not open %s.\n",
__bdevname(dev, b));
return PTR_ERR(bdev);
}
rdev->bdev = bdev;
return err;
}
static void unlock_rdev(struct md_rdev *rdev)
{
struct block_device *bdev = rdev->bdev;
rdev->bdev = NULL;
blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
void md_autodetect_dev(dev_t dev);
static void export_rdev(struct md_rdev *rdev)
{
char b[BDEVNAME_SIZE];
printk(KERN_INFO "md: export_rdev(%s)\n",
bdevname(rdev->bdev,b));
md_rdev_clear(rdev);
#ifndef MODULE
if (test_bit(AutoDetected, &rdev->flags))
md_autodetect_dev(rdev->bdev->bd_dev);
#endif
unlock_rdev(rdev);
kobject_put(&rdev->kobj);
}
void md_kick_rdev_from_array(struct md_rdev *rdev)
{
unbind_rdev_from_array(rdev);
export_rdev(rdev);
}
EXPORT_SYMBOL_GPL(md_kick_rdev_from_array);
static void export_array(struct mddev *mddev)
{
struct md_rdev *rdev;
while (!list_empty(&mddev->disks)) {
rdev = list_first_entry(&mddev->disks, struct md_rdev,
same_set);
md_kick_rdev_from_array(rdev);
}
mddev->raid_disks = 0;
mddev->major_version = 0;
}
static void sync_sbs(struct mddev *mddev, int nospares)
{
/* Update each superblock (in-memory image), but
* if we are allowed to, skip spares which already
* have the right event counter, or have one earlier
* (which would mean they aren't being marked as dirty
* with the rest of the array)
*/
struct md_rdev *rdev;
rdev_for_each(rdev, mddev) {
if (rdev->sb_events == mddev->events ||
(nospares &&
rdev->raid_disk < 0 &&
rdev->sb_events+1 == mddev->events)) {
/* Don't update this superblock */
rdev->sb_loaded = 2;
} else {
sync_super(mddev, rdev);
rdev->sb_loaded = 1;
}
}
}
void md_update_sb(struct mddev *mddev, int force_change)
{
struct md_rdev *rdev;
int sync_req;
int nospares = 0;
int any_badblocks_changed = 0;
if (mddev->ro) {
if (force_change)
set_bit(MD_CHANGE_DEVS, &mddev->flags);
return;
}
repeat:
/* First make sure individual recovery_offsets are correct */
rdev_for_each(rdev, mddev) {
if (rdev->raid_disk >= 0 &&
mddev->delta_disks >= 0 &&
!test_bit(In_sync, &rdev->flags) &&
mddev->curr_resync_completed > rdev->recovery_offset)
rdev->recovery_offset = mddev->curr_resync_completed;
}
if (!mddev->persistent) {
clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
clear_bit(MD_CHANGE_DEVS, &mddev->flags);
if (!mddev->external) {
clear_bit(MD_CHANGE_PENDING, &mddev->flags);
rdev_for_each(rdev, mddev) {
if (rdev->badblocks.changed) {
rdev->badblocks.changed = 0;
md_ack_all_badblocks(&rdev->badblocks);
md_error(mddev, rdev);
}
clear_bit(Blocked, &rdev->flags);
clear_bit(BlockedBadBlocks, &rdev->flags);
wake_up(&rdev->blocked_wait);
}
}
wake_up(&mddev->sb_wait);
return;
}
spin_lock(&mddev->lock);
mddev->utime = get_seconds();
if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
force_change = 1;
if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
/* just a clean <-> dirty transition, possibly leave spares alone,
 * though if 'events' isn't the right even/odd, we will have to
 * update spares after all
 */
nospares = 1;
if (force_change)
nospares = 0;
if (mddev->degraded)
/* If the array is degraded, then skipping spares is both
* dangerous and fairly pointless.
* Dangerous because a device that was removed from the array
* might have an event_count that still looks up-to-date,
* so it can be re-added without a resync.
* Pointless because if there are any spares to skip,
* then a recovery will happen and soon that array won't
* be degraded any more and the spare can go back to sleep then.
*/
nospares = 0;
sync_req = mddev->in_sync;
/* If this is just a dirty<->clean transition, and the array is clean
* and 'events' is odd, we can roll back to the previous clean state */
if (nospares
&& (mddev->in_sync && mddev->recovery_cp == MaxSector)
&& mddev->can_decrease_events
&& mddev->events != 1) {
mddev->events--;
mddev->can_decrease_events = 0;
} else {
/* otherwise we have to go forward and ... */
mddev->events++;
mddev->can_decrease_events = nospares;
}
/*
* This 64-bit counter should never wrap.
* Either we are in around ~1 trillion A.C., assuming
* 1 reboot per second, or we have a bug...
*/
WARN_ON(mddev->events == 0);
rdev_for_each(rdev, mddev) {
if (rdev->badblocks.changed)
any_badblocks_changed++;
if (test_bit(Faulty, &rdev->flags))
set_bit(FaultRecorded, &rdev->flags);
}
sync_sbs(mddev, nospares);
spin_unlock(&mddev->lock);
pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
mdname(mddev), mddev->in_sync);
bitmap_update_sb(mddev->bitmap);
rdev_for_each(rdev, mddev) {
char b[BDEVNAME_SIZE];
if (rdev->sb_loaded != 1)
continue; /* no noise on spare devices */
if (!test_bit(Faulty, &rdev->flags)) {
md_super_write(mddev,rdev,
rdev->sb_start, rdev->sb_size,
rdev->sb_page);
pr_debug("md: (write) %s's sb offset: %llu\n",
bdevname(rdev->bdev, b),
(unsigned long long)rdev->sb_start);
rdev->sb_events = mddev->events;
if (rdev->badblocks.size) {
md_super_write(mddev, rdev,
rdev->badblocks.sector,
rdev->badblocks.size << 9,
rdev->bb_page);
rdev->badblocks.size = 0;
}
} else
pr_debug("md: %s (skipping faulty)\n",
bdevname(rdev->bdev, b));
if (mddev->level == LEVEL_MULTIPATH)
/* only need to write one superblock... */
break;
}
md_super_wait(mddev);
/* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
spin_lock(&mddev->lock);
if (mddev->in_sync != sync_req ||
test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
/* have to write it out again */
spin_unlock(&mddev->lock);
goto repeat;
}
clear_bit(MD_CHANGE_PENDING, &mddev->flags);
spin_unlock(&mddev->lock);
wake_up(&mddev->sb_wait);
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
rdev_for_each(rdev, mddev) {
if (test_and_clear_bit(FaultRecorded, &rdev->flags))
clear_bit(Blocked, &rdev->flags);
if (any_badblocks_changed)
md_ack_all_badblocks(&rdev->badblocks);
clear_bit(BlockedBadBlocks, &rdev->flags);
wake_up(&rdev->blocked_wait);
}
}
EXPORT_SYMBOL(md_update_sb);
static int add_bound_rdev(struct md_rdev *rdev)
{
struct mddev *mddev = rdev->mddev;
int err = 0;
if (!mddev->pers->hot_remove_disk) {
/* If there is hot_add_disk but no hot_remove_disk
* then added disks for geometry changes,
* and should be added immediately.
*/
super_types[mddev->major_version].
validate_super(mddev, rdev);
err = mddev->pers->hot_add_disk(mddev, rdev);
if (err) {
unbind_rdev_from_array(rdev);
export_rdev(rdev);
return err;
}
}
sysfs_notify_dirent_safe(rdev->sysfs_state);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
if (mddev->degraded)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_new_event(mddev);
md_wakeup_thread(mddev->thread);
return 0;
}
/* Words written to sysfs files may, or may not, be \n terminated.
 * We want to accept either form. For this we use cmd_match.
 */
static int cmd_match(const char *cmd, const char *str)
{
/* See if cmd, written into a sysfs file, matches
* str. They must either be the same, or cmd can
* have a trailing newline
*/
while (*cmd && *str && *cmd == *str) {
cmd++;
str++;
}
if (*cmd == '\n')
cmd++;
if (*str || *cmd)
return 0;
return 1;
}
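/*
 * Illustrative behaviour of cmd_match() (examples added here, not part
 * of the original source):
 *
 *	cmd_match("faulty\n", "faulty") == 1	trailing newline ignored
 *	cmd_match("faulty", "faulty") == 1	exact match
 *	cmd_match("fault", "faulty") == 0	a prefix is not enough
 *	cmd_match("faultyX", "faulty") == 0	trailing junk rejected
 */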
struct rdev_sysfs_entry {
struct attribute attr;
ssize_t (*show)(struct md_rdev *, char *);
ssize_t (*store)(struct md_rdev *, const char *, size_t);
};
static ssize_t
state_show(struct md_rdev *rdev, char *page)
{
char *sep = "";
size_t len = 0;
unsigned long flags = ACCESS_ONCE(rdev->flags);
if (test_bit(Faulty, &flags) ||
rdev->badblocks.unacked_exist) {
len += sprintf(page+len, "%sfaulty", sep);
sep = ",";
}
if (test_bit(In_sync, &flags)) {
len += sprintf(page+len, "%sin_sync", sep);
sep = ",";
}
if (test_bit(WriteMostly, &flags)) {
len += sprintf(page+len, "%swrite_mostly", sep);
sep = ",";
}
if (test_bit(Blocked, &flags) ||
(rdev->badblocks.unacked_exist
&& !test_bit(Faulty, &flags))) {
len += sprintf(page+len, "%sblocked", sep);
sep = ",";
}
if (!test_bit(Faulty, &flags) &&
!test_bit(In_sync, &flags)) {
len += sprintf(page+len, "%sspare", sep);
sep = ",";
}
if (test_bit(WriteErrorSeen, &flags)) {
len += sprintf(page+len, "%swrite_error", sep);
sep = ",";
}
if (test_bit(WantReplacement, &flags)) {
len += sprintf(page+len, "%swant_replacement", sep);
sep = ",";
}
if (test_bit(Replacement, &flags)) {
len += sprintf(page+len, "%sreplacement", sep);
sep = ",";
}
return len+sprintf(page+len, "\n");
}
static ssize_t
state_store(struct md_rdev *rdev, const char *buf, size_t len)
{
/* can write
* faulty - simulates an error
* remove - disconnects the device
* writemostly - sets write_mostly
* -writemostly - clears write_mostly
* blocked - sets the Blocked flag
* -blocked - clears the Blocked flag and possibly simulates an error
* insync - sets In_sync provided the device isn't active
* -insync - clears In_sync for a device with a slot assigned,
* so that it gets rebuilt based on the bitmap
* write_error - sets WriteErrorSeen
* -write_error - clears WriteErrorSeen
*/
int err = -EINVAL;
if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
md_error(rdev->mddev, rdev);
if (test_bit(Faulty, &rdev->flags))
err = 0;
else
err = -EBUSY;
} else if (cmd_match(buf, "remove")) {
if (rdev->raid_disk >= 0)
err = -EBUSY;
else {
struct mddev *mddev = rdev->mddev;
if (mddev_is_clustered(mddev))
md_cluster_ops->remove_disk(mddev, rdev);
md_kick_rdev_from_array(rdev);
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_start(mddev);
if (mddev->pers)
md_update_sb(mddev, 1);
md_new_event(mddev);
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
err = 0;
}
} else if (cmd_match(buf, "writemostly")) {
set_bit(WriteMostly, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "-writemostly")) {
clear_bit(WriteMostly, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "blocked")) {
set_bit(Blocked, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "-blocked")) {
if (!test_bit(Faulty, &rdev->flags) &&
rdev->badblocks.unacked_exist) {
/* metadata handler doesn't understand badblocks,
* so we need to fail the device
*/
md_error(rdev->mddev, rdev);
}
clear_bit(Blocked, &rdev->flags);
clear_bit(BlockedBadBlocks, &rdev->flags);
wake_up(&rdev->blocked_wait);
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
md_wakeup_thread(rdev->mddev->thread);
err = 0;
} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
set_bit(In_sync, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0) {
if (rdev->mddev->pers == NULL) {
clear_bit(In_sync, &rdev->flags);
rdev->saved_raid_disk = rdev->raid_disk;
rdev->raid_disk = -1;
err = 0;
}
} else if (cmd_match(buf, "write_error")) {
set_bit(WriteErrorSeen, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "-write_error")) {
clear_bit(WriteErrorSeen, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "want_replacement")) {
/* Any non-spare device that is not a replacement can
* become want_replacement at any time, but we then need to
* check if recovery is needed.
*/
if (rdev->raid_disk >= 0 &&
!test_bit(Replacement, &rdev->flags))
set_bit(WantReplacement, &rdev->flags);
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
md_wakeup_thread(rdev->mddev->thread);
err = 0;
} else if (cmd_match(buf, "-want_replacement")) {
/* Clearing 'want_replacement' is always allowed.
* Once replacements starts it is too late though.
*/
err = 0;
clear_bit(WantReplacement, &rdev->flags);
} else if (cmd_match(buf, "replacement")) {
/* Can only set a device as a replacement when array has not
* yet been started. Once running, replacement is automatic
* from spares, or by assigning 'slot'.
*/
if (rdev->mddev->pers)
err = -EBUSY;
else {
set_bit(Replacement, &rdev->flags);
err = 0;
}
} else if (cmd_match(buf, "-replacement")) {
/* Similarly, can only clear Replacement before start */
if (rdev->mddev->pers)
err = -EBUSY;
else {
clear_bit(Replacement, &rdev->flags);
err = 0;
}
} else if (cmd_match(buf, "re-add")) {
if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1)) {
/* clear_bit is performed _after_ all the devices
* have their local Faulty bit cleared. If any writes
* happen in the meantime in the local node, they
* will land in the local bitmap, which will be synced
* by this node eventually
*/
if (!mddev_is_clustered(rdev->mddev) ||
(err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
clear_bit(Faulty, &rdev->flags);
err = add_bound_rdev(rdev);
}
} else
err = -EBUSY;
}
if (!err)
sysfs_notify_dirent_safe(rdev->sysfs_state);
return err ? err : len;
}
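/*
 * Example usage from userspace (illustrative only; the array and member
 * names below are hypothetical):
 *
 *	# simulate a failure on one member of md0
 *	echo faulty > /sys/block/md0/md/dev-sda1/state
 *	# then detach it from the array
 *	echo remove > /sys/block/md0/md/dev-sda1/state
 */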
static struct rdev_sysfs_entry rdev_state =
__ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
static ssize_t
errors_show(struct md_rdev *rdev, char *page)
{
return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
}
static ssize_t
errors_store(struct md_rdev *rdev, const char *buf, size_t len)
{
unsigned int n;
int rv;
rv = kstrtouint(buf, 10, &n);
if (rv < 0)
return rv;
atomic_set(&rdev->corrected_errors, n);
return len;
}
static struct rdev_sysfs_entry rdev_errors =
__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
static ssize_t
slot_show(struct md_rdev *rdev, char *page)
{
if (rdev->raid_disk < 0)
return sprintf(page, "none\n");
else
return sprintf(page, "%d\n", rdev->raid_disk);
}
static ssize_t
slot_store(struct md_rdev *rdev, const char *buf, size_t len)
{
int slot;
int err;
if (strncmp(buf, "none", 4)==0)
slot = -1;
else {
err = kstrtouint(buf, 10, (unsigned int *)&slot);
if (err < 0)
return err;
}
if (rdev->mddev->pers && slot == -1) {
/* Setting 'slot' on an active array requires also
* updating the 'rd%d' link, and communicating
* with the personality with ->hot_*_disk.
* For now we only support removing
* failed/spare devices. This normally happens automatically,
* but not when the metadata is externally managed.
*/
if (rdev->raid_disk == -1)
return -EEXIST;
/* personality does all needed checks */
if (rdev->mddev->pers->hot_remove_disk == NULL)
return -EINVAL;
clear_bit(Blocked, &rdev->flags);
remove_and_add_spares(rdev->mddev, rdev);
if (rdev->raid_disk >= 0)
return -EBUSY;
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
md_wakeup_thread(rdev->mddev->thread);
} else if (rdev->mddev->pers) {
/* Activating a spare .. or possibly reactivating
* if we ever get bitmaps working here.
*/
if (rdev->raid_disk != -1)
return -EBUSY;
if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
return -EBUSY;
if (rdev->mddev->pers->hot_add_disk == NULL)
return -EINVAL;
if (slot >= rdev->mddev->raid_disks &&
slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
return -ENOSPC;
rdev->raid_disk = slot;
if (test_bit(In_sync, &rdev->flags))
rdev->saved_raid_disk = slot;
else
rdev->saved_raid_disk = -1;
clear_bit(In_sync, &rdev->flags);
clear_bit(Bitmap_sync, &rdev->flags);
err = rdev->mddev->pers->
hot_add_disk(rdev->mddev, rdev);
if (err) {
rdev->raid_disk = -1;
return err;
} else
sysfs_notify_dirent_safe(rdev->sysfs_state);
if (sysfs_link_rdev(rdev->mddev, rdev))
/* failure here is OK */;
/* don't wakeup anyone, leave that to userspace. */
} else {
if (slot >= rdev->mddev->raid_disks &&
slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
return -ENOSPC;
rdev->raid_disk = slot;
/* assume it is working */
clear_bit(Faulty, &rdev->flags);
clear_bit(WriteMostly, &rdev->flags);
set_bit(In_sync, &rdev->flags);
sysfs_notify_dirent_safe(rdev->sysfs_state);
}
return len;
}
static struct rdev_sysfs_entry rdev_slot =
__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
static ssize_t
offset_show(struct md_rdev *rdev, char *page)
{
return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
}
static ssize_t
offset_store(struct md_rdev *rdev, const char *buf, size_t len)
{
unsigned long long offset;
if (kstrtoull(buf, 10, &offset) < 0)
return -EINVAL;
if (rdev->mddev->pers && rdev->raid_disk >= 0)
return -EBUSY;
if (rdev->sectors && rdev->mddev->external)
/* Must set offset before size, so overlap checks
* can be sane */
return -EBUSY;
rdev->data_offset = offset;
rdev->new_data_offset = offset;
return len;
}
static struct rdev_sysfs_entry rdev_offset =
__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
{
return sprintf(page, "%llu\n",
(unsigned long long)rdev->new_data_offset);
}
static ssize_t new_offset_store(struct md_rdev *rdev,
const char *buf, size_t len)
{
unsigned long long new_offset;
struct mddev *mddev = rdev->mddev;
if (kstrtoull(buf, 10, &new_offset) < 0)
return -EINVAL;
if (mddev->sync_thread ||
test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
return -EBUSY;
if (new_offset == rdev->data_offset)
/* reset is always permitted */
;
else if (new_offset > rdev->data_offset) {
/* must not push array size beyond rdev_sectors */
if (new_offset - rdev->data_offset
+ mddev->dev_sectors > rdev->sectors)
return -E2BIG;
}
/* Metadata worries about other space details. */
/* decreasing the offset is inconsistent with a backwards
* reshape.
*/
if (new_offset < rdev->data_offset &&
mddev->reshape_backwards)
return -EINVAL;
/* Increasing offset is inconsistent with forwards
* reshape. reshape_direction should be set to
* 'backwards' first.
*/
if (new_offset > rdev->data_offset &&
!mddev->reshape_backwards)
return -EINVAL;
if (mddev->pers && mddev->persistent &&
!super_types[mddev->major_version]
.allow_new_offset(rdev, new_offset))
return -E2BIG;
rdev->new_data_offset = new_offset;
if (new_offset > rdev->data_offset)
mddev->reshape_backwards = 1;
else if (new_offset < rdev->data_offset)
mddev->reshape_backwards = 0;
return len;
}
static struct rdev_sysfs_entry rdev_new_offset =
__ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
static ssize_t
rdev_size_show(struct md_rdev *rdev, char *page)
{
return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
}
static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
{
/* check if two start/length pairs overlap */
if (s1+l1 <= s2)
return 0;
if (s2+l2 <= s1)
return 0;
return 1;
}
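/*
 * Worked example (added): the ranges are treated as half-open, so
 * (s1=0, l1=100) and (s2=100, l2=50) do not overlap because
 * 0 + 100 <= 100, while (s1=0, l1=101) and (s2=100, l2=50) do.
 */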
static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
{
unsigned long long blocks;
sector_t new;
if (kstrtoull(buf, 10, &blocks) < 0)
return -EINVAL;
if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
return -EINVAL; /* sector conversion overflow */
new = blocks * 2;
if (new != blocks * 2)
return -EINVAL; /* unsigned long long to sector_t overflow */
*sectors = new;
return 0;
}
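/*
 * Illustrative conversion (added): "blocks" are 1 KiB units and a
 * sector is 512 bytes, hence the "* 2". Writing "1048576" (1 GiB
 * expressed in KiB) yields 2097152 sectors. The two checks reject
 * values whose doubling would overflow unsigned long long or a
 * 32-bit sector_t.
 */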
static ssize_t
rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
{
struct mddev *my_mddev = rdev->mddev;
sector_t oldsectors = rdev->sectors;
sector_t sectors;
if (strict_blocks_to_sectors(buf, &sectors) < 0)
return -EINVAL;
if (rdev->data_offset != rdev->new_data_offset)
return -EINVAL; /* too confusing */
if (my_mddev->pers && rdev->raid_disk >= 0) {
if (my_mddev->persistent) {
sectors = super_types[my_mddev->major_version].
rdev_size_change(rdev, sectors);
if (!sectors)
return -EBUSY;
} else if (!sectors)
sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
rdev->data_offset;
if (!my_mddev->pers->resize)
/* Cannot change size for RAID0 or Linear etc */
return -EINVAL;
}
if (sectors < my_mddev->dev_sectors)
return -EINVAL; /* component must fit device */
rdev->sectors = sectors;
if (sectors > oldsectors && my_mddev->external) {
/* Need to check that all other rdevs with the same
* ->bdev do not overlap. 'rcu' is sufficient to walk
* the rdev lists safely.
* This check does not provide a hard guarantee, it
* just helps avoid dangerous mistakes.
*/
struct mddev *mddev;
int overlap = 0;
struct list_head *tmp;
rcu_read_lock();
for_each_mddev(mddev, tmp) {
struct md_rdev *rdev2;
rdev_for_each(rdev2, mddev)
if (rdev->bdev == rdev2->bdev &&
rdev != rdev2 &&
overlaps(rdev->data_offset, rdev->sectors,
rdev2->data_offset,
rdev2->sectors)) {
overlap = 1;
break;
}
if (overlap) {
mddev_put(mddev);
break;
}
}
rcu_read_unlock();
if (overlap) {
/* Someone else could have slipped in a size
* change here, but doing so is just silly.
* We put oldsectors back because we *know* it is
* safe, and trust userspace not to race with
* itself
*/
rdev->sectors = oldsectors;
return -EBUSY;
}
}
return len;
}
static struct rdev_sysfs_entry rdev_size =
__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
{
unsigned long long recovery_start = rdev->recovery_offset;
if (test_bit(In_sync, &rdev->flags) ||
recovery_start == MaxSector)
return sprintf(page, "none\n");
return sprintf(page, "%llu\n", recovery_start);
}
static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
{
unsigned long long recovery_start;
if (cmd_match(buf, "none"))
recovery_start = MaxSector;
else if (kstrtoull(buf, 10, &recovery_start))
return -EINVAL;
if (rdev->mddev->pers &&
rdev->raid_disk >= 0)
return -EBUSY;
rdev->recovery_offset = recovery_start;
if (recovery_start == MaxSector)
set_bit(In_sync, &rdev->flags);
else
clear_bit(In_sync, &rdev->flags);
return len;
}
static struct rdev_sysfs_entry rdev_recovery_start =
__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
static ssize_t
badblocks_show(struct badblocks *bb, char *page, int unack);
static ssize_t
badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack);
static ssize_t bb_show(struct md_rdev *rdev, char *page)
{
return badblocks_show(&rdev->badblocks, page, 0);
}
static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
{
int rv = badblocks_store(&rdev->badblocks, page, len, 0);
/* Maybe that ack was all we needed */
if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
wake_up(&rdev->blocked_wait);
return rv;
}
static struct rdev_sysfs_entry rdev_bad_blocks =
__ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
static ssize_t ubb_show(struct md_rdev *rdev, char *page)
{
return badblocks_show(&rdev->badblocks, page, 1);
}
static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
{
return badblocks_store(&rdev->badblocks, page, len, 1);
}
static struct rdev_sysfs_entry rdev_unack_bad_blocks =
__ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
static struct attribute *rdev_default_attrs[] = {
&rdev_state.attr,
&rdev_errors.attr,
&rdev_slot.attr,
&rdev_offset.attr,
&rdev_new_offset.attr,
&rdev_size.attr,
&rdev_recovery_start.attr,
&rdev_bad_blocks.attr,
&rdev_unack_bad_blocks.attr,
NULL,
};
static ssize_t
rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
if (!entry->show)
return -EIO;
if (!rdev->mddev)
return -EBUSY;
return entry->show(rdev, page);
}
static ssize_t
rdev_attr_store(struct kobject *kobj, struct attribute *attr,
const char *page, size_t length)
{
struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
ssize_t rv;
struct mddev *mddev = rdev->mddev;
if (!entry->store)
return -EIO;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
rv = mddev ? mddev_lock(mddev): -EBUSY;
if (!rv) {
if (rdev->mddev == NULL)
rv = -EBUSY;
else
rv = entry->store(rdev, page, length);
mddev_unlock(mddev);
}
return rv;
}
static void rdev_free(struct kobject *ko)
{
struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
kfree(rdev);
}
static const struct sysfs_ops rdev_sysfs_ops = {
.show = rdev_attr_show,
.store = rdev_attr_store,
};
static struct kobj_type rdev_ktype = {
.release = rdev_free,
.sysfs_ops = &rdev_sysfs_ops,
.default_attrs = rdev_default_attrs,
};
int md_rdev_init(struct md_rdev *rdev)
{
rdev->desc_nr = -1;
rdev->saved_raid_disk = -1;
rdev->raid_disk = -1;
rdev->flags = 0;
rdev->data_offset = 0;
rdev->new_data_offset = 0;
rdev->sb_events = 0;
rdev->last_read_error.tv_sec = 0;
rdev->last_read_error.tv_nsec = 0;
rdev->sb_loaded = 0;
rdev->bb_page = NULL;
atomic_set(&rdev->nr_pending, 0);
atomic_set(&rdev->read_errors, 0);
atomic_set(&rdev->corrected_errors, 0);
INIT_LIST_HEAD(&rdev->same_set);
init_waitqueue_head(&rdev->blocked_wait);
/* Add space to store bad block list.
* This reserves the space even on arrays where it cannot
* be used - I wonder if that matters
*/
rdev->badblocks.count = 0;
rdev->badblocks.shift = -1; /* disabled until explicitly enabled */
rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL);
seqlock_init(&rdev->badblocks.lock);
if (rdev->badblocks.page == NULL)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL_GPL(md_rdev_init);
/*
* Import a device. If 'super_format' >= 0, then sanity check the superblock
*
* mark the device faulty if:
*
* - the device is nonexistent (zero size)
* - the device has no valid superblock
*
* a faulty rdev _never_ has rdev->sb set.
*/
static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
{
char b[BDEVNAME_SIZE];
int err;
struct md_rdev *rdev;
sector_t size;
rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
if (!rdev) {
printk(KERN_ERR "md: could not alloc mem for new device!\n");
return ERR_PTR(-ENOMEM);
}
err = md_rdev_init(rdev);
if (err)
goto abort_free;
err = alloc_disk_sb(rdev);
if (err)
goto abort_free;
err = lock_rdev(rdev, newdev, super_format == -2);
if (err)
goto abort_free;
kobject_init(&rdev->kobj, &rdev_ktype);
size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
if (!size) {
printk(KERN_WARNING
"md: %s has zero or unknown size, marking faulty!\n",
bdevname(rdev->bdev,b));
err = -EINVAL;
goto abort_free;
}
if (super_format >= 0) {
err = super_types[super_format].
load_super(rdev, NULL, super_minor);
if (err == -EINVAL) {
printk(KERN_WARNING
"md: %s does not have a valid v%d.%d "
"superblock, not importing!\n",
bdevname(rdev->bdev,b),
super_format, super_minor);
goto abort_free;
}
if (err < 0) {
printk(KERN_WARNING
"md: could not read %s's sb, not importing!\n",
bdevname(rdev->bdev,b));
goto abort_free;
}
}
return rdev;
abort_free:
if (rdev->bdev)
unlock_rdev(rdev);
md_rdev_clear(rdev);
kfree(rdev);
return ERR_PTR(err);
}
/*
* Check a full RAID array for plausibility
*/
static void analyze_sbs(struct mddev *mddev)
{
int i;
struct md_rdev *rdev, *freshest, *tmp;
char b[BDEVNAME_SIZE];
freshest = NULL;
rdev_for_each_safe(rdev, tmp, mddev)
switch (super_types[mddev->major_version].
load_super(rdev, freshest, mddev->minor_version)) {
case 1:
freshest = rdev;
break;
case 0:
break;
default:
printk(KERN_ERR
"md: fatal superblock inconsistency in %s"
" -- removing from array\n",
bdevname(rdev->bdev, b));
md_kick_rdev_from_array(rdev);
}
super_types[mddev->major_version].
validate_super(mddev, freshest);
i = 0;
rdev_for_each_safe(rdev, tmp, mddev) {
if (mddev->max_disks &&
(rdev->desc_nr >= mddev->max_disks ||
i > mddev->max_disks)) {
printk(KERN_WARNING
"md: %s: %s: only %d devices permitted\n",
mdname(mddev), bdevname(rdev->bdev, b),
mddev->max_disks);
md_kick_rdev_from_array(rdev);
continue;
}
if (rdev != freshest) {
if (super_types[mddev->major_version].
validate_super(mddev, rdev)) {
printk(KERN_WARNING "md: kicking non-fresh %s"
" from array!\n",
bdevname(rdev->bdev,b));
md_kick_rdev_from_array(rdev);
continue;
}
/* No device should have a Candidate flag
* when reading devices
*/
if (test_bit(Candidate, &rdev->flags)) {
pr_info("md: kicking Cluster Candidate %s from array!\n",
bdevname(rdev->bdev, b));
md_kick_rdev_from_array(rdev);
}
}
if (mddev->level == LEVEL_MULTIPATH) {
rdev->desc_nr = i++;
rdev->raid_disk = rdev->desc_nr;
set_bit(In_sync, &rdev->flags);
} else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
rdev->raid_disk = -1;
clear_bit(In_sync, &rdev->flags);
}
}
}
/* Read a fixed-point number.
* Numbers in sysfs attributes should be in "standard" units where
* possible, so time should be in seconds.
* However we internally use a much smaller unit such as
* milliseconds or jiffies.
* This function takes a decimal number with a possible fractional
* component, and produces an integer which is the result of
* multiplying that number by 10^'scale',
* all without any floating-point arithmetic.
*/
int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
{
unsigned long result = 0;
long decimals = -1;
while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
if (*cp == '.')
decimals = 0;
else if (decimals < scale) {
unsigned int value;
value = *cp - '0';
result = result * 10 + value;
if (decimals >= 0)
decimals++;
}
cp++;
}
if (*cp == '\n')
cp++;
if (*cp)
return -EINVAL;
if (decimals < 0)
decimals = 0;
while (decimals < scale) {
result *= 10;
decimals++;
}
*res = result;
return 0;
}
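/*
 * Worked examples (added, illustrative only):
 *
 *	strict_strtoul_scaled("1.25", &res, 3)   -> res = 1250
 *	strict_strtoul_scaled("3", &res, 3)      -> res = 3000
 *	strict_strtoul_scaled("0.1234", &res, 3) -> res = 123
 *
 * Fractional digits beyond 'scale' are silently dropped.
 */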
static void md_safemode_timeout(unsigned long data);
static ssize_t
safe_delay_show(struct mddev *mddev, char *page)
{
int msec = (mddev->safemode_delay*1000)/HZ;
return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
}
static ssize_t
safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
{
unsigned long msec;
if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
return -EINVAL;
if (msec == 0)
mddev->safemode_delay = 0;
else {
unsigned long old_delay = mddev->safemode_delay;
unsigned long new_delay = (msec*HZ)/1000;
if (new_delay == 0)
new_delay = 1;
mddev->safemode_delay = new_delay;
if (new_delay < old_delay || old_delay == 0)
mod_timer(&mddev->safemode_timer, jiffies+1);
}
return len;
}
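/*
 * Example (added): writing "0.100" parses to msec = 100, so
 * safemode_delay becomes (100 * HZ) / 1000 jiffies (25 jiffies with
 * HZ = 250). A non-zero request that rounds down to 0 jiffies is
 * bumped to 1 so it never silently disables safemode.
 */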
static struct md_sysfs_entry md_safe_delay =
__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
static ssize_t
level_show(struct mddev *mddev, char *page)
{
struct md_personality *p;
int ret;
spin_lock(&mddev->lock);
p = mddev->pers;
if (p)
ret = sprintf(page, "%s\n", p->name);
else if (mddev->clevel[0])
ret = sprintf(page, "%s\n", mddev->clevel);
else if (mddev->level != LEVEL_NONE)
ret = sprintf(page, "%d\n", mddev->level);
else
ret = 0;
spin_unlock(&mddev->lock);
return ret;
}
static ssize_t
level_store(struct mddev *mddev, const char *buf, size_t len)
{
char clevel[16];
ssize_t rv;
size_t slen = len;
struct md_personality *pers, *oldpers;
long level;
void *priv, *oldpriv;
struct md_rdev *rdev;
if (slen == 0 || slen >= sizeof(clevel))
return -EINVAL;
rv = mddev_lock(mddev);
if (rv)
return rv;
if (mddev->pers == NULL) {
strncpy(mddev->clevel, buf, slen);
if (mddev->clevel[slen-1] == '\n')
slen--;
mddev->clevel[slen] = 0;
mddev->level = LEVEL_NONE;
rv = len;
goto out_unlock;
}
rv = -EROFS;
if (mddev->ro)
goto out_unlock;
/* request to change the personality. Need to ensure:
 * - array is not engaged in resync/recovery/reshape
 * - old personality can be suspended
 * - new personality will accept the array.
 */
rv = -EBUSY;
if (mddev->sync_thread ||
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
mddev->reshape_position != MaxSector ||
mddev->sysfs_active)
goto out_unlock;
rv = -EINVAL;
if (!mddev->pers->quiesce) {
printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
mdname(mddev), mddev->pers->name);
goto out_unlock;
}
/* Now find the new personality */
strncpy(clevel, buf, slen);
if (clevel[slen-1] == '\n')
slen--;
clevel[slen] = 0;
if (kstrtol(clevel, 10, &level))
level = LEVEL_NONE;
if (request_module("md-%s", clevel) != 0)
request_module("md-level-%s", clevel);
spin_lock(&pers_lock);
pers = find_pers(level, clevel);
if (!pers || !try_module_get(pers->owner)) {
spin_unlock(&pers_lock);
printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
rv = -EINVAL;
goto out_unlock;
}
spin_unlock(&pers_lock);
if (pers == mddev->pers) {
/* Nothing to do! */
module_put(pers->owner);
rv = len;
goto out_unlock;
}
if (!pers->takeover) {
module_put(pers->owner);
printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
mdname(mddev), clevel);
rv = -EINVAL;
goto out_unlock;
}
rdev_for_each(rdev, mddev)
rdev->new_raid_disk = rdev->raid_disk;
/* ->takeover must set new_* and/or delta_disks
* if it succeeds, and may set them when it fails.
*/
priv = pers->takeover(mddev);
if (IS_ERR(priv)) {
mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout;
mddev->new_chunk_sectors = mddev->chunk_sectors;
mddev->raid_disks -= mddev->delta_disks;
mddev->delta_disks = 0;
mddev->reshape_backwards = 0;
module_put(pers->owner);
printk(KERN_WARNING "md: %s: %s would not accept array\n",
mdname(mddev), clevel);
rv = PTR_ERR(priv);
goto out_unlock;
}
/* Looks like we have a winner */
mddev_suspend(mddev);
mddev_detach(mddev);
spin_lock(&mddev->lock);
oldpers = mddev->pers;
oldpriv = mddev->private;
mddev->pers = pers;
mddev->private = priv;
strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
mddev->level = mddev->new_level;
mddev->layout = mddev->new_layout;
mddev->chunk_sectors = mddev->new_chunk_sectors;
mddev->delta_disks = 0;
mddev->reshape_backwards = 0;
mddev->degraded = 0;
spin_unlock(&mddev->lock);
if (oldpers->sync_request == NULL &&
mddev->external) {
/* We are converting from a no-redundancy array
* to a redundancy array and metadata is managed
* externally so we need to be sure that writes
* won't block due to a need to transition
* clean->dirty
* until external management is started.
*/
mddev->in_sync = 0;
mddev->safemode_delay = 0;
mddev->safemode = 0;
}
oldpers->free(mddev, oldpriv);
if (oldpers->sync_request == NULL &&
pers->sync_request != NULL) {
/* need to add the md_redundancy_group */
if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
printk(KERN_WARNING
"md: cannot register extra attributes for %s\n",
mdname(mddev));
mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
}
if (oldpers->sync_request != NULL &&
pers->sync_request == NULL) {
/* need to remove the md_redundancy_group */
if (mddev->to_remove == NULL)
mddev->to_remove = &md_redundancy_group;
}
rdev_for_each(rdev, mddev) {
if (rdev->raid_disk < 0)
continue;
if (rdev->new_raid_disk >= mddev->raid_disks)
rdev->new_raid_disk = -1;
if (rdev->new_raid_disk == rdev->raid_disk)
continue;
sysfs_unlink_rdev(mddev, rdev);
}
rdev_for_each(rdev, mddev) {
if (rdev->raid_disk < 0)
continue;
if (rdev->new_raid_disk == rdev->raid_disk)
continue;
rdev->raid_disk = rdev->new_raid_disk;
if (rdev->raid_disk < 0)
clear_bit(In_sync, &rdev->flags);
else {
if (sysfs_link_rdev(mddev, rdev))
printk(KERN_WARNING "md: cannot register rd%d"
" for %s after level change\n",
rdev->raid_disk, mdname(mddev));
}
}
if (pers->sync_request == NULL) {
/* this is now an array without redundancy, so
* it must always be in_sync
*/
mddev->in_sync = 1;
del_timer_sync(&mddev->safemode_timer);
}
blk_set_stacking_limits(&mddev->queue->limits);
pers->run(mddev);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
mddev_resume(mddev);
if (!mddev->thread)
md_update_sb(mddev, 1);
sysfs_notify(&mddev->kobj, NULL, "level");
md_new_event(mddev);
rv = len;
out_unlock:
mddev_unlock(mddev);
return rv;
}
static struct md_sysfs_entry md_level =
__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
static ssize_t
layout_show(struct mddev *mddev, char *page)
{
/* just a number, not meaningful for all levels */
if (mddev->reshape_position != MaxSector &&
mddev->layout != mddev->new_layout)
return sprintf(page, "%d (%d)\n",
mddev->new_layout, mddev->layout);
return sprintf(page, "%d\n", mddev->layout);
}
static ssize_t
layout_store(struct mddev *mddev, const char *buf, size_t len)
{
unsigned int n;
int err;
err = kstrtouint(buf, 10, &n);
if (err < 0)
return err;
err = mddev_lock(mddev);
if (err)
return err;
if (mddev->pers) {
if (mddev->pers->check_reshape == NULL)
err = -EBUSY;
else if (mddev->ro)
err = -EROFS;
else {
mddev->new_layout = n;
err = mddev->pers->check_reshape(mddev);
if (err)
mddev->new_layout = mddev->layout;
}
} else {
mddev->new_layout = n;
if (mddev->reshape_position == MaxSector)
mddev->layout = n;
}
mddev_unlock(mddev);
return err ?: len;
}
static struct md_sysfs_entry md_layout =
__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
static ssize_t
raid_disks_show(struct mddev *mddev, char *page)
{
if (mddev->raid_disks == 0)
return 0;
if (mddev->reshape_position != MaxSector &&
mddev->delta_disks != 0)
return sprintf(page, "%d (%d)\n", mddev->raid_disks,
mddev->raid_disks - mddev->delta_disks);
return sprintf(page, "%d\n", mddev->raid_disks);
}
static int update_raid_disks(struct mddev *mddev, int raid_disks);
static ssize_t
raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
{
unsigned int n;
int err;
err = kstrtouint(buf, 10, &n);
if (err < 0)
return err;
err = mddev_lock(mddev);
if (err)
return err;
if (mddev->pers)
err = update_raid_disks(mddev, n);
else if (mddev->reshape_position != MaxSector) {
struct md_rdev *rdev;
int olddisks = mddev->raid_disks - mddev->delta_disks;
err = -EINVAL;
rdev_for_each(rdev, mddev) {
if (olddisks < n &&
rdev->data_offset < rdev->new_data_offset)
goto out_unlock;
if (olddisks > n &&
rdev->data_offset > rdev->new_data_offset)
goto out_unlock;
}
err = 0;
mddev->delta_disks = n - olddisks;
mddev->raid_disks = n;
mddev->reshape_backwards = (mddev->delta_disks < 0);
} else
mddev->raid_disks = n;
out_unlock:
mddev_unlock(mddev);
return err ? err : len;
}
static struct md_sysfs_entry md_raid_disks =
__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
static ssize_t
chunk_size_show(struct mddev *mddev, char *page)
{
if (mddev->reshape_position != MaxSector &&
mddev->chunk_sectors != mddev->new_chunk_sectors)
return sprintf(page, "%d (%d)\n",
mddev->new_chunk_sectors << 9,
mddev->chunk_sectors << 9);
return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
}
static ssize_t
chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
{
unsigned long n;
int err;
err = kstrtoul(buf, 10, &n);
if (err < 0)
return err;
err = mddev_lock(mddev);
if (err)
return err;
if (mddev->pers) {
if (mddev->pers->check_reshape == NULL)
err = -EBUSY;
else if (mddev->ro)
err = -EROFS;
else {
mddev->new_chunk_sectors = n >> 9;
err = mddev->pers->check_reshape(mddev);
if (err)
mddev->new_chunk_sectors = mddev->chunk_sectors;
}
} else {
mddev->new_chunk_sectors = n >> 9;
if (mddev->reshape_position == MaxSector)
mddev->chunk_sectors = n >> 9;
}
mddev_unlock(mddev);
return err ?: len;
}
static struct md_sysfs_entry md_chunk_size =
__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
static ssize_t
resync_start_show(struct mddev *mddev, char *page)
{
if (mddev->recovery_cp == MaxSector)
return sprintf(page, "none\n");
return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
}
static ssize_t
resync_start_store(struct mddev *mddev, const char *buf, size_t len)
{
unsigned long long n;
int err;
if (cmd_match(buf, "none"))
n = MaxSector;
else {
err = kstrtoull(buf, 10, &n);
if (err < 0)
return err;
if (n != (sector_t)n)
return -EINVAL;
}
err = mddev_lock(mddev);
if (err)
return err;
if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
err = -EBUSY;
if (!err) {
mddev->recovery_cp = n;
if (mddev->pers)
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
}
mddev_unlock(mddev);
return err ?: len;
}
static struct md_sysfs_entry md_resync_start =
__ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
resync_start_show, resync_start_store);
/*
* The array state can be:
*
* clear
* No devices, no size, no level
* Equivalent to STOP_ARRAY ioctl
* inactive
* May have some settings, but array is not active
* all IO results in error
* When written, doesn't tear down array, but just stops it
* suspended (not supported yet)
* All IO requests will block. The array can be reconfigured.
* Writing this, if accepted, will block until array is quiescent
* readonly
* no resync can happen. no superblocks get written.
* write requests fail
* read-auto
* like readonly, but behaves like 'clean' on a write request.
*
* clean - no pending writes, but otherwise active.
* When written to inactive array, starts without resync
* If a write request arrives then
* if metadata is known, mark 'dirty' and switch to 'active'.
* if not known, block and switch to write-pending
* If written to an active array that has pending writes, then fails.
* active
* fully active: IO and resync can be happening.
* When written to inactive array, starts with resync
*
* write-pending
* clean, but writes are blocked waiting for 'active' to be written.
*
* active-idle
* like active, but no writes have been seen for a while (100msec).
*
*/
enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
write_pending, active_idle, bad_word};
static char *array_states[] = {
"clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
"write-pending", "active-idle", NULL };
static int match_word(const char *word, char **list)
{
int n;
for (n=0; list[n]; n++)
if (cmd_match(word, list[n]))
break;
return n;
}
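/*
 * Illustration (added): match_word("read-auto\n", array_states)
 * returns 4 (read_auto); an unrecognised word runs off the end of the
 * list and returns the index of the NULL terminator, i.e. bad_word.
 */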
static ssize_t
array_state_show(struct mddev *mddev, char *page)
{
enum array_state st = inactive;
if (mddev->pers)
switch(mddev->ro) {
case 1:
st = readonly;
break;
case 2:
st = read_auto;
break;
case 0:
if (mddev->in_sync)
st = clean;
else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
st = write_pending;
else if (mddev->safemode)
st = active_idle;
else
st = active;
}
else {
if (list_empty(&mddev->disks) &&
mddev->raid_disks == 0 &&
mddev->dev_sectors == 0)
st = clear;
else
st = inactive;
}
return sprintf(page, "%s\n", array_states[st]);
}
static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
static int do_md_run(struct mddev *mddev);
static int restart_array(struct mddev *mddev);
static ssize_t
array_state_store(struct mddev *mddev, const char *buf, size_t len)
{
int err;
enum array_state st = match_word(buf, array_states);
if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
/* don't take reconfig_mutex when toggling between
* clean and active
*/
spin_lock(&mddev->lock);
if (st == active) {
restart_array(mddev);
clear_bit(MD_CHANGE_PENDING, &mddev->flags);
wake_up(&mddev->sb_wait);
err = 0;
} else /* st == clean */ {
restart_array(mddev);
if (atomic_read(&mddev->writes_pending) == 0) {
if (mddev->in_sync == 0) {
mddev->in_sync = 1;
if (mddev->safemode == 1)
mddev->safemode = 0;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
}
err = 0;
} else
err = -EBUSY;
}
spin_unlock(&mddev->lock);
return err ?: len;
}
err = mddev_lock(mddev);
if (err)
return err;
err = -EINVAL;
switch(st) {
case bad_word:
break;
case clear:
/* stopping an active array */
err = do_md_stop(mddev, 0, NULL);
break;
case inactive:
/* stopping an active array */
if (mddev->pers)
err = do_md_stop(mddev, 2, NULL);
else
err = 0; /* already inactive */
break;
case suspended:
break; /* not supported yet */
case readonly:
if (mddev->pers)
err = md_set_readonly(mddev, NULL);
else {
mddev->ro = 1;
set_disk_ro(mddev->gendisk, 1);
err = do_md_run(mddev);
}
break;
case read_auto:
if (mddev->pers) {
if (mddev->ro == 0)
err = md_set_readonly(mddev, NULL);
else if (mddev->ro == 1)
err = restart_array(mddev);
if (err == 0) {
mddev->ro = 2;
set_disk_ro(mddev->gendisk, 0);
}
} else {
mddev->ro = 2;
err = do_md_run(mddev);
}
break;
case clean:
if (mddev->pers) {
restart_array(mddev);
spin_lock(&mddev->lock);
if (atomic_read(&mddev->writes_pending) == 0) {
if (mddev->in_sync == 0) {
mddev->in_sync = 1;
if (mddev->safemode == 1)
mddev->safemode = 0;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
}
err = 0;
} else
err = -EBUSY;
spin_unlock(&mddev->lock);
} else
err = -EINVAL;
break;
case active:
if (mddev->pers) {
restart_array(mddev);
clear_bit(MD_CHANGE_PENDING, &mddev->flags);
wake_up(&mddev->sb_wait);
err = 0;
} else {
mddev->ro = 0;
set_disk_ro(mddev->gendisk, 0);
err = do_md_run(mddev);
}
break;
case write_pending:
case active_idle:
/* these cannot be set */
break;
}
if (!err) {
if (mddev->hold_active == UNTIL_IOCTL)
mddev->hold_active = 0;
sysfs_notify_dirent_safe(mddev->sysfs_state);
}
mddev_unlock(mddev);
return err ?: len;
}
static struct md_sysfs_entry md_array_state =
__ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
static ssize_t
max_corrected_read_errors_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%d\n",
atomic_read(&mddev->max_corr_read_errors));
}
static ssize_t
max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
{
unsigned int n;
int rv;
rv = kstrtouint(buf, 10, &n);
if (rv < 0)
return rv;
atomic_set(&mddev->max_corr_read_errors, n);
return len;
}
static struct md_sysfs_entry max_corr_read_errors =
__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
max_corrected_read_errors_store);
static ssize_t
null_show(struct mddev *mddev, char *page)
{
return -EINVAL;
}
static ssize_t
new_dev_store(struct mddev *mddev, const char *buf, size_t len)
{
/* buf must be "%d:%d" (optionally \n-terminated), giving major and minor numbers */
/* The new device is added to the array.
* If the array has a persistent superblock, we read the
* superblock to initialise info and check validity.
* Otherwise, only checking done is that in bind_rdev_to_array,
* which mainly checks size.
*/
char *e;
int major = simple_strtoul(buf, &e, 10);
int minor;
dev_t dev;
struct md_rdev *rdev;
int err;
if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
return -EINVAL;
minor = simple_strtoul(e+1, &e, 10);
if (*e && *e != '\n')
return -EINVAL;
dev = MKDEV(major, minor);
if (major != MAJOR(dev) ||
minor != MINOR(dev))
return -EOVERFLOW;
flush_workqueue(md_misc_wq);
err = mddev_lock(mddev);
if (err)
return err;
if (mddev->persistent) {
rdev = md_import_device(dev, mddev->major_version,
mddev->minor_version);
if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
struct md_rdev *rdev0
= list_entry(mddev->disks.next,
struct md_rdev, same_set);
err = super_types[mddev->major_version]
.load_super(rdev, rdev0, mddev->minor_version);
if (err < 0)
goto out;
}
} else if (mddev->external)
rdev = md_import_device(dev, -2, -1);
else
rdev = md_import_device(dev, -1, -1);
if (IS_ERR(rdev)) {
mddev_unlock(mddev);
return PTR_ERR(rdev);
}
err = bind_rdev_to_array(rdev, mddev);
out:
if (err)
export_rdev(rdev);
mddev_unlock(mddev);
return err ? err : len;
}
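/*
 * Example usage (added; the device numbers are hypothetical):
 *
 *	# add the block device with major 8, minor 16 (typically /dev/sdb)
 *	echo 8:16 > /sys/block/md0/md/new_dev
 */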
static struct md_sysfs_entry md_new_device =
__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
static ssize_t
bitmap_store(struct mddev *mddev, const char *buf, size_t len)
{
char *end;
unsigned long chunk, end_chunk;
int err;
err = mddev_lock(mddev);
if (err)
return err;
if (!mddev->bitmap)
goto out;
/* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
while (*buf) {
chunk = end_chunk = simple_strtoul(buf, &end, 0);
if (buf == end) break;
if (*end == '-') { /* range */
buf = end + 1;
end_chunk = simple_strtoul(buf, &end, 0);
if (buf == end) break;
}
if (*end && !isspace(*end)) break;
bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
buf = skip_spaces(end);
}
bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
out:
mddev_unlock(mddev);
return len;
}
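/*
 * Example (added): writing "100 200-205" marks chunk 100 and chunks
 * 200 through 205 dirty in the write-intent bitmap, forcing those
 * regions to be resynced.
 */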
static struct md_sysfs_entry md_bitmap =
__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
static ssize_t
size_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%llu\n",
(unsigned long long)mddev->dev_sectors / 2);
}
static int update_size(struct mddev *mddev, sector_t num_sectors);
static ssize_t
size_store(struct mddev *mddev, const char *buf, size_t len)
{
/* If array is inactive, we can reduce the component size, but
* not increase it (except from 0).
* If array is active, we can try an on-line resize
*/
sector_t sectors;
int err = strict_blocks_to_sectors(buf, &sectors);
if (err < 0)
return err;
err = mddev_lock(mddev);
if (err)
return err;
if (mddev->pers) {
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_start(mddev);
err = update_size(mddev, sectors);
md_update_sb(mddev, 1);
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
} else {
if (mddev->dev_sectors == 0 ||
mddev->dev_sectors > sectors)
mddev->dev_sectors = sectors;
else
err = -ENOSPC;
}
mddev_unlock(mddev);
return err ? err : len;
}
static struct md_sysfs_entry md_size =
__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
/* Metadata version.
* This is one of
* 'none' for arrays with no metadata (good luck...)
* 'external' for arrays with externally managed metadata,
* or N.M for internally known formats
*/
static ssize_t
metadata_show(struct mddev *mddev, char *page)
{
if (mddev->persistent)
return sprintf(page, "%d.%d\n",
mddev->major_version, mddev->minor_version);
else if (mddev->external)
return sprintf(page, "external:%s\n", mddev->metadata_type);
else
return sprintf(page, "none\n");
}
static ssize_t
metadata_store(struct mddev *mddev, const char *buf, size_t len)
{
int major, minor;
char *e;
int err;
/* Changing the details of 'external' metadata is
* always permitted. Otherwise there must be
* no devices attached to the array.
*/
err = mddev_lock(mddev);
if (err)
return err;
err = -EBUSY;
if (mddev->external && strncmp(buf, "external:", 9) == 0)
;
else if (!list_empty(&mddev->disks))
goto out_unlock;
err = 0;
if (cmd_match(buf, "none")) {
mddev->persistent = 0;
mddev->external = 0;
mddev->major_version = 0;
mddev->minor_version = 90;
goto out_unlock;
}
if (strncmp(buf, "external:", 9) == 0) {
size_t namelen = len-9;
if (namelen >= sizeof(mddev->metadata_type))
namelen = sizeof(mddev->metadata_type)-1;
strncpy(mddev->metadata_type, buf+9, namelen);
mddev->metadata_type[namelen] = 0;
if (namelen && mddev->metadata_type[namelen-1] == '\n')
mddev->metadata_type[--namelen] = 0;
mddev->persistent = 0;
mddev->external = 1;
mddev->major_version = 0;
mddev->minor_version = 90;
goto out_unlock;
}
major = simple_strtoul(buf, &e, 10);
err = -EINVAL;
if (e==buf || *e != '.')
goto out_unlock;
buf = e+1;
minor = simple_strtoul(buf, &e, 10);
if (e==buf || (*e && *e != '\n') )
goto out_unlock;
err = -ENOENT;
if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
goto out_unlock;
mddev->major_version = major;
mddev->minor_version = minor;
mddev->persistent = 1;
mddev->external = 0;
err = 0;
out_unlock:
mddev_unlock(mddev);
return err ?: len;
}
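/*
 * Examples (added, illustrative): writing "1.2" selects internally
 * managed v1.2 metadata; "external:imsm" marks the metadata as
 * externally managed with the given type string; "none" disables
 * persistent metadata entirely.
 */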
static struct md_sysfs_entry md_metadata =
__ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
static ssize_t
action_show(struct mddev *mddev, char *page)
{
char *type = "idle";
unsigned long recovery = mddev->recovery;
if (test_bit(MD_RECOVERY_FROZEN, &recovery))
type = "frozen";
else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
(!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
type = "reshape";
else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
type = "resync";
else if (test_bit(MD_RECOVERY_CHECK, &recovery))
type = "check";
else
type = "repair";
} else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
type = "recover";
}
return sprintf(page, "%s\n", type);
}
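/*
* Writing to sync_action starts, requests or freezes a sync operation.
* Accepted values mirror what action_show() reports: "idle", "frozen",
* "resync", "recover", "reshape", "check" and "repair".
* e.g. from userspace: echo check > /sys/block/md0/md/sync_action
*/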
static ssize_t
action_store(struct mddev *mddev, const char *page, size_t len)
{
if (!mddev->pers || !mddev->pers->sync_request)
return -EINVAL;
if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
if (cmd_match(page, "frozen"))
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
else
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
mddev_lock(mddev) == 0) {
flush_workqueue(md_misc_wq);
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_reap_sync_thread(mddev);
}
mddev_unlock(mddev);
}
} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
return -EBUSY;
else if (cmd_match(page, "resync"))
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
else if (cmd_match(page, "recover")) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
} else if (cmd_match(page, "reshape")) {
int err;
if (mddev->pers->start_reshape == NULL)
return -EINVAL;
err = mddev_lock(mddev);
if (!err) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
err = mddev->pers->start_reshape(mddev);
mddev_unlock(mddev);
}
if (err)
return err;
sysfs_notify(&mddev->kobj, NULL, "degraded");
} else {
if (cmd_match(page, "check"))
set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
else if (!cmd_match(page, "repair"))
return -EINVAL;
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
}
if (mddev->ro == 2) {
/* A write to sync_action is enough to justify
* canceling read-auto mode
*/
mddev->ro = 0;
md_wakeup_thread(mddev->sync_thread);
}
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
sysfs_notify_dirent_safe(mddev->sysfs_action);
return len;
}
static struct md_sysfs_entry md_scan_mode =
__ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
static ssize_t
last_sync_action_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%s\n", mddev->last_sync_action);
}
static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
static ssize_t
mismatch_cnt_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%llu\n",
(unsigned long long)
atomic64_read(&mddev->resync_mismatches));
}
static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
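/* sync_speed_min/sync_speed_max accept a rate in KB/sec, or the word
* "system" to revert to the system-wide default (stored internally as 0).
*/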
static ssize_t
sync_min_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%d (%s)\n", speed_min(mddev),
mddev->sync_speed_min ? "local": "system");
}
static ssize_t
sync_min_store(struct mddev *mddev, const char *buf, size_t len)
{
unsigned int min;
int rv;
if (strncmp(buf, "system", 6)==0) {
min = 0;
} else {
rv = kstrtouint(buf, 10, &min);
if (rv < 0)
return rv;
if (min == 0)
return -EINVAL;
}
mddev->sync_speed_min = min;
return len;
}
static struct md_sysfs_entry md_sync_min =
__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
static ssize_t
sync_max_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%d (%s)\n", speed_max(mddev),
mddev->sync_speed_max ? "local": "system");
}
static ssize_t
sync_max_store(struct mddev *mddev, const char *buf, size_t len)
{
unsigned int max;
int rv;
if (strncmp(buf, "system", 6)==0) {
max = 0;
} else {
rv = kstrtouint(buf, 10, &max);
if (rv < 0)
return rv;
if (max == 0)
return -EINVAL;
}
mddev->sync_speed_max = max;
return len;
}
static struct md_sysfs_entry md_sync_max =
__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
static ssize_t
degraded_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%d\n", mddev->degraded);
}
static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
static ssize_t
sync_force_parallel_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%d\n", mddev->parallel_resync);
}
static ssize_t
sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
{
long n;
if (kstrtol(buf, 10, &n))
return -EINVAL;
if (n != 0 && n != 1)
return -EINVAL;
mddev->parallel_resync = n;
if (mddev->sync_thread)
wake_up(&resync_wait);
return len;
}
/* force parallel resync, even with shared block devices */
static struct md_sysfs_entry md_sync_force_parallel =
__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
sync_force_parallel_show, sync_force_parallel_store);
static ssize_t
sync_speed_show(struct mddev *mddev, char *page)
{
unsigned long resync, dt, db;
if (mddev->curr_resync == 0)
return sprintf(page, "none\n");
resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
dt = (jiffies - mddev->resync_mark) / HZ;
if (!dt) dt++;
db = resync - mddev->resync_mark_cnt;
return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
}
static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
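/* sync_completed reports progress as "<done> / <total>" in sectors,
* "none" when no recovery is running, or "delayed" while this array
* waits for a resync on another array sharing its devices.
*/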
static ssize_t
sync_completed_show(struct mddev *mddev, char *page)
{
unsigned long long max_sectors, resync;
if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return sprintf(page, "none\n");
if (mddev->curr_resync == 1 ||
mddev->curr_resync == 2)
return sprintf(page, "delayed\n");
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
max_sectors = mddev->resync_max_sectors;
else
max_sectors = mddev->dev_sectors;
resync = mddev->curr_resync_completed;
return sprintf(page, "%llu / %llu\n", resync, max_sectors);
}
static struct md_sysfs_entry md_sync_completed =
__ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
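/* sync_min/sync_max bound the sector range that a resync will cover,
* typically to restrict a "check" or "repair" pass. resync_min is
* rounded down to a 4K boundary and sync_max also accepts "max".
*/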
static ssize_t
min_sync_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%llu\n",
(unsigned long long)mddev->resync_min);
}
static ssize_t
min_sync_store(struct mddev *mddev, const char *buf, size_t len)
{
unsigned long long min;
int err;
if (kstrtoull(buf, 10, &min))
return -EINVAL;
spin_lock(&mddev->lock);
err = -EINVAL;
if (min > mddev->resync_max)
goto out_unlock;
err = -EBUSY;
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
goto out_unlock;
/* Round down to multiple of 4K for safety */
mddev->resync_min = round_down(min, 8);
err = 0;
out_unlock:
spin_unlock(&mddev->lock);
return err ?: len;
}
static struct md_sysfs_entry md_min_sync =
__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
static ssize_t
max_sync_show(struct mddev *mddev, char *page)
{
if (mddev->resync_max == MaxSector)
return sprintf(page, "max\n");
else
return sprintf(page, "%llu\n",
(unsigned long long)mddev->resync_max);
}
static ssize_t
max_sync_store(struct mddev *mddev, const char *buf, size_t len)
{
int err;
spin_lock(&mddev->lock);
if (strncmp(buf, "max", 3) == 0)
mddev->resync_max = MaxSector;
else {
unsigned long long max;
int chunk;
err = -EINVAL;
if (kstrtoull(buf, 10, &max))
goto out_unlock;
if (max < mddev->resync_min)
goto out_unlock;
err = -EBUSY;
if (max < mddev->resync_max &&
mddev->ro == 0 &&
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
goto out_unlock;
/* Must be a multiple of chunk_size */
chunk = mddev->chunk_sectors;
if (chunk) {
sector_t temp = max;
err = -EINVAL;
if (sector_div(temp, chunk))
goto out_unlock;
}
mddev->resync_max = max;
}
wake_up(&mddev->recovery_wait);
err = 0;
out_unlock:
spin_unlock(&mddev->lock);
return err ?: len;
}
static struct md_sysfs_entry md_max_sync =
__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
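/* suspend_lo/suspend_hi define a sector range of the array in which
* I/O is suspended; the personality's quiesce() method drains
* outstanding requests whenever the region is expanded.
*/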
static ssize_t
suspend_lo_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
}
static ssize_t
suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
{
unsigned long long old, new;
int err;
err = kstrtoull(buf, 10, &new);
if (err < 0)
return err;
if (new != (sector_t)new)
return -EINVAL;
err = mddev_lock(mddev);
if (err)
return err;
err = -EINVAL;
if (mddev->pers == NULL ||
mddev->pers->quiesce == NULL)
goto unlock;
old = mddev->suspend_lo;
mddev->suspend_lo = new;
if (new >= old)
/* Shrinking suspended region */
mddev->pers->quiesce(mddev, 2);
else {
/* Expanding suspended region - need to wait */
mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0);
}
err = 0;
unlock:
mddev_unlock(mddev);
return err ?: len;
}
static struct md_sysfs_entry md_suspend_lo =
__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
static ssize_t
suspend_hi_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
}
static ssize_t
suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
{
unsigned long long old, new;
int err;
err = kstrtoull(buf, 10, &new);
if (err < 0)
return err;
if (new != (sector_t)new)
return -EINVAL;
err = mddev_lock(mddev);
if (err)
return err;
err = -EINVAL;
if (mddev->pers == NULL ||
mddev->pers->quiesce == NULL)
goto unlock;
old = mddev->suspend_hi;
mddev->suspend_hi = new;
if (new <= old)
/* Shrinking suspended region */
mddev->pers->quiesce(mddev, 2);
else {
/* Expanding suspended region - need to wait */
mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0);
}
err = 0;
unlock:
mddev_unlock(mddev);
return err ?: len;
}
static struct md_sysfs_entry md_suspend_hi =
__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
static ssize_t
reshape_position_show(struct mddev *mddev, char *page)
{
if (mddev->reshape_position != MaxSector)
return sprintf(page, "%llu\n",
(unsigned long long)mddev->reshape_position);
strcpy(page, "none\n");
return 5;
}
static ssize_t
reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
{
struct md_rdev *rdev;
unsigned long long new;
int err;
err = kstrtoull(buf, 10, &new);
if (err < 0)
return err;
if (new != (sector_t)new)
return -EINVAL;
err = mddev_lock(mddev);
if (err)
return err;
err = -EBUSY;
if (mddev->pers)
goto unlock;
mddev->reshape_position = new;
mddev->delta_disks = 0;
mddev->reshape_backwards = 0;
mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout;
mddev->new_chunk_sectors = mddev->chunk_sectors;
rdev_for_each(rdev, mddev)
rdev->new_data_offset = rdev->data_offset;
err = 0;
unlock:
mddev_unlock(mddev);
return err ?: len;
}
static struct md_sysfs_entry md_reshape_position =
__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
reshape_position_store);
static ssize_t
reshape_direction_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%s\n",
mddev->reshape_backwards ? "backwards" : "forwards");
}
static ssize_t
reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
{
int backwards = 0;
int err;
if (cmd_match(buf, "forwards"))
backwards = 0;
else if (cmd_match(buf, "backwards"))
backwards = 1;
else
return -EINVAL;
if (mddev->reshape_backwards == backwards)
return len;
err = mddev_lock(mddev);
if (err)
return err;
/* check if we are allowed to change */
if (mddev->delta_disks)
err = -EBUSY;
else if (mddev->persistent &&
mddev->major_version == 0)
err = -EINVAL;
else
mddev->reshape_backwards = backwards;
mddev_unlock(mddev);
return err ?: len;
}
static struct md_sysfs_entry md_reshape_direction =
__ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
reshape_direction_store);
static ssize_t
array_size_show(struct mddev *mddev, char *page)
{
if (mddev->external_size)
return sprintf(page, "%llu\n",
(unsigned long long)mddev->array_sectors/2);
else
return sprintf(page, "default\n");
}
static ssize_t
array_size_store(struct mddev *mddev, const char *buf, size_t len)
{
sector_t sectors;
int err;
err = mddev_lock(mddev);
if (err)
return err;
if (strncmp(buf, "default", 7) == 0) {
if (mddev->pers)
sectors = mddev->pers->size(mddev, 0, 0);
else
sectors = mddev->array_sectors;
mddev->external_size = 0;
} else {
if (strict_blocks_to_sectors(buf, &sectors) < 0)
err = -EINVAL;
else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
err = -E2BIG;
else
mddev->external_size = 1;
}
if (!err) {
mddev->array_sectors = sectors;
if (mddev->pers) {
set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
}
}
mddev_unlock(mddev);
return err ?: len;
}
static struct md_sysfs_entry md_array_size =
__ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
array_size_store);
static struct attribute *md_default_attrs[] = {
&md_level.attr,
&md_layout.attr,
&md_raid_disks.attr,
&md_chunk_size.attr,
&md_size.attr,
&md_resync_start.attr,
&md_metadata.attr,
&md_new_device.attr,
&md_safe_delay.attr,
&md_array_state.attr,
&md_reshape_position.attr,
&md_reshape_direction.attr,
&md_array_size.attr,
&max_corr_read_errors.attr,
NULL,
};
static struct attribute *md_redundancy_attrs[] = {
&md_scan_mode.attr,
&md_last_scan_mode.attr,
&md_mismatches.attr,
&md_sync_min.attr,
&md_sync_max.attr,
&md_sync_speed.attr,
&md_sync_force_parallel.attr,
&md_sync_completed.attr,
&md_min_sync.attr,
&md_max_sync.attr,
&md_suspend_lo.attr,
&md_suspend_hi.attr,
&md_bitmap.attr,
&md_degraded.attr,
NULL,
};
static struct attribute_group md_redundancy_group = {
.name = NULL,
.attrs = md_redundancy_attrs,
};
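/* The show/store wrappers below pin the mddev with mddev_get() while
* the attribute handler runs, so a concurrent stop cannot free it;
* an mddev already removed from all_mddevs yields -EBUSY, and store
* additionally requires CAP_SYS_ADMIN.
*/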
static ssize_t
md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
struct mddev *mddev = container_of(kobj, struct mddev, kobj);
ssize_t rv;
if (!entry->show)
return -EIO;
spin_lock(&all_mddevs_lock);
if (list_empty(&mddev->all_mddevs)) {
spin_unlock(&all_mddevs_lock);
return -EBUSY;
}
mddev_get(mddev);
spin_unlock(&all_mddevs_lock);
rv = entry->show(mddev, page);
mddev_put(mddev);
return rv;
}
static ssize_t
md_attr_store(struct kobject *kobj, struct attribute *attr,
const char *page, size_t length)
{
struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
struct mddev *mddev = container_of(kobj, struct mddev, kobj);
ssize_t rv;
if (!entry->store)
return -EIO;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
spin_lock(&all_mddevs_lock);
if (list_empty(&mddev->all_mddevs)) {
spin_unlock(&all_mddevs_lock);
return -EBUSY;
}
mddev_get(mddev);
spin_unlock(&all_mddevs_lock);
rv = entry->store(mddev, page, length);
mddev_put(mddev);
return rv;
}
static void md_free(struct kobject *ko)
{
struct mddev *mddev = container_of(ko, struct mddev, kobj);
if (mddev->sysfs_state)
sysfs_put(mddev->sysfs_state);
if (mddev->queue)
blk_cleanup_queue(mddev->queue);
if (mddev->gendisk) {
del_gendisk(mddev->gendisk);
put_disk(mddev->gendisk);
}
kfree(mddev);
}
static const struct sysfs_ops md_sysfs_ops = {
.show = md_attr_show,
.store = md_attr_store,
};
static struct kobj_type md_ktype = {
.release = md_free,
.sysfs_ops = &md_sysfs_ops,
.default_attrs = md_default_attrs,
};
int mdp_major = 0;
static void mddev_delayed_delete(struct work_struct *ws)
{
struct mddev *mddev = container_of(ws, struct mddev, del_work);
sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
kobject_del(&mddev->kobj);
kobject_put(&mddev->kobj);
}
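/* md_alloc() creates the request queue, gendisk and sysfs kobject for
* a new md device. 'name' is non-NULL for named (md_*) arrays and must
* be unique; otherwise dev selects the classic md%d/md_d%d unit.
*/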
static int md_alloc(dev_t dev, char *name)
{
static DEFINE_MUTEX(disks_mutex);
struct mddev *mddev = mddev_find(dev);
struct gendisk *disk;
int partitioned;
int shift;
int unit;
int error;
if (!mddev)
return -ENODEV;
partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
shift = partitioned ? MdpMinorShift : 0;
unit = MINOR(mddev->unit) >> shift;
/* wait for any previous instance of this device to be
* completely removed (mddev_delayed_delete).
*/
flush_workqueue(md_misc_wq);
mutex_lock(&disks_mutex);
error = -EEXIST;
if (mddev->gendisk)
goto abort;
if (name) {
/* Need to ensure that 'name' is not a duplicate.
*/
struct mddev *mddev2;
spin_lock(&all_mddevs_lock);
list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
if (mddev2->gendisk &&
strcmp(mddev2->gendisk->disk_name, name) == 0) {
spin_unlock(&all_mddevs_lock);
goto abort;
}
spin_unlock(&all_mddevs_lock);
}
error = -ENOMEM;
mddev->queue = blk_alloc_queue(GFP_KERNEL);
if (!mddev->queue)
goto abort;
mddev->queue->queuedata = mddev;
blk_queue_make_request(mddev->queue, md_make_request);
blk_set_stacking_limits(&mddev->queue->limits);
disk = alloc_disk(1 << shift);
if (!disk) {
blk_cleanup_queue(mddev->queue);
mddev->queue = NULL;
goto abort;
}
disk->major = MAJOR(mddev->unit);
disk->first_minor = unit << shift;
if (name)
strcpy(disk->disk_name, name);
else if (partitioned)
sprintf(disk->disk_name, "md_d%d", unit);
else
sprintf(disk->disk_name, "md%d", unit);
disk->fops = &md_fops;
disk->private_data = mddev;
disk->queue = mddev->queue;
blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA);
/* Allow extended partitions. This makes the
* 'mdp' device redundant, but we can't really
* remove it now.
*/
disk->flags |= GENHD_FL_EXT_DEVT;
mddev->gendisk = disk;
/* As soon as we call add_disk(), another thread could get
* through to md_open, so make sure it doesn't get too far
*/
mutex_lock(&mddev->open_mutex);
add_disk(disk);
error = kobject_init_and_add(&mddev->kobj, &md_ktype,
&disk_to_dev(disk)->kobj, "%s", "md");
if (error) {
/* This isn't possible, but as kobject_init_and_add is marked
* __must_check, we must do something with the result
*/
printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
disk->disk_name);
error = 0;
}
if (mddev->kobj.sd &&
sysfs_create_group(&mddev->kobj, &md_bitmap_group))
printk(KERN_DEBUG "pointless warning\n");
mutex_unlock(&mddev->open_mutex);
abort:
mutex_unlock(&disks_mutex);
if (!error && mddev->kobj.sd) {
kobject_uevent(&mddev->kobj, KOBJ_ADD);
mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
}
mddev_put(mddev);
return error;
}
static struct kobject *md_probe(dev_t dev, int *part, void *data)
{
md_alloc(dev, NULL);
return NULL;
}
static int add_named_array(const char *val, struct kernel_param *kp)
{
/* val must be "md_*" where * is not all digits.
* We allocate an array with a large free minor number, and
* set the name to val. val must not already be an active name.
*/
int len = strlen(val);
char buf[DISK_NAME_LEN];
while (len && val[len-1] == '\n')
len--;
if (len >= DISK_NAME_LEN)
return -E2BIG;
strlcpy(buf, val, len+1);
if (strncmp(buf, "md_", 3) != 0)
return -EINVAL;
return md_alloc(0, buf);
}
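/* Safemode timer: if no writes are pending when the delay expires,
* enter safemode so the array can be marked clean, and wake the main
* thread to write out the superblock.
*/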
static void md_safemode_timeout(unsigned long data)
{
struct mddev *mddev = (struct mddev *) data;
if (!atomic_read(&mddev->writes_pending)) {
mddev->safemode = 1;
if (mddev->external)
sysfs_notify_dirent_safe(mddev->sysfs_state);
}
md_wakeup_thread(mddev->thread);
}
static int start_dirty_degraded;
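/* md_run() starts an array: it validates the component devices, binds
* the personality for mddev->level, creates the bitmap if one is
* configured, and finally publishes mddev->pers under mddev->lock.
*/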
int md_run(struct mddev *mddev)
{
int err;
struct md_rdev *rdev;
struct md_personality *pers;
if (list_empty(&mddev->disks))
/* cannot run an array with no devices.. */
return -EINVAL;
if (mddev->pers)
return -EBUSY;
/* Cannot run until previous stop completes properly */
if (mddev->sysfs_active)
return -EBUSY;
/*
* Analyze all RAID superblock(s)
*/
if (!mddev->raid_disks) {
if (!mddev->persistent)
return -EINVAL;
analyze_sbs(mddev);
}
if (mddev->level != LEVEL_NONE)
request_module("md-level-%d", mddev->level);
else if (mddev->clevel[0])
request_module("md-%s", mddev->clevel);
/*
* Drop all container device buffers, from now on
* the only valid external interface is through the md
* device.
*/
rdev_for_each(rdev, mddev) {
if (test_bit(Faulty, &rdev->flags))
continue;
sync_blockdev(rdev->bdev);
invalidate_bdev(rdev->bdev);
/* perform some consistency tests on the device.
* We don't want the data to overlap the metadata,
* Internal Bitmap issues have been handled elsewhere.
*/
if (rdev->meta_bdev) {
/* Nothing to check */;
} else if (rdev->data_offset < rdev->sb_start) {
if (mddev->dev_sectors &&
rdev->data_offset + mddev->dev_sectors
> rdev->sb_start) {
printk("md: %s: data overlaps metadata\n",
mdname(mddev));
return -EINVAL;
}
} else {
if (rdev->sb_start + rdev->sb_size/512
> rdev->data_offset) {
printk("md: %s: metadata overlaps data\n",
mdname(mddev));
return -EINVAL;
}
}
sysfs_notify_dirent_safe(rdev->sysfs_state);
}
if (mddev->bio_set == NULL)
mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0);
spin_lock(&pers_lock);
pers = find_pers(mddev->level, mddev->clevel);
if (!pers || !try_module_get(pers->owner)) {
spin_unlock(&pers_lock);
if (mddev->level != LEVEL_NONE)
printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
mddev->level);
else
printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
mddev->clevel);
return -EINVAL;
}
spin_unlock(&pers_lock);
if (mddev->level != pers->level) {
mddev->level = pers->level;
mddev->new_level = pers->level;
}
strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
if (mddev->reshape_position != MaxSector &&
pers->start_reshape == NULL) {
/* This personality cannot handle reshaping... */
module_put(pers->owner);
return -EINVAL;
}
if (pers->sync_request) {
/* Warn if this is a potentially silly
* configuration.
*/
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
struct md_rdev *rdev2;
int warned = 0;
rdev_for_each(rdev, mddev)
rdev_for_each(rdev2, mddev) {
if (rdev < rdev2 &&
rdev->bdev->bd_contains ==
rdev2->bdev->bd_contains) {
printk(KERN_WARNING
"%s: WARNING: %s appears to be"
" on the same physical disk as"
" %s.\n",
mdname(mddev),
bdevname(rdev->bdev,b),
bdevname(rdev2->bdev,b2));
warned = 1;
}
}
if (warned)
printk(KERN_WARNING
"True protection against single-disk"
" failure might be compromised.\n");
}
mddev->recovery = 0;
/* may be overridden by personality */
mddev->resync_max_sectors = mddev->dev_sectors;
mddev->ok_start_degraded = start_dirty_degraded;
if (start_readonly && mddev->ro == 0)
mddev->ro = 2; /* read-only, but switch on first write */
err = pers->run(mddev);
if (err)
printk(KERN_ERR "md: pers->run() failed ...\n");
else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
WARN_ONCE(!mddev->external_size, "%s: default size too small,"
" but 'external_size' not in effect?\n", __func__);
printk(KERN_ERR
"md: invalid array_size %llu > default size %llu\n",
(unsigned long long)mddev->array_sectors / 2,
(unsigned long long)pers->size(mddev, 0, 0) / 2);
err = -EINVAL;
}
if (err == 0 && pers->sync_request &&
(mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
struct bitmap *bitmap;
bitmap = bitmap_create(mddev, -1);
if (IS_ERR(bitmap)) {
err = PTR_ERR(bitmap);
printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
mdname(mddev), err);
} else
mddev->bitmap = bitmap;
}
if (err) {
mddev_detach(mddev);
if (mddev->private)
pers->free(mddev, mddev->private);
mddev->private = NULL;
module_put(pers->owner);
bitmap_destroy(mddev);
return err;
}
if (mddev->queue) {
mddev->queue->backing_dev_info.congested_data = mddev;
mddev->queue->backing_dev_info.congested_fn = md_congested;
blk_queue_merge_bvec(mddev->queue, md_mergeable_bvec);
}
if (pers->sync_request) {
if (mddev->kobj.sd &&
sysfs_create_group(&mddev->kobj, &md_redundancy_group))
printk(KERN_WARNING
"md: cannot register extra attributes for %s\n",
mdname(mddev));
mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
} else if (mddev->ro == 2) /* auto-readonly not meaningful */
mddev->ro = 0;
atomic_set(&mddev->writes_pending,0);
atomic_set(&mddev->max_corr_read_errors,
MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
mddev->safemode = 0;
mddev->safemode_timer.function = md_safemode_timeout;
mddev->safemode_timer.data = (unsigned long) mddev;
mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
mddev->in_sync = 1;
smp_wmb();
spin_lock(&mddev->lock);
mddev->pers = pers;
mddev->ready = 1;
spin_unlock(&mddev->lock);
rdev_for_each(rdev, mddev)
if (rdev->raid_disk >= 0)
if (sysfs_link_rdev(mddev, rdev))
/* failure here is OK */;
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
if (mddev->flags & MD_UPDATE_SB_FLAGS)
md_update_sb(mddev, 0);
md_new_event(mddev);
sysfs_notify_dirent_safe(mddev->sysfs_state);
sysfs_notify_dirent_safe(mddev->sysfs_action);
sysfs_notify(&mddev->kobj, NULL, "degraded");
return 0;
}
EXPORT_SYMBOL_GPL(md_run);
static int do_md_run(struct mddev *mddev)
{
int err;
err = md_run(mddev);
if (err)
goto out;
err = bitmap_load(mddev);
if (err) {
bitmap_destroy(mddev);
goto out;
}
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
mddev->changed = 1;
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
out:
return err;
}
static int restart_array(struct mddev *mddev)
{
struct gendisk *disk = mddev->gendisk;
/* Complain if it has no devices */
if (list_empty(&mddev->disks))
return -ENXIO;
if (!mddev->pers)
return -EINVAL;
if (!mddev->ro)
return -EBUSY;
mddev->safemode = 0;
mddev->ro = 0;
set_disk_ro(disk, 0);
printk(KERN_INFO "md: %s switched to read-write mode.\n",
mdname(mddev));
/* Kick recovery or resync if necessary */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread);
sysfs_notify_dirent_safe(mddev->sysfs_state);
return 0;
}
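/* Reset every per-array field to its default, ready for reuse. */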
static void md_clean(struct mddev *mddev)
{
mddev->array_sectors = 0;
mddev->external_size = 0;
mddev->dev_sectors = 0;
mddev->raid_disks = 0;
mddev->recovery_cp = 0;
mddev->resync_min = 0;
mddev->resync_max = MaxSector;
mddev->reshape_position = MaxSector;
mddev->external = 0;
mddev->persistent = 0;
mddev->level = LEVEL_NONE;
mddev->clevel[0] = 0;
mddev->flags = 0;
mddev->ro = 0;
mddev->metadata_type[0] = 0;
mddev->chunk_sectors = 0;
mddev->ctime = mddev->utime = 0;
mddev->layout = 0;
mddev->max_disks = 0;
mddev->events = 0;
mddev->can_decrease_events = 0;
mddev->delta_disks = 0;
mddev->reshape_backwards = 0;
mddev->new_level = LEVEL_NONE;
mddev->new_layout = 0;
mddev->new_chunk_sectors = 0;
mddev->curr_resync = 0;
atomic64_set(&mddev->resync_mismatches, 0);
mddev->suspend_lo = mddev->suspend_hi = 0;
mddev->sync_speed_min = mddev->sync_speed_max = 0;
mddev->recovery = 0;
mddev->in_sync = 0;
mddev->changed = 0;
mddev->degraded = 0;
mddev->safemode = 0;
mddev->private = NULL;
mddev->merge_check_needed = 0;
mddev->bitmap_info.offset = 0;
mddev->bitmap_info.default_offset = 0;
mddev->bitmap_info.default_space = 0;
mddev->bitmap_info.chunksize = 0;
mddev->bitmap_info.daemon_sleep = 0;
mddev->bitmap_info.max_write_behind = 0;
}
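/* Freeze recovery, reap any sync thread, flush the bitmap and, if the
* array was writable and dirty, mark the superblock clean on disk.
*/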
static void __md_stop_writes(struct mddev *mddev)
{
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_start(mddev);
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
flush_workqueue(md_misc_wq);
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_reap_sync_thread(mddev);
}
del_timer_sync(&mddev->safemode_timer);
bitmap_flush(mddev);
md_super_wait(mddev);
if (mddev->ro == 0 &&
(!mddev->in_sync || (mddev->flags & MD_UPDATE_SB_FLAGS))) {
/* mark array as shutdown cleanly */
mddev->in_sync = 1;
md_update_sb(mddev, 1);
}
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
}
void md_stop_writes(struct mddev *mddev)
{
mddev_lock_nointr(mddev);
__md_stop_writes(mddev);
mddev_unlock(mddev);
}
EXPORT_SYMBOL_GPL(md_stop_writes);
static void mddev_detach(struct mddev *mddev)
{
struct bitmap *bitmap = mddev->bitmap;
/* wait for behind writes to complete */
if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
printk(KERN_INFO "md:%s: behind writes in progress - waiting to stop.\n",
mdname(mddev));
/* need to kick something here to make sure I/O goes? */
wait_event(bitmap->behind_wait,
atomic_read(&bitmap->behind_writes) == 0);
}
if (mddev->pers && mddev->pers->quiesce) {
mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0);
}
md_unregister_thread(&mddev->thread);
if (mddev->queue)
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
}
static void __md_stop(struct mddev *mddev)
{
struct md_personality *pers = mddev->pers;
mddev_detach(mddev);
/* Ensure ->event_work is done */
flush_workqueue(md_misc_wq);
spin_lock(&mddev->lock);
mddev->ready = 0;
mddev->pers = NULL;
spin_unlock(&mddev->lock);
pers->free(mddev, mddev->private);
mddev->private = NULL;
if (pers->sync_request && mddev->to_remove == NULL)
mddev->to_remove = &md_redundancy_group;
module_put(pers->owner);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
}
void md_stop(struct mddev *mddev)
{
/* stop the array and free any attached data structures.
* This is called from dm-raid
*/
__md_stop(mddev);
bitmap_destroy(mddev);
if (mddev->bio_set)
bioset_free(mddev->bio_set);
}
EXPORT_SYMBOL_GPL(md_stop);
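/* Switch an array to read-only (STOP_ARRAY_RO). Fails with -EBUSY if
* anyone else holds the device open or recovery is still running.
*/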
static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
{
int err = 0;
int did_freeze = 0;
if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
did_freeze = 1;
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
md_wakeup_thread(mddev->thread);
}
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
if (mddev->sync_thread)
/* Thread might be blocked waiting for metadata update
* which will now never happen */
wake_up_process(mddev->sync_thread->tsk);
mddev_unlock(mddev);
wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
&mddev->recovery));
mddev_lock_nointr(mddev);
mutex_lock(&mddev->open_mutex);
if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
mddev->sync_thread ||
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
(bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
printk("md: %s still in use.\n",mdname(mddev));
if (did_freeze) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
}
err = -EBUSY;
goto out;
}
if (mddev->pers) {
__md_stop_writes(mddev);
err = -ENXIO;
if (mddev->ro==1)
goto out;
mddev->ro = 1;
set_disk_ro(mddev->gendisk, 1);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
sysfs_notify_dirent_safe(mddev->sysfs_state);
err = 0;
}
out:
mutex_unlock(&mddev->open_mutex);
return err;
}
/* mode:
* 0 - completely stop and dis-assemble array
* 2 - stop but do not disassemble array
*/
static int do_md_stop(struct mddev *mddev, int mode,
struct block_device *bdev)
{
struct gendisk *disk = mddev->gendisk;
struct md_rdev *rdev;
int did_freeze = 0;
if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
did_freeze = 1;
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
md_wakeup_thread(mddev->thread);
}
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
if (mddev->sync_thread)
/* Thread might be blocked waiting for metadata update
* which will now never happen */
wake_up_process(mddev->sync_thread->tsk);
mddev_unlock(mddev);
wait_event(resync_wait, (mddev->sync_thread == NULL &&
!test_bit(MD_RECOVERY_RUNNING,
&mddev->recovery)));
mddev_lock_nointr(mddev);
mutex_lock(&mddev->open_mutex);
if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
mddev->sysfs_active ||
mddev->sync_thread ||
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
(bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
printk("md: %s still in use.\n",mdname(mddev));
mutex_unlock(&mddev->open_mutex);
if (did_freeze) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
}
return -EBUSY;
}
if (mddev->pers) {
if (mddev->ro)
set_disk_ro(disk, 0);
__md_stop_writes(mddev);
__md_stop(mddev);
mddev->queue->merge_bvec_fn = NULL;
mddev->queue->backing_dev_info.congested_fn = NULL;
/* tell userspace to handle 'inactive' */
sysfs_notify_dirent_safe(mddev->sysfs_state);
rdev_for_each(rdev, mddev)
if (rdev->raid_disk >= 0)
sysfs_unlink_rdev(mddev, rdev);
set_capacity(disk, 0);
mutex_unlock(&mddev->open_mutex);
mddev->changed = 1;
revalidate_disk(disk);
if (mddev->ro)
mddev->ro = 0;
} else
mutex_unlock(&mddev->open_mutex);
/*
* Free resources if final stop
*/
if (mode == 0) {
printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
bitmap_destroy(mddev);
if (mddev->bitmap_info.file) {
struct file *f = mddev->bitmap_info.file;
spin_lock(&mddev->lock);
mddev->bitmap_info.file = NULL;
spin_unlock(&mddev->lock);
fput(f);
}
mddev->bitmap_info.offset = 0;
export_array(mddev);
md_clean(mddev);
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
if (mddev->hold_active == UNTIL_STOP)
mddev->hold_active = 0;
}
blk_integrity_unregister(disk);
md_new_event(mddev);
sysfs_notify_dirent_safe(mddev->sysfs_state);
return 0;
}
#ifndef MODULE
static void autorun_array(struct mddev *mddev)
{
struct md_rdev *rdev;
int err;
if (list_empty(&mddev->disks))
return;
printk(KERN_INFO "md: running: ");
rdev_for_each(rdev, mddev) {
char b[BDEVNAME_SIZE];
printk("<%s>", bdevname(rdev->bdev,b));
}
printk("\n");
err = do_md_run(mddev);
if (err) {
printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
do_md_stop(mddev, 0, NULL);
}
}
/*
* let's try to run arrays based on all disks that have arrived
* until now. (those are in pending_raid_disks)
*
* the method: pick the first pending disk, collect all disks with
* the same UUID, remove all from the pending list and put them into
* the 'same_array' list. Then order this list based on superblock
* update time (freshest comes first), kick out 'old' disks and
* compare superblocks. If everything's fine then run it.
*
* If "unit" is allocated, then bump its reference count
*/
static void autorun_devices(int part)
{
struct md_rdev *rdev0, *rdev, *tmp;
struct mddev *mddev;
char b[BDEVNAME_SIZE];
printk(KERN_INFO "md: autorun ...\n");
while (!list_empty(&pending_raid_disks)) {
int unit;
dev_t dev;
LIST_HEAD(candidates);
rdev0 = list_entry(pending_raid_disks.next,
struct md_rdev, same_set);
printk(KERN_INFO "md: considering %s ...\n",
bdevname(rdev0->bdev,b));
INIT_LIST_HEAD(&candidates);
rdev_for_each_list(rdev, tmp, &pending_raid_disks)
if (super_90_load(rdev, rdev0, 0) >= 0) {
printk(KERN_INFO "md: adding %s ...\n",
bdevname(rdev->bdev,b));
list_move(&rdev->same_set, &candidates);
}
/*
* now we have a set of devices, with all of them having
* mostly sane superblocks. It's time to allocate the
* mddev.
*/
if (part) {
dev = MKDEV(mdp_major,
rdev0->preferred_minor << MdpMinorShift);
unit = MINOR(dev) >> MdpMinorShift;
} else {
dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
unit = MINOR(dev);
}
if (rdev0->preferred_minor != unit) {
printk(KERN_INFO "md: unit number in %s is bad: %d\n",
bdevname(rdev0->bdev, b), rdev0->preferred_minor);
break;
}
md_probe(dev, NULL, NULL);
mddev = mddev_find(dev);
if (!mddev || !mddev->gendisk) {
if (mddev)
mddev_put(mddev);
printk(KERN_ERR
"md: cannot allocate memory for md drive.\n");
break;
}
if (mddev_lock(mddev))
printk(KERN_WARNING "md: %s locked, cannot run\n",
mdname(mddev));
else if (mddev->raid_disks || mddev->major_version
|| !list_empty(&mddev->disks)) {
printk(KERN_WARNING
"md: %s already running, cannot run %s\n",
mdname(mddev), bdevname(rdev0->bdev,b));
mddev_unlock(mddev);
} else {
printk(KERN_INFO "md: created %s\n", mdname(mddev));
mddev->persistent = 1;
rdev_for_each_list(rdev, tmp, &candidates) {
list_del_init(&rdev->same_set);
if (bind_rdev_to_array(rdev, mddev))
export_rdev(rdev);
}
autorun_array(mddev);
mddev_unlock(mddev);
}
/* on success, candidates will be empty, on error
* it won't...
*/
rdev_for_each_list(rdev, tmp, &candidates) {
list_del_init(&rdev->same_set);
export_rdev(rdev);
}
mddev_put(mddev);
}
printk(KERN_INFO "md: ... autorun DONE.\n");
}
#endif /* !MODULE */
static int get_version(void __user *arg)
{
mdu_version_t ver;
ver.major = MD_MAJOR_VERSION;
ver.minor = MD_MINOR_VERSION;
ver.patchlevel = MD_PATCHLEVEL_VERSION;
if (copy_to_user(arg, &ver, sizeof(ver)))
return -EFAULT;
return 0;
}
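/* Fill an mdu_array_info_t from the current array state for the
* GET_ARRAY_INFO ioctl; disk counts are gathered under rcu_read_lock().
*/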
static int get_array_info(struct mddev *mddev, void __user *arg)
{
mdu_array_info_t info;
int nr,working,insync,failed,spare;
struct md_rdev *rdev;
nr = working = insync = failed = spare = 0;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev) {
nr++;
if (test_bit(Faulty, &rdev->flags))
failed++;
else {
working++;
if (test_bit(In_sync, &rdev->flags))
insync++;
else
spare++;
}
}
rcu_read_unlock();
info.major_version = mddev->major_version;
info.minor_version = mddev->minor_version;
info.patch_version = MD_PATCHLEVEL_VERSION;
info.ctime = mddev->ctime;
info.level = mddev->level;
info.size = mddev->dev_sectors / 2;
if (info.size != mddev->dev_sectors / 2) /* overflow */
info.size = -1;
info.nr_disks = nr;
info.raid_disks = mddev->raid_disks;
info.md_minor = mddev->md_minor;
info.not_persistent = !mddev->persistent;
info.utime = mddev->utime;
info.state = 0;
if (mddev->in_sync)
info.state = (1<<MD_SB_CLEAN);
if (mddev->bitmap && mddev->bitmap_info.offset)
info.state |= (1<<MD_SB_BITMAP_PRESENT);
if (mddev_is_clustered(mddev))
info.state |= (1<<MD_SB_CLUSTERED);
info.active_disks = insync;
info.working_disks = working;
info.failed_disks = failed;
info.spare_disks = spare;
info.layout = mddev->layout;
info.chunk_size = mddev->chunk_sectors << 9;
if (copy_to_user(arg, &info, sizeof(info)))
return -EFAULT;
return 0;
}
static int get_bitmap_file(struct mddev *mddev, void __user * arg)
{
mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
char *ptr;
int err;
file = kzalloc(sizeof(*file), GFP_NOIO);
if (!file)
return -ENOMEM;
err = 0;
spin_lock(&mddev->lock);
/* bitmap disabled, zero the first byte and copy out */
if (!mddev->bitmap_info.file)
file->pathname[0] = '\0';
else if ((ptr = file_path(mddev->bitmap_info.file,
file->pathname, sizeof(file->pathname))),
IS_ERR(ptr))
err = PTR_ERR(ptr);
else
memmove(file->pathname, ptr,
sizeof(file->pathname)-(ptr-file->pathname));
spin_unlock(&mddev->lock);
if (err == 0 &&
copy_to_user(arg, file, sizeof(*file)))
err = -EFAULT;
kfree(file);
return err;
}
static int get_disk_info(struct mddev *mddev, void __user * arg)
{
mdu_disk_info_t info;
struct md_rdev *rdev;
if (copy_from_user(&info, arg, sizeof(info)))
return -EFAULT;
rcu_read_lock();
rdev = md_find_rdev_nr_rcu(mddev, info.number);
if (rdev) {
info.major = MAJOR(rdev->bdev->bd_dev);
info.minor = MINOR(rdev->bdev->bd_dev);
info.raid_disk = rdev->raid_disk;
info.state = 0;
if (test_bit(Faulty, &rdev->flags))
info.state |= (1<<MD_DISK_FAULTY);
else if (test_bit(In_sync, &rdev->flags)) {
info.state |= (1<<MD_DISK_ACTIVE);
info.state |= (1<<MD_DISK_SYNC);
}
if (test_bit(WriteMostly, &rdev->flags))
info.state |= (1<<MD_DISK_WRITEMOSTLY);
} else {
info.major = info.minor = 0;
info.raid_disk = -1;
info.state = (1<<MD_DISK_REMOVED);
}
rcu_read_unlock();
if (copy_to_user(arg, &info, sizeof(info)))
return -EFAULT;
return 0;
}
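/* add_new_disk() serves the ADD_NEW_DISK ioctl in three modes: loading
* superblocks while assembling (raid_disks == 0), hot-adding a device
* to a running array (mddev->pers set), and building a new 0.90 array
* device by device.
*/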
static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
{
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
struct md_rdev *rdev;
dev_t dev = MKDEV(info->major,info->minor);
if (mddev_is_clustered(mddev) &&
!(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
pr_err("%s: Cannot add to clustered mddev.\n",
mdname(mddev));
return -EINVAL;
}
if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
return -EOVERFLOW;
if (!mddev->raid_disks) {
int err;
/* expecting a device which has a superblock */
rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
"md: md_import_device returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
}
if (!list_empty(&mddev->disks)) {
struct md_rdev *rdev0
= list_entry(mddev->disks.next,
struct md_rdev, same_set);
err = super_types[mddev->major_version]
.load_super(rdev, rdev0, mddev->minor_version);
if (err < 0) {
printk(KERN_WARNING
"md: %s has different UUID to %s\n",
bdevname(rdev->bdev,b),
bdevname(rdev0->bdev,b2));
export_rdev(rdev);
return -EINVAL;
}
}
err = bind_rdev_to_array(rdev, mddev);
if (err)
export_rdev(rdev);
return err;
}
/*
* add_new_disk can be used once the array is assembled
* to add "hot spares". They must already have a superblock
* written
*/
if (mddev->pers) {
int err;
if (!mddev->pers->hot_add_disk) {
printk(KERN_WARNING
"%s: personality does not support diskops!\n",
mdname(mddev));
return -EINVAL;
}
if (mddev->persistent)
rdev = md_import_device(dev, mddev->major_version,
mddev->minor_version);
else
rdev = md_import_device(dev, -1, -1);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
"md: md_import_device returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
}
/* set saved_raid_disk if appropriate */
if (!mddev->persistent) {
if (info->state & (1<<MD_DISK_SYNC) &&
info->raid_disk < mddev->raid_disks) {
rdev->raid_disk = info->raid_disk;
set_bit(In_sync, &rdev->flags);
clear_bit(Bitmap_sync, &rdev->flags);
} else
rdev->raid_disk = -1;
rdev->saved_raid_disk = rdev->raid_disk;
} else
super_types[mddev->major_version].
validate_super(mddev, rdev);
if ((info->state & (1<<MD_DISK_SYNC)) &&
rdev->raid_disk != info->raid_disk) {
/* This was a hot-add request, but the events don't
* match, so reject it.
*/
export_rdev(rdev);
return -EINVAL;
}
clear_bit(In_sync, &rdev->flags); /* just to be sure */
if (info->state & (1<<MD_DISK_WRITEMOSTLY))
set_bit(WriteMostly, &rdev->flags);
else
clear_bit(WriteMostly, &rdev->flags);
/*
* check whether the device shows up in other nodes
*/
if (mddev_is_clustered(mddev)) {
if (info->state & (1 << MD_DISK_CANDIDATE)) {
/* Through --cluster-confirm */
set_bit(Candidate, &rdev->flags);
err = md_cluster_ops->new_disk_ack(mddev, true);
if (err) {
export_rdev(rdev);
return err;
}
} else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
/* --add initiated by this node */
err = md_cluster_ops->add_new_disk_start(mddev, rdev);
if (err) {
md_cluster_ops->add_new_disk_finish(mddev);
export_rdev(rdev);
return err;
}
}
}
rdev->raid_disk = -1;
err = bind_rdev_to_array(rdev, mddev);
if (err)
export_rdev(rdev);
else
err = add_bound_rdev(rdev);
if (mddev_is_clustered(mddev) &&
(info->state & (1 << MD_DISK_CLUSTER_ADD)))
md_cluster_ops->add_new_disk_finish(mddev);
return err;
}
/* otherwise, add_new_disk is only allowed
* for major_version==0 superblocks
*/
if (mddev->major_version != 0) {
printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
mdname(mddev));
return -EINVAL;
}
if (!(info->state & (1<<MD_DISK_FAULTY))) {
int err;
rdev = md_import_device(dev, -1, 0);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
"md: error, md_import_device() returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
}
rdev->desc_nr = info->number;
if (info->raid_disk < mddev->raid_disks)
rdev->raid_disk = info->raid_disk;
else
rdev->raid_disk = -1;
if (rdev->raid_disk < mddev->raid_disks)
if (info->state & (1<<MD_DISK_SYNC))
set_bit(In_sync, &rdev->flags);
if (info->state & (1<<MD_DISK_WRITEMOSTLY))
set_bit(WriteMostly, &rdev->flags);
if (!mddev->persistent) {
printk(KERN_INFO "md: nonpersistent superblock ...\n");
rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
} else
rdev->sb_start = calc_dev_sboffset(rdev);
rdev->sectors = rdev->sb_start;
err = bind_rdev_to_array(rdev, mddev);
if (err) {
export_rdev(rdev);
return err;
}
}
return 0;
}
static int hot_remove_disk(struct mddev *mddev, dev_t dev)
{
char b[BDEVNAME_SIZE];
struct md_rdev *rdev;
rdev = find_rdev(mddev, dev);
if (!rdev)
return -ENXIO;
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_start(mddev);
clear_bit(Blocked, &rdev->flags);
remove_and_add_spares(mddev, rdev);
if (rdev->raid_disk >= 0)
goto busy;
if (mddev_is_clustered(mddev))
md_cluster_ops->remove_disk(mddev, rdev);
md_kick_rdev_from_array(rdev);
md_update_sb(mddev, 1);
md_new_event(mddev);
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
return 0;
busy:
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_cancel(mddev);
printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
bdevname(rdev->bdev,b), mdname(mddev));
return -EBUSY;
}
static int hot_add_disk(struct mddev *mddev, dev_t dev)
{
char b[BDEVNAME_SIZE];
int err;
struct md_rdev *rdev;
if (!mddev->pers)
return -ENODEV;
if (mddev->major_version != 0) {
printk(KERN_WARNING "%s: HOT_ADD may only be used with"
" version-0 superblocks.\n",
mdname(mddev));
return -EINVAL;
}
if (!mddev->pers->hot_add_disk) {
printk(KERN_WARNING
"%s: personality does not support diskops!\n",
mdname(mddev));
return -EINVAL;
}
rdev = md_import_device(dev, -1, 0);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
"md: error, md_import_device() returned %ld\n",
PTR_ERR(rdev));
return -EINVAL;
}
if (mddev->persistent)
rdev->sb_start = calc_dev_sboffset(rdev);
else
rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
rdev->sectors = rdev->sb_start;
if (test_bit(Faulty, &rdev->flags)) {
printk(KERN_WARNING
"md: can not hot-add faulty %s disk to %s!\n",
bdevname(rdev->bdev,b), mdname(mddev));
err = -EINVAL;
goto abort_export;
}
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_start(mddev);
clear_bit(In_sync, &rdev->flags);
rdev->desc_nr = -1;
rdev->saved_raid_disk = -1;
err = bind_rdev_to_array(rdev, mddev);
if (err)
goto abort_clustered;
/*
* The rest should better be atomic, we can have disk failures
* noticed in interrupt contexts ...
*/
rdev->raid_disk = -1;
md_update_sb(mddev, 1);
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
/*
* Kick recovery, maybe this spare has to be added to the
* array immediately.
*/
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
md_new_event(mddev);
return 0;
abort_clustered:
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_cancel(mddev);
abort_export:
export_rdev(rdev);
return err;
}
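/* SET_BITMAP_FILE: fd >= 0 attaches a regular, writable file as the
* write-intent bitmap; fd < 0 detaches and destroys the current bitmap.
*/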
static int set_bitmap_file(struct mddev *mddev, int fd)
{
int err = 0;
if (mddev->pers) {
if (!mddev->pers->quiesce || !mddev->thread)
return -EBUSY;
if (mddev->recovery || mddev->sync_thread)
return -EBUSY;
/* we should be able to change the bitmap.. */
}
if (fd >= 0) {
struct inode *inode;
struct file *f;
if (mddev->bitmap || mddev->bitmap_info.file)
return -EEXIST; /* cannot add when bitmap is present */
f = fget(fd);
if (f == NULL) {
printk(KERN_ERR "%s: error: failed to get bitmap file\n",
mdname(mddev));
return -EBADF;
}
inode = f->f_mapping->host;
if (!S_ISREG(inode->i_mode)) {
printk(KERN_ERR "%s: error: bitmap file must be a regular file\n",
mdname(mddev));
err = -EBADF;
} else if (!(f->f_mode & FMODE_WRITE)) {
printk(KERN_ERR "%s: error: bitmap file must open for write\n",
mdname(mddev));
err = -EBADF;
} else if (atomic_read(&inode->i_writecount) != 1) {
printk(KERN_ERR "%s: error: bitmap file is already in use\n",
mdname(mddev));
err = -EBUSY;
}
if (err) {
fput(f);
return err;
}
mddev->bitmap_info.file = f;
mddev->bitmap_info.offset = 0; /* file overrides offset */
} else if (mddev->bitmap == NULL)
return -ENOENT; /* cannot remove what isn't there */
err = 0;
if (mddev->pers) {
mddev->pers->quiesce(mddev, 1);
if (fd >= 0) {
struct bitmap *bitmap;
bitmap = bitmap_create(mddev, -1);
if (!IS_ERR(bitmap)) {
mddev->bitmap = bitmap;
err = bitmap_load(mddev);
} else
err = PTR_ERR(bitmap);
}
if (fd < 0 || err) {
bitmap_destroy(mddev);
fd = -1; /* make sure to put the file */
}
mddev->pers->quiesce(mddev, 0);
}
if (fd < 0) {
struct file *f = mddev->bitmap_info.file;
if (f) {
spin_lock(&mddev->lock);
mddev->bitmap_info.file = NULL;
spin_unlock(&mddev->lock);
fput(f);
}
}
return err;
}
/*
* set_array_info is used two different ways
* The original usage is when creating a new array.
* In this usage, raid_disks is > 0 and, together with
* level, size, not_persistent, layout and chunksize, determines the
* shape of the array.
* This will always create an array with a type-0.90.0 superblock.
* The newer usage is when assembling an array.
* In this case raid_disks will be 0, and the major_version field is
* used to determine which style super-blocks are to be found on the devices.
* The minor and patch _version numbers are also kept in case the
* super_block handler wishes to interpret them.
*/
static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
{
if (info->raid_disks == 0) {
/* just setting version number for superblock loading */
if (info->major_version < 0 ||
info->major_version >= ARRAY_SIZE(super_types) ||
super_types[info->major_version].name == NULL) {
/* maybe try to auto-load a module? */
printk(KERN_INFO
"md: superblock version %d not known\n",
info->major_version);
return -EINVAL;
}
mddev->major_version = info->major_version;
mddev->minor_version = info->minor_version;
mddev->patch_version = info->patch_version;
mddev->persistent = !info->not_persistent;
/* ensure mddev_put doesn't delete this now that there
* is some minimal configuration.
*/
mddev->ctime = get_seconds();
return 0;
}
mddev->major_version = MD_MAJOR_VERSION;
mddev->minor_version = MD_MINOR_VERSION;
mddev->patch_version = MD_PATCHLEVEL_VERSION;
mddev->ctime = get_seconds();
mddev->level = info->level;
mddev->clevel[0] = 0;
mddev->dev_sectors = 2 * (sector_t)info->size;
mddev->raid_disks = info->raid_disks;
/* don't set md_minor, it is determined by which /dev/md* was
* opened
*/
if (info->state & (1<<MD_SB_CLEAN))
mddev->recovery_cp = MaxSector;
else
mddev->recovery_cp = 0;
mddev->persistent = ! info->not_persistent;
mddev->external = 0;
mddev->layout = info->layout;
mddev->chunk_sectors = info->chunk_size >> 9;
mddev->max_disks = MD_SB_DISKS;
if (mddev->persistent)
mddev->flags = 0;
set_bit(MD_CHANGE_DEVS, &mddev->flags);
mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
mddev->bitmap_info.offset = 0;
mddev->reshape_position = MaxSector;
/*
* Generate a 128 bit UUID
*/
get_random_bytes(mddev->uuid, 16);
mddev->new_level = mddev->level;
mddev->new_chunk_sectors = mddev->chunk_sectors;
mddev->new_layout = mddev->layout;
mddev->delta_disks = 0;
mddev->reshape_backwards = 0;
return 0;
}
void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
{
WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
if (mddev->external_size)
return;
mddev->array_sectors = array_sectors;
}
EXPORT_SYMBOL(md_set_array_sectors);
static int update_size(struct mddev *mddev, sector_t num_sectors)
{
struct md_rdev *rdev;
int rv;
int fit = (num_sectors == 0);
if (mddev->pers->resize == NULL)
return -EINVAL;
/* The "num_sectors" is the number of sectors of each device that
* is used. This can only make sense for arrays with redundancy.
* linear and raid0 always use whatever space is available. We can only
* consider changing this number if no resync or reconstruction is
* happening, and if the new size is acceptable. It must fit before the
* sb_start or, if that is <data_offset, it must fit before the size
* of each device. If num_sectors is zero, we find the largest size
* that fits.
*/
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
mddev->sync_thread)
return -EBUSY;
if (mddev->ro)
return -EROFS;
rdev_for_each(rdev, mddev) {
sector_t avail = rdev->sectors;
if (fit && (num_sectors == 0 || num_sectors > avail))
num_sectors = avail;
if (avail < num_sectors)
return -ENOSPC;
}
rv = mddev->pers->resize(mddev, num_sectors);
if (!rv)
revalidate_disk(mddev->gendisk);
return rv;
}
static int update_raid_disks(struct mddev *mddev, int raid_disks)
{
int rv;
struct md_rdev *rdev;
/* change the number of raid disks */
if (mddev->pers->check_reshape == NULL)
return -EINVAL;
if (mddev->ro)
return -EROFS;
if (raid_disks <= 0 ||
(mddev->max_disks && raid_disks >= mddev->max_disks))
return -EINVAL;
if (mddev->sync_thread ||
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
mddev->reshape_position != MaxSector)
return -EBUSY;
rdev_for_each(rdev, mddev) {
if (mddev->raid_disks < raid_disks &&
rdev->data_offset < rdev->new_data_offset)
return -EINVAL;
if (mddev->raid_disks > raid_disks &&
rdev->data_offset > rdev->new_data_offset)
return -EINVAL;
}
mddev->delta_disks = raid_disks - mddev->raid_disks;
if (mddev->delta_disks < 0)
mddev->reshape_backwards = 1;
else if (mddev->delta_disks > 0)
mddev->reshape_backwards = 0;
rv = mddev->pers->check_reshape(mddev);
if (rv < 0) {
mddev->delta_disks = 0;
mddev->reshape_backwards = 0;
}
return rv;
}
/*
* update_array_info is used to change the configuration of an
* on-line array.
* The version, ctime, level, size, raid_disks, not_persistent, layout and chunk_size
* fields in the info are checked against the array.
* Any differences that cannot be handled will cause an error.
* Normally, only one change can be managed at a time.
*/
static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
{
int rv = 0;
int cnt = 0;
int state = 0;
/* calculate expected state, ignoring low bits */
if (mddev->bitmap && mddev->bitmap_info.offset)
state |= (1 << MD_SB_BITMAP_PRESENT);
if (mddev->major_version != info->major_version ||
mddev->minor_version != info->minor_version ||
/* mddev->patch_version != info->patch_version || */
mddev->ctime != info->ctime ||
mddev->level != info->level ||
/* mddev->layout != info->layout || */
mddev->persistent != !info->not_persistent ||
mddev->chunk_sectors != info->chunk_size >> 9 ||
/* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
((state^info->state) & 0xfffffe00)
)
return -EINVAL;
/* Check there is only one change */
if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
cnt++;
if (mddev->raid_disks != info->raid_disks)
cnt++;
if (mddev->layout != info->layout)
cnt++;
if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
cnt++;
if (cnt == 0)
return 0;
if (cnt > 1)
return -EINVAL;
if (mddev->layout != info->layout) {
/* Change layout
* we don't need to do anything at the md level, the
* personality will take care of it all.
*/
if (mddev->pers->check_reshape == NULL)
return -EINVAL;
else {
mddev->new_layout = info->layout;
rv = mddev->pers->check_reshape(mddev);
if (rv)
mddev->new_layout = mddev->layout;
return rv;
}
}
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_start(mddev);
if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
rv = update_size(mddev, (sector_t)info->size * 2);
if (mddev->raid_disks != info->raid_disks)
rv = update_raid_disks(mddev, info->raid_disks);
if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
rv = -EINVAL;
goto err;
}
if (mddev->recovery || mddev->sync_thread) {
rv = -EBUSY;
goto err;
}
if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
struct bitmap *bitmap;
/* add the bitmap */
if (mddev->bitmap) {
rv = -EEXIST;
goto err;
}
if (mddev->bitmap_info.default_offset == 0) {
rv = -EINVAL;
goto err;
}
mddev->bitmap_info.offset =
mddev->bitmap_info.default_offset;
mddev->bitmap_info.space =
mddev->bitmap_info.default_space;
mddev->pers->quiesce(mddev, 1);
bitmap = bitmap_create(mddev, -1);
if (!IS_ERR(bitmap)) {
mddev->bitmap = bitmap;
rv = bitmap_load(mddev);
} else
rv = PTR_ERR(bitmap);
if (rv)
bitmap_destroy(mddev);
mddev->pers->quiesce(mddev, 0);
} else {
/* remove the bitmap */
if (!mddev->bitmap) {
rv = -ENOENT;
goto err;
}
if (mddev->bitmap->storage.file) {
rv = -EINVAL;
goto err;
}
mddev->pers->quiesce(mddev, 1);
bitmap_destroy(mddev);
mddev->pers->quiesce(mddev, 0);
mddev->bitmap_info.offset = 0;
}
}
md_update_sb(mddev, 1);
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
return rv;
err:
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_cancel(mddev);
return rv;
}
static int set_disk_faulty(struct mddev *mddev, dev_t dev)
{
struct md_rdev *rdev;
int err = 0;
if (mddev->pers == NULL)
return -ENODEV;
rcu_read_lock();
rdev = find_rdev_rcu(mddev, dev);
if (!rdev)
err = -ENODEV;
else {
md_error(mddev, rdev);
if (!test_bit(Faulty, &rdev->flags))
err = -EBUSY;
}
rcu_read_unlock();
return err;
}
/*
* We have a problem here : there is no easy way to give a CHS
* virtual geometry. We currently pretend that we have a 2 heads
* 4 sectors (with a BIG number of cylinders...). This drives
* dosfs just mad... ;-)
*/
static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
struct mddev *mddev = bdev->bd_disk->private_data;
geo->heads = 2;
geo->sectors = 4;
geo->cylinders = mddev->array_sectors / 8;
return 0;
}
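/* Whitelist of ioctl commands md is willing to handle at all. */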
static inline bool md_ioctl_valid(unsigned int cmd)
{
switch (cmd) {
case ADD_NEW_DISK:
case BLKROSET:
case GET_ARRAY_INFO:
case GET_BITMAP_FILE:
case GET_DISK_INFO:
case HOT_ADD_DISK:
case HOT_REMOVE_DISK:
case RAID_AUTORUN:
case RAID_VERSION:
case RESTART_ARRAY_RW:
case RUN_ARRAY:
case SET_ARRAY_INFO:
case SET_BITMAP_FILE:
case SET_DISK_FAULTY:
case STOP_ARRAY:
case STOP_ARRAY_RO:
case CLUSTERED_DISK_NACK:
return true;
default:
return false;
}
}
static int md_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
int err = 0;
void __user *argp = (void __user *)arg;
struct mddev *mddev = NULL;
int ro;
if (!md_ioctl_valid(cmd))
return -ENOTTY;
switch (cmd) {
case RAID_VERSION:
case GET_ARRAY_INFO:
case GET_DISK_INFO:
break;
default:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
}
/*
* Commands dealing with the RAID driver but not any
* particular array:
*/
switch (cmd) {
case RAID_VERSION:
err = get_version(argp);
goto out;
#ifndef MODULE
case RAID_AUTORUN:
err = 0;
autostart_arrays(arg);
goto out;
#endif
default:;
}
/*
* Commands creating/starting a new array:
*/
mddev = bdev->bd_disk->private_data;
if (!mddev) {
BUG();
goto out;
}
	/* Some actions do not require the mutex */
switch (cmd) {
case GET_ARRAY_INFO:
if (!mddev->raid_disks && !mddev->external)
err = -ENODEV;
else
err = get_array_info(mddev, argp);
goto out;
case GET_DISK_INFO:
if (!mddev->raid_disks && !mddev->external)
err = -ENODEV;
else
err = get_disk_info(mddev, argp);
goto out;
case SET_DISK_FAULTY:
err = set_disk_faulty(mddev, new_decode_dev(arg));
goto out;
case GET_BITMAP_FILE:
err = get_bitmap_file(mddev, argp);
goto out;
}
if (cmd == ADD_NEW_DISK)
/* need to ensure md_delayed_delete() has completed */
flush_workqueue(md_misc_wq);
if (cmd == HOT_REMOVE_DISK)
/* need to ensure recovery thread has run */
wait_event_interruptible_timeout(mddev->sb_wait,
!test_bit(MD_RECOVERY_NEEDED,
&mddev->flags),
msecs_to_jiffies(5000));
if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
/* Need to flush page cache, and ensure no-one else opens
* and writes
*/
mutex_lock(&mddev->open_mutex);
if (mddev->pers && atomic_read(&mddev->openers) > 1) {
mutex_unlock(&mddev->open_mutex);
err = -EBUSY;
goto out;
}
set_bit(MD_STILL_CLOSED, &mddev->flags);
mutex_unlock(&mddev->open_mutex);
sync_blockdev(bdev);
}
err = mddev_lock(mddev);
if (err) {
printk(KERN_INFO
"md: ioctl lock interrupted, reason %d, cmd %d\n",
err, cmd);
goto out;
}
if (cmd == SET_ARRAY_INFO) {
mdu_array_info_t info;
if (!arg)
memset(&info, 0, sizeof(info));
else if (copy_from_user(&info, argp, sizeof(info))) {
err = -EFAULT;
goto unlock;
}
if (mddev->pers) {
err = update_array_info(mddev, &info);
if (err) {
printk(KERN_WARNING "md: couldn't update"
" array info. %d\n", err);
goto unlock;
}
goto unlock;
}
if (!list_empty(&mddev->disks)) {
printk(KERN_WARNING
"md: array %s already has disks!\n",
mdname(mddev));
err = -EBUSY;
goto unlock;
}
if (mddev->raid_disks) {
printk(KERN_WARNING
"md: array %s already initialised!\n",
mdname(mddev));
err = -EBUSY;
goto unlock;
}
err = set_array_info(mddev, &info);
if (err) {
printk(KERN_WARNING "md: couldn't set"
" array info. %d\n", err);
goto unlock;
}
goto unlock;
}
/*
* Commands querying/configuring an existing array:
*/
/* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
* RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
if ((!mddev->raid_disks && !mddev->external)
&& cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
&& cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
&& cmd != GET_BITMAP_FILE) {
err = -ENODEV;
goto unlock;
}
/*
* Commands even a read-only array can execute:
*/
switch (cmd) {
case RESTART_ARRAY_RW:
err = restart_array(mddev);
goto unlock;
case STOP_ARRAY:
err = do_md_stop(mddev, 0, bdev);
goto unlock;
case STOP_ARRAY_RO:
err = md_set_readonly(mddev, bdev);
goto unlock;
case HOT_REMOVE_DISK:
err = hot_remove_disk(mddev, new_decode_dev(arg));
goto unlock;
case ADD_NEW_DISK:
		/* We can support ADD_NEW_DISK on read-only arrays
		 * only if we are re-adding a preexisting device.
* So require mddev->pers and MD_DISK_SYNC.
*/
if (mddev->pers) {
mdu_disk_info_t info;
if (copy_from_user(&info, argp, sizeof(info)))
err = -EFAULT;
else if (!(info.state & (1<<MD_DISK_SYNC)))
/* Need to clear read-only for this */
break;
else
err = add_new_disk(mddev, &info);
goto unlock;
}
break;
case BLKROSET:
if (get_user(ro, (int __user *)(arg))) {
err = -EFAULT;
goto unlock;
}
err = -EINVAL;
/* if the bdev is going readonly the value of mddev->ro
* does not matter, no writes are coming
*/
if (ro)
goto unlock;
		/* are we already prepared for writes? */
if (mddev->ro != 1)
goto unlock;
/* transitioning to readauto need only happen for
* arrays that call md_write_start
*/
if (mddev->pers) {
err = restart_array(mddev);
if (err == 0) {
mddev->ro = 2;
set_disk_ro(mddev->gendisk, 0);
}
}
goto unlock;
}
/*
* The remaining ioctls are changing the state of the
* superblock, so we do not allow them on read-only arrays.
*/
if (mddev->ro && mddev->pers) {
if (mddev->ro == 2) {
mddev->ro = 0;
sysfs_notify_dirent_safe(mddev->sysfs_state);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
/* mddev_unlock will wake thread */
/* If a device failed while we were read-only, we
* need to make sure the metadata is updated now.
*/
if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
mddev_unlock(mddev);
wait_event(mddev->sb_wait,
!test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
!test_bit(MD_CHANGE_PENDING, &mddev->flags));
mddev_lock_nointr(mddev);
}
} else {
err = -EROFS;
goto unlock;
}
}
switch (cmd) {
case ADD_NEW_DISK:
{
mdu_disk_info_t info;
if (copy_from_user(&info, argp, sizeof(info)))
err = -EFAULT;
else
err = add_new_disk(mddev, &info);
goto unlock;
}
case CLUSTERED_DISK_NACK:
if (mddev_is_clustered(mddev))
md_cluster_ops->new_disk_ack(mddev, false);
else
err = -EINVAL;
goto unlock;
case HOT_ADD_DISK:
err = hot_add_disk(mddev, new_decode_dev(arg));
goto unlock;
case RUN_ARRAY:
err = do_md_run(mddev);
goto unlock;
case SET_BITMAP_FILE:
err = set_bitmap_file(mddev, (int)arg);
goto unlock;
default:
err = -EINVAL;
goto unlock;
}
unlock:
if (mddev->hold_active == UNTIL_IOCTL &&
err != -EINVAL)
mddev->hold_active = 0;
mddev_unlock(mddev);
out:
return err;
}
#ifdef CONFIG_COMPAT
static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case HOT_REMOVE_DISK:
case HOT_ADD_DISK:
case SET_DISK_FAULTY:
case SET_BITMAP_FILE:
/* These take in integer arg, do not convert */
break;
default:
arg = (unsigned long)compat_ptr(arg);
break;
}
return md_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */
static int md_open(struct block_device *bdev, fmode_t mode)
{
/*
* Succeed if we can lock the mddev, which confirms that
* it isn't being stopped right now.
*/
struct mddev *mddev = mddev_find(bdev->bd_dev);
int err;
if (!mddev)
return -ENODEV;
if (mddev->gendisk != bdev->bd_disk) {
/* we are racing with mddev_put which is discarding this
* bd_disk.
*/
mddev_put(mddev);
/* Wait until bdev->bd_disk is definitely gone */
flush_workqueue(md_misc_wq);
/* Then retry the open from the top */
return -ERESTARTSYS;
}
BUG_ON(mddev != bdev->bd_disk->private_data);
if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
goto out;
err = 0;
atomic_inc(&mddev->openers);
clear_bit(MD_STILL_CLOSED, &mddev->flags);
mutex_unlock(&mddev->open_mutex);
check_disk_change(bdev);
out:
return err;
}
static void md_release(struct gendisk *disk, fmode_t mode)
{
struct mddev *mddev = disk->private_data;
BUG_ON(!mddev);
atomic_dec(&mddev->openers);
mddev_put(mddev);
}
static int md_media_changed(struct gendisk *disk)
{
struct mddev *mddev = disk->private_data;
return mddev->changed;
}
static int md_revalidate(struct gendisk *disk)
{
struct mddev *mddev = disk->private_data;
mddev->changed = 0;
return 0;
}
static const struct block_device_operations md_fops =
{
.owner = THIS_MODULE,
.open = md_open,
.release = md_release,
.ioctl = md_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = md_compat_ioctl,
#endif
.getgeo = md_getgeo,
.media_changed = md_media_changed,
.revalidate_disk= md_revalidate,
};
static int md_thread(void *arg)
{
struct md_thread *thread = arg;
/*
	 * md_thread is a 'system-thread'; its priority should be very
* high. We avoid resource deadlocks individually in each
* raid personality. (RAID5 does preallocation) We also use RR and
* the very same RT priority as kswapd, thus we will never get
* into a priority inversion deadlock.
*
* we definitely have to have equal or higher priority than
* bdflush, otherwise bdflush will deadlock if there are too
* many dirty RAID5 blocks.
*/
allow_signal(SIGKILL);
while (!kthread_should_stop()) {
/* We need to wait INTERRUPTIBLE so that
* we don't add to the load-average.
* That means we need to be sure no signals are
* pending
*/
if (signal_pending(current))
flush_signals(current);
wait_event_interruptible_timeout
(thread->wqueue,
test_bit(THREAD_WAKEUP, &thread->flags)
|| kthread_should_stop(),
thread->timeout);
clear_bit(THREAD_WAKEUP, &thread->flags);
if (!kthread_should_stop())
thread->run(thread);
}
return 0;
}
void md_wakeup_thread(struct md_thread *thread)
{
if (thread) {
pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
set_bit(THREAD_WAKEUP, &thread->flags);
wake_up(&thread->wqueue);
}
}
EXPORT_SYMBOL(md_wakeup_thread);
struct md_thread *md_register_thread(void (*run) (struct md_thread *),
struct mddev *mddev, const char *name)
{
struct md_thread *thread;
thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
if (!thread)
return NULL;
init_waitqueue_head(&thread->wqueue);
thread->run = run;
thread->mddev = mddev;
thread->timeout = MAX_SCHEDULE_TIMEOUT;
thread->tsk = kthread_run(md_thread, thread,
"%s_%s",
mdname(thread->mddev),
name);
if (IS_ERR(thread->tsk)) {
kfree(thread);
return NULL;
}
return thread;
}
EXPORT_SYMBOL(md_register_thread);
void md_unregister_thread(struct md_thread **threadp)
{
struct md_thread *thread = *threadp;
if (!thread)
return;
pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
/* Locking ensures that mddev_unlock does not wake_up a
* non-existent thread
*/
spin_lock(&pers_lock);
*threadp = NULL;
spin_unlock(&pers_lock);
kthread_stop(thread->tsk);
kfree(thread);
}
EXPORT_SYMBOL(md_unregister_thread);
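/*
 * Hedged usage sketch of the thread API above (hypothetical personality
 * code, not part of this file). run() is invoked each time
 * md_wakeup_thread() sets THREAD_WAKEUP:
 */
#if 0
static void demo_run(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	/* periodic or event-driven per-array work goes here */
}

	/* in a personality's ->run method: */
	mddev->thread = md_register_thread(demo_run, mddev, "demo");
	if (!mddev->thread)
		return -ENOMEM;
	md_wakeup_thread(mddev->thread);
	/* ... and on shutdown: */
	md_unregister_thread(&mddev->thread);
#endif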
void md_error(struct mddev *mddev, struct md_rdev *rdev)
{
if (!rdev || test_bit(Faulty, &rdev->flags))
return;
if (!mddev->pers || !mddev->pers->error_handler)
return;
mddev->pers->error_handler(mddev,rdev);
if (mddev->degraded)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
sysfs_notify_dirent_safe(rdev->sysfs_state);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
if (mddev->event_work.func)
queue_work(md_misc_wq, &mddev->event_work);
md_new_event_inintr(mddev);
}
EXPORT_SYMBOL(md_error);
/* seq_file implementation /proc/mdstat */
static void status_unused(struct seq_file *seq)
{
int i = 0;
struct md_rdev *rdev;
seq_printf(seq, "unused devices: ");
list_for_each_entry(rdev, &pending_raid_disks, same_set) {
char b[BDEVNAME_SIZE];
i++;
seq_printf(seq, "%s ",
bdevname(rdev->bdev,b));
}
if (!i)
seq_printf(seq, "<none>");
seq_printf(seq, "\n");
}
static void status_resync(struct seq_file *seq, struct mddev *mddev)
{
sector_t max_sectors, resync, res;
unsigned long dt, db;
sector_t rt;
int scale;
unsigned int per_milli;
if (mddev->curr_resync <= 3)
resync = 0;
else
resync = mddev->curr_resync
- atomic_read(&mddev->recovery_active);
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
max_sectors = mddev->resync_max_sectors;
else
max_sectors = mddev->dev_sectors;
WARN_ON(max_sectors == 0);
/* Pick 'scale' such that (resync>>scale)*1000 will fit
* in a sector_t, and (max_sectors>>scale) will fit in a
* u32, as those are the requirements for sector_div.
* Thus 'scale' must be at least 10
*/
scale = 10;
if (sizeof(sector_t) > sizeof(unsigned long)) {
while ( max_sectors/2 > (1ULL<<(scale+32)))
scale++;
}
res = (resync>>scale)*1000;
sector_div(res, (u32)((max_sectors>>scale)+1));
per_milli = res;
{
int i, x = per_milli/50, y = 20-x;
seq_printf(seq, "[");
for (i = 0; i < x; i++)
seq_printf(seq, "=");
seq_printf(seq, ">");
for (i = 0; i < y; i++)
seq_printf(seq, ".");
seq_printf(seq, "] ");
}
seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
(test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
"reshape" :
(test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
"check" :
(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
"resync" : "recovery"))),
per_milli/10, per_milli % 10,
(unsigned long long) resync/2,
(unsigned long long) max_sectors/2);
/*
* dt: time from mark until now
* db: blocks written from mark until now
* rt: remaining time
*
* rt is a sector_t, so could be 32bit or 64bit.
* So we divide before multiply in case it is 32bit and close
* to the limit.
* We scale the divisor (db) by 32 to avoid losing precision
* near the end of resync when the number of remaining sectors
* is close to 'db'.
* We then divide rt by 32 after multiplying by db to compensate.
* The '+1' avoids division by zero if db is very small.
*/
dt = ((jiffies - mddev->resync_mark) / HZ);
if (!dt) dt++;
db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
- mddev->resync_mark_cnt;
rt = max_sectors - resync; /* number of remaining sectors */
sector_div(rt, db/32+1);
rt *= dt;
rt >>= 5;
seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
((unsigned long)rt % 60)/6);
seq_printf(seq, " speed=%ldK/sec", db/2/dt);
}
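/*
 * Worked example for the finish-time estimate above (illustrative
 * numbers): dt = 10s, db = 20480 sectors since the mark, and
 * rt = 1048576 sectors remaining:
 *
 *   rt / (db/32 + 1) = 1048576 / 641 = 1635
 *   1635 * dt        = 16350
 *   16350 >> 5       = 510 seconds  -> printed as "finish=8.5min"
 *
 * i.e. close to the exact rt*dt/db = 512s, while every intermediate
 * value stays comfortably within 32 bits.
 */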
static void *md_seq_start(struct seq_file *seq, loff_t *pos)
{
struct list_head *tmp;
loff_t l = *pos;
struct mddev *mddev;
if (l >= 0x10000)
return NULL;
if (!l--)
/* header */
return (void*)1;
spin_lock(&all_mddevs_lock);
list_for_each(tmp,&all_mddevs)
if (!l--) {
mddev = list_entry(tmp, struct mddev, all_mddevs);
mddev_get(mddev);
spin_unlock(&all_mddevs_lock);
return mddev;
}
spin_unlock(&all_mddevs_lock);
if (!l--)
return (void*)2;/* tail */
return NULL;
}
static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct list_head *tmp;
struct mddev *next_mddev, *mddev = v;
++*pos;
if (v == (void*)2)
return NULL;
spin_lock(&all_mddevs_lock);
if (v == (void*)1)
tmp = all_mddevs.next;
else
tmp = mddev->all_mddevs.next;
if (tmp != &all_mddevs)
next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
else {
next_mddev = (void*)2;
*pos = 0x10000;
}
spin_unlock(&all_mddevs_lock);
if (v != (void*)1)
mddev_put(mddev);
return next_mddev;
}
static void md_seq_stop(struct seq_file *seq, void *v)
{
struct mddev *mddev = v;
if (mddev && v != (void*)1 && v != (void*)2)
mddev_put(mddev);
}
static int md_seq_show(struct seq_file *seq, void *v)
{
struct mddev *mddev = v;
sector_t sectors;
struct md_rdev *rdev;
if (v == (void*)1) {
struct md_personality *pers;
seq_printf(seq, "Personalities : ");
spin_lock(&pers_lock);
list_for_each_entry(pers, &pers_list, list)
seq_printf(seq, "[%s] ", pers->name);
spin_unlock(&pers_lock);
seq_printf(seq, "\n");
seq->poll_event = atomic_read(&md_event_count);
return 0;
}
if (v == (void*)2) {
status_unused(seq);
return 0;
}
spin_lock(&mddev->lock);
if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
seq_printf(seq, "%s : %sactive", mdname(mddev),
mddev->pers ? "" : "in");
if (mddev->pers) {
if (mddev->ro==1)
seq_printf(seq, " (read-only)");
if (mddev->ro==2)
seq_printf(seq, " (auto-read-only)");
seq_printf(seq, " %s", mddev->pers->name);
}
sectors = 0;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev) {
char b[BDEVNAME_SIZE];
seq_printf(seq, " %s[%d]",
bdevname(rdev->bdev,b), rdev->desc_nr);
if (test_bit(WriteMostly, &rdev->flags))
seq_printf(seq, "(W)");
if (test_bit(Faulty, &rdev->flags)) {
seq_printf(seq, "(F)");
continue;
}
if (rdev->raid_disk < 0)
seq_printf(seq, "(S)"); /* spare */
if (test_bit(Replacement, &rdev->flags))
seq_printf(seq, "(R)");
sectors += rdev->sectors;
}
rcu_read_unlock();
if (!list_empty(&mddev->disks)) {
if (mddev->pers)
seq_printf(seq, "\n %llu blocks",
(unsigned long long)
mddev->array_sectors / 2);
else
seq_printf(seq, "\n %llu blocks",
(unsigned long long)sectors / 2);
}
if (mddev->persistent) {
if (mddev->major_version != 0 ||
mddev->minor_version != 90) {
seq_printf(seq," super %d.%d",
mddev->major_version,
mddev->minor_version);
}
} else if (mddev->external)
seq_printf(seq, " super external:%s",
mddev->metadata_type);
else
seq_printf(seq, " super non-persistent");
if (mddev->pers) {
mddev->pers->status(seq, mddev);
seq_printf(seq, "\n ");
if (mddev->pers->sync_request) {
if (mddev->curr_resync > 2) {
status_resync(seq, mddev);
seq_printf(seq, "\n ");
} else if (mddev->curr_resync >= 1)
seq_printf(seq, "\tresync=DELAYED\n ");
else if (mddev->recovery_cp < MaxSector)
seq_printf(seq, "\tresync=PENDING\n ");
}
} else
seq_printf(seq, "\n ");
bitmap_status(seq, mddev->bitmap);
seq_printf(seq, "\n");
}
spin_unlock(&mddev->lock);
return 0;
}
static const struct seq_operations md_seq_ops = {
.start = md_seq_start,
.next = md_seq_next,
.stop = md_seq_stop,
.show = md_seq_show,
};
static int md_seq_open(struct inode *inode, struct file *file)
{
struct seq_file *seq;
int error;
error = seq_open(file, &md_seq_ops);
if (error)
return error;
seq = file->private_data;
seq->poll_event = atomic_read(&md_event_count);
return error;
}
static int md_unloading;
static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
{
struct seq_file *seq = filp->private_data;
int mask;
if (md_unloading)
return POLLIN|POLLRDNORM|POLLERR|POLLPRI;
poll_wait(filp, &md_event_waiters, wait);
/* always allow read */
mask = POLLIN | POLLRDNORM;
if (seq->poll_event != atomic_read(&md_event_count))
mask |= POLLERR | POLLPRI;
return mask;
}
static const struct file_operations md_seq_fops = {
.owner = THIS_MODULE,
.open = md_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_private,
.poll = mdstat_poll,
};
int register_md_personality(struct md_personality *p)
{
printk(KERN_INFO "md: %s personality registered for level %d\n",
p->name, p->level);
spin_lock(&pers_lock);
list_add_tail(&p->list, &pers_list);
spin_unlock(&pers_lock);
return 0;
}
EXPORT_SYMBOL(register_md_personality);
int unregister_md_personality(struct md_personality *p)
{
printk(KERN_INFO "md: %s personality unregistered\n", p->name);
spin_lock(&pers_lock);
list_del_init(&p->list);
spin_unlock(&pers_lock);
return 0;
}
EXPORT_SYMBOL(unregister_md_personality);
int register_md_cluster_operations(struct md_cluster_operations *ops, struct module *module)
{
if (md_cluster_ops != NULL)
return -EALREADY;
spin_lock(&pers_lock);
md_cluster_ops = ops;
md_cluster_mod = module;
spin_unlock(&pers_lock);
return 0;
}
EXPORT_SYMBOL(register_md_cluster_operations);
int unregister_md_cluster_operations(void)
{
spin_lock(&pers_lock);
md_cluster_ops = NULL;
spin_unlock(&pers_lock);
return 0;
}
EXPORT_SYMBOL(unregister_md_cluster_operations);
int md_setup_cluster(struct mddev *mddev, int nodes)
{
int err;
err = request_module("md-cluster");
if (err) {
pr_err("md-cluster module not found.\n");
return -ENOENT;
}
spin_lock(&pers_lock);
if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
spin_unlock(&pers_lock);
return -ENOENT;
}
spin_unlock(&pers_lock);
return md_cluster_ops->join(mddev, nodes);
}
void md_cluster_stop(struct mddev *mddev)
{
if (!md_cluster_ops)
return;
md_cluster_ops->leave(mddev);
module_put(md_cluster_mod);
}
static int is_mddev_idle(struct mddev *mddev, int init)
{
struct md_rdev *rdev;
int idle;
int curr_events;
idle = 1;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev) {
struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
(int)part_stat_read(&disk->part0, sectors[1]) -
atomic_read(&disk->sync_io);
/* sync IO will cause sync_io to increase before the disk_stats
* as sync_io is counted when a request starts, and
* disk_stats is counted when it completes.
* So resync activity will cause curr_events to be smaller than
* when there was no such activity.
* non-sync IO will cause disk_stat to increase without
* increasing sync_io so curr_events will (eventually)
* be larger than it was before. Once it becomes
* substantially larger, the test below will cause
* the array to appear non-idle, and resync will slow
* down.
* If there is a lot of outstanding resync activity when
* we set last_event to curr_events, then all that activity
* completing might cause the array to appear non-idle
* and resync will be slowed down even though there might
* not have been non-resync activity. This will only
* happen once though. 'last_events' will soon reflect
		 * the state where there are few or no outstanding
* resync requests, and further resync activity will
* always make curr_events less than last_events.
*
*/
if (init || curr_events - rdev->last_events > 64) {
rdev->last_events = curr_events;
idle = 0;
}
}
rcu_read_unlock();
return idle;
}
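/*
 * Illustrative numbers for the idleness test above: if the disk has done
 * 10000 sectors of I/O in total and sync_io accounts for 9980 of them,
 * curr_events is 20. With last_events == -50 the delta is 70 > 64, so the
 * array is flagged non-idle and last_events becomes 20; from then on,
 * pure resync traffic only ever lowers curr_events, and the array reads
 * as idle again on subsequent calls.
 */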
void md_done_sync(struct mddev *mddev, int blocks, int ok)
{
/* another "blocks" (512byte) blocks have been synced */
atomic_sub(blocks, &mddev->recovery_active);
wake_up(&mddev->recovery_wait);
if (!ok) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
md_wakeup_thread(mddev->thread);
// stop recovery, signal do_sync ....
}
}
EXPORT_SYMBOL(md_done_sync);
/* md_write_start(mddev, bi)
* If we need to update some array metadata (e.g. 'active' flag
* in superblock) before writing, schedule a superblock update
* and wait for it to complete.
*/
void md_write_start(struct mddev *mddev, struct bio *bi)
{
int did_change = 0;
if (bio_data_dir(bi) != WRITE)
return;
BUG_ON(mddev->ro == 1);
if (mddev->ro == 2) {
/* need to switch to read/write */
mddev->ro = 0;
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread);
did_change = 1;
}
atomic_inc(&mddev->writes_pending);
if (mddev->safemode == 1)
mddev->safemode = 0;
if (mddev->in_sync) {
spin_lock(&mddev->lock);
if (mddev->in_sync) {
mddev->in_sync = 0;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
set_bit(MD_CHANGE_PENDING, &mddev->flags);
md_wakeup_thread(mddev->thread);
did_change = 1;
}
spin_unlock(&mddev->lock);
}
if (did_change)
sysfs_notify_dirent_safe(mddev->sysfs_state);
wait_event(mddev->sb_wait,
!test_bit(MD_CHANGE_PENDING, &mddev->flags));
}
EXPORT_SYMBOL(md_write_start);
void md_write_end(struct mddev *mddev)
{
if (atomic_dec_and_test(&mddev->writes_pending)) {
if (mddev->safemode == 2)
md_wakeup_thread(mddev->thread);
else if (mddev->safemode_delay)
mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
}
}
EXPORT_SYMBOL(md_write_end);
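/*
 * Hedged usage sketch (hypothetical personality code): each write is
 * bracketed by the pair above so 'writes_pending' and the superblock
 * 'active' state stay consistent:
 */
#if 0
static void demo_make_request(struct mddev *mddev, struct bio *bio)
{
	md_write_start(mddev, bio);	/* may block on a superblock update */
	/* ... submit the write; then, once it completes: */
	md_write_end(mddev);		/* arms the safemode timer when idle */
}
#endif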
/* md_allow_write(mddev)
* Calling this ensures that the array is marked 'active' so that writes
* may proceed without blocking. It is important to call this before
* attempting a GFP_KERNEL allocation while holding the mddev lock.
* Must be called with mddev_lock held.
*
* In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock
* is dropped, so return -EAGAIN after notifying userspace.
*/
int md_allow_write(struct mddev *mddev)
{
if (!mddev->pers)
return 0;
if (mddev->ro)
return 0;
if (!mddev->pers->sync_request)
return 0;
spin_lock(&mddev->lock);
if (mddev->in_sync) {
mddev->in_sync = 0;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
set_bit(MD_CHANGE_PENDING, &mddev->flags);
if (mddev->safemode_delay &&
mddev->safemode == 0)
mddev->safemode = 1;
spin_unlock(&mddev->lock);
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_start(mddev);
md_update_sb(mddev, 0);
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
sysfs_notify_dirent_safe(mddev->sysfs_state);
} else
spin_unlock(&mddev->lock);
if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
return -EAGAIN;
else
return 0;
}
EXPORT_SYMBOL_GPL(md_allow_write);
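/*
 * Hedged usage sketch: per the comment above, call md_allow_write()
 * before a GFP_KERNEL allocation made while holding the mddev lock, and
 * let external-metadata callers retry on -EAGAIN:
 */
#if 0
	err = md_allow_write(mddev);
	if (err)	/* -EAGAIN: userspace must clear the 'clean' state first */
		return err;
	new = kzalloc(sizeof(*new), GFP_KERNEL);	/* now safe to block */
#endif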
#define SYNC_MARKS 10
#define SYNC_MARK_STEP (3*HZ)
#define UPDATE_FREQUENCY (5*60*HZ)
void md_do_sync(struct md_thread *thread)
{
struct mddev *mddev = thread->mddev;
struct mddev *mddev2;
unsigned int currspeed = 0,
window;
sector_t max_sectors,j, io_sectors, recovery_done;
unsigned long mark[SYNC_MARKS];
unsigned long update_time;
sector_t mark_cnt[SYNC_MARKS];
int last_mark,m;
struct list_head *tmp;
sector_t last_check;
int skipped = 0;
struct md_rdev *rdev;
char *desc, *action = NULL;
struct blk_plug plug;
	/* just in case the thread restarts... */
if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
return;
if (mddev->ro) {/* never try to sync a read-only array */
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
return;
}
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
desc = "data-check";
action = "check";
} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
desc = "requested-resync";
action = "repair";
} else
desc = "resync";
} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
desc = "reshape";
else
desc = "recovery";
mddev->last_sync_action = action ?: desc;
/* we overload curr_resync somewhat here.
* 0 == not engaged in resync at all
* 2 == checking that there is no conflict with another sync
	 * 1 == like 2, but we have yielded to allow a conflicting resync to
	 *    commence
* other == active in resync - this many blocks
*
* Before starting a resync we must have set curr_resync to
* 2, and then checked that every "conflicting" array has curr_resync
* less than ours. When we find one that is the same or higher
* we wait on resync_wait. To avoid deadlock, we reduce curr_resync
* to 1 if we choose to yield (based arbitrarily on address of mddev structure).
* This will mean we have to start checking from the beginning again.
*
*/
do {
mddev->curr_resync = 2;
try_again:
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
goto skip;
for_each_mddev(mddev2, tmp) {
if (mddev2 == mddev)
continue;
if (!mddev->parallel_resync
&& mddev2->curr_resync
&& match_mddev_units(mddev, mddev2)) {
DEFINE_WAIT(wq);
if (mddev < mddev2 && mddev->curr_resync == 2) {
/* arbitrarily yield */
mddev->curr_resync = 1;
wake_up(&resync_wait);
}
if (mddev > mddev2 && mddev->curr_resync == 1)
/* no need to wait here, we can wait the next
* time 'round when curr_resync == 2
*/
continue;
/* We need to wait 'interruptible' so as not to
* contribute to the load average, and not to
* be caught by 'softlockup'
*/
prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
mddev2->curr_resync >= mddev->curr_resync) {
printk(KERN_INFO "md: delaying %s of %s"
" until %s has finished (they"
" share one or more physical units)\n",
desc, mdname(mddev), mdname(mddev2));
mddev_put(mddev2);
if (signal_pending(current))
flush_signals(current);
schedule();
finish_wait(&resync_wait, &wq);
goto try_again;
}
finish_wait(&resync_wait, &wq);
}
}
} while (mddev->curr_resync < 2);
j = 0;
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
/* resync follows the size requested by the personality,
* which defaults to physical size, but can be virtual size
*/
max_sectors = mddev->resync_max_sectors;
atomic64_set(&mddev->resync_mismatches, 0);
/* we don't use the checkpoint if there's a bitmap */
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
j = mddev->resync_min;
else if (!mddev->bitmap)
j = mddev->recovery_cp;
} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
max_sectors = mddev->resync_max_sectors;
else {
/* recovery follows the physical size of devices */
max_sectors = mddev->dev_sectors;
j = MaxSector;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev)
if (rdev->raid_disk >= 0 &&
!test_bit(Faulty, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags) &&
rdev->recovery_offset < j)
j = rdev->recovery_offset;
rcu_read_unlock();
/* If there is a bitmap, we need to make sure all
* writes that started before we added a spare
* complete before we start doing a recovery.
* Otherwise the write might complete and (via
* bitmap_endwrite) set a bit in the bitmap after the
* recovery has checked that bit and skipped that
* region.
*/
if (mddev->bitmap) {
mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0);
}
}
printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
printk(KERN_INFO "md: minimum _guaranteed_ speed:"
" %d KB/sec/disk.\n", speed_min(mddev));
printk(KERN_INFO "md: using maximum available idle IO bandwidth "
"(but not more than %d KB/sec) for %s.\n",
speed_max(mddev), desc);
is_mddev_idle(mddev, 1); /* this initializes IO event counters */
io_sectors = 0;
for (m = 0; m < SYNC_MARKS; m++) {
mark[m] = jiffies;
mark_cnt[m] = io_sectors;
}
last_mark = 0;
mddev->resync_mark = mark[last_mark];
mddev->resync_mark_cnt = mark_cnt[last_mark];
/*
* Tune reconstruction:
*/
window = 32*(PAGE_SIZE/512);
printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n",
window/2, (unsigned long long)max_sectors/2);
atomic_set(&mddev->recovery_active, 0);
last_check = 0;
if (j>2) {
printk(KERN_INFO
"md: resuming %s of %s from checkpoint.\n",
desc, mdname(mddev));
mddev->curr_resync = j;
} else
mddev->curr_resync = 3; /* no longer delayed */
mddev->curr_resync_completed = j;
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
md_new_event(mddev);
update_time = jiffies;
if (mddev_is_clustered(mddev))
md_cluster_ops->resync_start(mddev, j, max_sectors);
blk_start_plug(&plug);
while (j < max_sectors) {
sector_t sectors;
skipped = 0;
if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
((mddev->curr_resync > mddev->curr_resync_completed &&
(mddev->curr_resync - mddev->curr_resync_completed)
> (max_sectors >> 4)) ||
time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
(j - mddev->curr_resync_completed)*2
>= mddev->resync_max - mddev->curr_resync_completed
)) {
/* time to update curr_resync_completed */
wait_event(mddev->recovery_wait,
atomic_read(&mddev->recovery_active) == 0);
mddev->curr_resync_completed = j;
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
j > mddev->recovery_cp)
mddev->recovery_cp = j;
update_time = jiffies;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
while (j >= mddev->resync_max &&
!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
/* As this condition is controlled by user-space,
* we can block indefinitely, so use '_interruptible'
* to avoid triggering warnings.
*/
flush_signals(current); /* just in case */
wait_event_interruptible(mddev->recovery_wait,
mddev->resync_max > j
|| test_bit(MD_RECOVERY_INTR,
&mddev->recovery));
}
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
break;
sectors = mddev->pers->sync_request(mddev, j, &skipped);
if (sectors == 0) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
break;
}
if (!skipped) { /* actual IO requested */
io_sectors += sectors;
atomic_add(sectors, &mddev->recovery_active);
}
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
break;
j += sectors;
if (j > 2)
mddev->curr_resync = j;
if (mddev_is_clustered(mddev))
md_cluster_ops->resync_info_update(mddev, j, max_sectors);
mddev->curr_mark_cnt = io_sectors;
if (last_check == 0)
/* this is the earliest that rebuild will be
* visible in /proc/mdstat
*/
md_new_event(mddev);
if (last_check + window > io_sectors || j == max_sectors)
continue;
last_check = io_sectors;
repeat:
if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
/* step marks */
int next = (last_mark+1) % SYNC_MARKS;
mddev->resync_mark = mark[next];
mddev->resync_mark_cnt = mark_cnt[next];
mark[next] = jiffies;
mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
last_mark = next;
}
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
break;
/*
		 * this loop exits only when we are slower than
* the 'hard' speed limit, or the system was IO-idle for
* a jiffy.
* the system might be non-idle CPU-wise, but we only care
* about not overloading the IO subsystem. (things like an
* e2fsck being done on the RAID array should execute fast)
*/
cond_resched();
recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
/((jiffies-mddev->resync_mark)/HZ +1) +1;
if (currspeed > speed_min(mddev)) {
if (currspeed > speed_max(mddev)) {
msleep(500);
goto repeat;
}
if (!is_mddev_idle(mddev, 0)) {
/*
* Give other IO more of a chance.
* The faster the devices, the less we wait.
*/
wait_event(mddev->recovery_wait,
!atomic_read(&mddev->recovery_active));
}
}
}
printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc,
test_bit(MD_RECOVERY_INTR, &mddev->recovery)
? "interrupted" : "done");
/*
* this also signals 'finished resyncing' to md_stop
*/
blk_finish_plug(&plug);
wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
/* tell personality that we are finished */
mddev->pers->sync_request(mddev, max_sectors, &skipped);
if (mddev_is_clustered(mddev))
md_cluster_ops->resync_finish(mddev);
if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
mddev->curr_resync > 2) {
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
if (mddev->curr_resync >= mddev->recovery_cp) {
printk(KERN_INFO
"md: checkpointing %s of %s.\n",
desc, mdname(mddev));
if (test_bit(MD_RECOVERY_ERROR,
&mddev->recovery))
mddev->recovery_cp =
mddev->curr_resync_completed;
else
mddev->recovery_cp =
mddev->curr_resync;
}
} else
mddev->recovery_cp = MaxSector;
} else {
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
mddev->curr_resync = MaxSector;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev)
if (rdev->raid_disk >= 0 &&
mddev->delta_disks >= 0 &&
!test_bit(Faulty, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags) &&
rdev->recovery_offset < mddev->curr_resync)
rdev->recovery_offset = mddev->curr_resync;
rcu_read_unlock();
}
}
skip:
set_bit(MD_CHANGE_DEVS, &mddev->flags);
spin_lock(&mddev->lock);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
/* We completed so min/max setting can be forgotten if used. */
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
mddev->resync_min = 0;
mddev->resync_max = MaxSector;
} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
mddev->resync_min = mddev->curr_resync_completed;
mddev->curr_resync = 0;
spin_unlock(&mddev->lock);
wake_up(&resync_wait);
set_bit(MD_RECOVERY_DONE, &mddev->recovery);
md_wakeup_thread(mddev->thread);
return;
}
EXPORT_SYMBOL_GPL(md_do_sync);
static int remove_and_add_spares(struct mddev *mddev,
struct md_rdev *this)
{
struct md_rdev *rdev;
int spares = 0;
int removed = 0;
rdev_for_each(rdev, mddev)
if ((this == NULL || rdev == this) &&
rdev->raid_disk >= 0 &&
!test_bit(Blocked, &rdev->flags) &&
(test_bit(Faulty, &rdev->flags) ||
! test_bit(In_sync, &rdev->flags)) &&
atomic_read(&rdev->nr_pending)==0) {
if (mddev->pers->hot_remove_disk(
mddev, rdev) == 0) {
sysfs_unlink_rdev(mddev, rdev);
rdev->raid_disk = -1;
removed++;
}
}
if (removed && mddev->kobj.sd)
sysfs_notify(&mddev->kobj, NULL, "degraded");
if (this)
goto no_add;
rdev_for_each(rdev, mddev) {
if (rdev->raid_disk >= 0 &&
!test_bit(In_sync, &rdev->flags) &&
!test_bit(Faulty, &rdev->flags))
spares++;
if (rdev->raid_disk >= 0)
continue;
if (test_bit(Faulty, &rdev->flags))
continue;
if (mddev->ro &&
! (rdev->saved_raid_disk >= 0 &&
!test_bit(Bitmap_sync, &rdev->flags)))
continue;
if (rdev->saved_raid_disk < 0)
rdev->recovery_offset = 0;
if (mddev->pers->
hot_add_disk(mddev, rdev) == 0) {
if (sysfs_link_rdev(mddev, rdev))
/* failure here is OK */;
spares++;
md_new_event(mddev);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
}
}
no_add:
if (removed)
set_bit(MD_CHANGE_DEVS, &mddev->flags);
return spares;
}
static void md_start_sync(struct work_struct *ws)
{
struct mddev *mddev = container_of(ws, struct mddev, del_work);
mddev->sync_thread = md_register_thread(md_do_sync,
mddev,
"resync");
if (!mddev->sync_thread) {
printk(KERN_ERR "%s: could not start resync"
" thread...\n",
mdname(mddev));
/* leave the spares where they are, it shouldn't hurt */
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
wake_up(&resync_wait);
if (test_and_clear_bit(MD_RECOVERY_RECOVER,
&mddev->recovery))
if (mddev->sysfs_action)
sysfs_notify_dirent_safe(mddev->sysfs_action);
} else
md_wakeup_thread(mddev->sync_thread);
sysfs_notify_dirent_safe(mddev->sysfs_action);
md_new_event(mddev);
}
/*
* This routine is regularly called by all per-raid-array threads to
* deal with generic issues like resync and super-block update.
* Raid personalities that don't have a thread (linear/raid0) do not
* need this as they never do any recovery or update the superblock.
*
* It does not do any resync itself, but rather "forks" off other threads
* to do that as needed.
* When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
* "->recovery" and create a thread at ->sync_thread.
* When the thread finishes it sets MD_RECOVERY_DONE
 * and wakes up this thread, which will reap the thread and finish up.
* This thread also removes any faulty devices (with nr_pending == 0).
*
* The overall approach is:
* 1/ if the superblock needs updating, update it.
* 2/ If a recovery thread is running, don't do anything else.
* 3/ If recovery has finished, clean up, possibly marking spares active.
* 4/ If there are any faulty devices, remove them.
 * 5/ If the array is degraded, try to add spare devices
* 6/ If array has spares or is not in-sync, start a resync thread.
*/
void md_check_recovery(struct mddev *mddev)
{
if (mddev->suspended)
return;
if (mddev->bitmap)
bitmap_daemon_work(mddev);
if (signal_pending(current)) {
if (mddev->pers->sync_request && !mddev->external) {
printk(KERN_INFO "md: %s in immediate safe mode\n",
mdname(mddev));
mddev->safemode = 2;
}
flush_signals(current);
}
if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
return;
if ( ! (
(mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
(mddev->external == 0 && mddev->safemode == 1) ||
(mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
&& !mddev->in_sync && mddev->recovery_cp == MaxSector)
))
return;
if (mddev_trylock(mddev)) {
int spares = 0;
if (mddev->ro) {
struct md_rdev *rdev;
if (!mddev->external && mddev->in_sync)
/* 'Blocked' flag not needed as failed devices
* will be recorded if array switched to read/write.
* Leaving it set will prevent the device
* from being removed.
*/
rdev_for_each(rdev, mddev)
clear_bit(Blocked, &rdev->flags);
/* On a read-only array we can:
* - remove failed devices
* - add already-in_sync devices if the array itself
* is in-sync.
* As we only add devices that are already in-sync,
* we can activate the spares immediately.
*/
remove_and_add_spares(mddev, NULL);
/* There is no thread, but we need to call
* ->spare_active and clear saved_raid_disk
*/
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_reap_sync_thread(mddev);
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
goto unlock;
}
if (!mddev->external) {
int did_change = 0;
spin_lock(&mddev->lock);
if (mddev->safemode &&
!atomic_read(&mddev->writes_pending) &&
!mddev->in_sync &&
mddev->recovery_cp == MaxSector) {
mddev->in_sync = 1;
did_change = 1;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
}
if (mddev->safemode == 1)
mddev->safemode = 0;
spin_unlock(&mddev->lock);
if (did_change)
sysfs_notify_dirent_safe(mddev->sysfs_state);
}
if (mddev->flags & MD_UPDATE_SB_FLAGS) {
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_start(mddev);
md_update_sb(mddev, 0);
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
}
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
/* resync/recovery still happening */
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
goto unlock;
}
if (mddev->sync_thread) {
md_reap_sync_thread(mddev);
goto unlock;
}
/* Set RUNNING before clearing NEEDED to avoid
* any transients in the value of "sync_action".
*/
mddev->curr_resync_completed = 0;
spin_lock(&mddev->lock);
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
spin_unlock(&mddev->lock);
/* Clear some bits that don't mean anything, but
* might be left set
*/
clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
goto not_running;
/* no recovery is running.
* remove any failed drives, then
* add spares if possible.
* Spares are also removed and re-added, to allow
* the personality to fail the re-add.
*/
if (mddev->reshape_position != MaxSector) {
if (mddev->pers->check_reshape == NULL ||
mddev->pers->check_reshape(mddev) != 0)
/* Cannot proceed */
goto not_running;
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
} else if ((spares = remove_and_add_spares(mddev, NULL))) {
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
} else if (mddev->recovery_cp < MaxSector) {
set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
/* nothing to be done ... */
goto not_running;
if (mddev->pers->sync_request) {
if (spares) {
/* We are adding a device or devices to an array
* which has the bitmap stored on all devices.
* So make sure all bitmap pages get written
*/
bitmap_write_all(mddev->bitmap);
}
INIT_WORK(&mddev->del_work, md_start_sync);
queue_work(md_misc_wq, &mddev->del_work);
goto unlock;
}
not_running:
if (!mddev->sync_thread) {
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
wake_up(&resync_wait);
if (test_and_clear_bit(MD_RECOVERY_RECOVER,
&mddev->recovery))
if (mddev->sysfs_action)
sysfs_notify_dirent_safe(mddev->sysfs_action);
}
unlock:
wake_up(&mddev->sb_wait);
mddev_unlock(mddev);
}
}
EXPORT_SYMBOL(md_check_recovery);
void md_reap_sync_thread(struct mddev *mddev)
{
struct md_rdev *rdev;
/* resync has finished, collect result */
md_unregister_thread(&mddev->sync_thread);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* success...*/
/* activate any spares */
if (mddev->pers->spare_active(mddev)) {
sysfs_notify(&mddev->kobj, NULL,
"degraded");
set_bit(MD_CHANGE_DEVS, &mddev->flags);
}
}
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_start(mddev);
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
mddev->pers->finish_reshape)
mddev->pers->finish_reshape(mddev);
	/* If the array is no longer degraded, then any saved_raid_disk
* information must be scrapped.
*/
if (!mddev->degraded)
rdev_for_each(rdev, mddev)
rdev->saved_raid_disk = -1;
md_update_sb(mddev, 1);
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
wake_up(&resync_wait);
/* flag recovery needed just to double check */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
sysfs_notify_dirent_safe(mddev->sysfs_action);
md_new_event(mddev);
if (mddev->event_work.func)
queue_work(md_misc_wq, &mddev->event_work);
}
EXPORT_SYMBOL(md_reap_sync_thread);
void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
sysfs_notify_dirent_safe(rdev->sysfs_state);
wait_event_timeout(rdev->blocked_wait,
!test_bit(Blocked, &rdev->flags) &&
!test_bit(BlockedBadBlocks, &rdev->flags),
msecs_to_jiffies(5000));
rdev_dec_pending(rdev, mddev);
}
EXPORT_SYMBOL(md_wait_for_blocked_rdev);
void md_finish_reshape(struct mddev *mddev)
{
	/* called by the personality module when reshape completes. */
struct md_rdev *rdev;
rdev_for_each(rdev, mddev) {
if (rdev->data_offset > rdev->new_data_offset)
rdev->sectors += rdev->data_offset - rdev->new_data_offset;
else
rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
rdev->data_offset = rdev->new_data_offset;
}
}
EXPORT_SYMBOL(md_finish_reshape);
/* Bad block management.
* We can record which blocks on each device are 'bad' and so just
* fail those blocks, or that stripe, rather than the whole device.
* Entries in the bad-block table are 64bits wide. This comprises:
* Length of bad-range, in sectors: 0-511 for lengths 1-512
* Start of bad-range, sector offset, 54 bits (allows 8 exbibytes)
* A 'shift' can be set so that larger blocks are tracked and
* consequently larger devices can be covered.
* 'Acknowledged' flag - 1 bit. - the most significant bit.
*
* Locking of the bad-block table uses a seqlock so md_is_badblock
* might need to retry if it is very unlucky.
* We will sometimes want to check for bad blocks in a bi_end_io function,
* so we use the write_seqlock_irq variant.
*
* When looking for a bad block we specify a range and want to
* know if any block in the range is bad. So we binary-search
* to the last range that starts at-or-before the given endpoint,
* (or "before the sector after the target range")
* then see if it ends after the given start.
 * We return
 *  0 if there are no known bad blocks in the range
 *  1 if there are known bad blocks which are all acknowledged
 * -1 if there are bad blocks which have not yet been acknowledged in metadata.
 * plus the start/length of the first bad section we overlap.
*/
int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
sector_t *first_bad, int *bad_sectors)
{
int hi;
int lo;
u64 *p = bb->page;
int rv;
sector_t target = s + sectors;
unsigned seq;
if (bb->shift > 0) {
/* round the start down, and the end up */
s >>= bb->shift;
target += (1<<bb->shift) - 1;
target >>= bb->shift;
sectors = target - s;
}
/* 'target' is now the first block after the bad range */
retry:
seq = read_seqbegin(&bb->lock);
lo = 0;
rv = 0;
hi = bb->count;
/* Binary search between lo and hi for 'target'
* i.e. for the last range that starts before 'target'
*/
/* INVARIANT: ranges before 'lo' and at-or-after 'hi'
* are known not to be the last range before target.
* VARIANT: hi-lo is the number of possible
* ranges, and decreases until it reaches 1
*/
while (hi - lo > 1) {
int mid = (lo + hi) / 2;
sector_t a = BB_OFFSET(p[mid]);
if (a < target)
/* This could still be the one, earlier ranges
* could not. */
lo = mid;
else
/* This and later ranges are definitely out. */
hi = mid;
}
/* 'lo' might be the last that started before target, but 'hi' isn't */
if (hi > lo) {
		/* need to check all ranges that end after 's' to see if
* any are unacknowledged.
*/
while (lo >= 0 &&
BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
if (BB_OFFSET(p[lo]) < target) {
/* starts before the end, and finishes after
* the start, so they must overlap
*/
if (rv != -1 && BB_ACK(p[lo]))
rv = 1;
else
rv = -1;
*first_bad = BB_OFFSET(p[lo]);
*bad_sectors = BB_LEN(p[lo]);
}
lo--;
}
}
if (read_seqretry(&bb->lock, seq))
goto retry;
return rv;
}
EXPORT_SYMBOL_GPL(md_is_badblock);
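/*
 * Illustrative encoding of the 64-bit entries described above (matching
 * the documented layout: 9-bit length-minus-one, 54-bit start, 1 ack bit
 * at the top): an acknowledged 8-sector range starting at sector 4096
 * would be stored as
 *
 *   (1ULL << 63) | (4096ULL << 9) | (8 - 1)
 *
 * so BB_OFFSET() reads bits 9..62, BB_LEN() returns (entry & 511) + 1,
 * and BB_ACK() tests the top bit.
 */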
/*
* Add a range of bad blocks to the table.
* This might extend the table, or might contract it
* if two adjacent ranges can be merged.
* We binary-search to find the 'insertion' point, then
* decide how best to handle it.
*/
static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
int acknowledged)
{
u64 *p;
int lo, hi;
int rv = 1;
unsigned long flags;
if (bb->shift < 0)
/* badblocks are disabled */
return 0;
if (bb->shift) {
/* round the start down, and the end up */
sector_t next = s + sectors;
s >>= bb->shift;
next += (1<<bb->shift) - 1;
next >>= bb->shift;
sectors = next - s;
}
write_seqlock_irqsave(&bb->lock, flags);
p = bb->page;
lo = 0;
hi = bb->count;
/* Find the last range that starts at-or-before 's' */
while (hi - lo > 1) {
int mid = (lo + hi) / 2;
sector_t a = BB_OFFSET(p[mid]);
if (a <= s)
lo = mid;
else
hi = mid;
}
if (hi > lo && BB_OFFSET(p[lo]) > s)
hi = lo;
if (hi > lo) {
/* we found a range that might merge with the start
* of our new range
*/
sector_t a = BB_OFFSET(p[lo]);
sector_t e = a + BB_LEN(p[lo]);
int ack = BB_ACK(p[lo]);
if (e >= s) {
/* Yes, we can merge with a previous range */
if (s == a && s + sectors >= e)
/* new range covers old */
ack = acknowledged;
else
ack = ack && acknowledged;
if (e < s + sectors)
e = s + sectors;
if (e - a <= BB_MAX_LEN) {
p[lo] = BB_MAKE(a, e-a, ack);
s = e;
} else {
/* does not all fit in one range,
* make p[lo] maximal
*/
if (BB_LEN(p[lo]) != BB_MAX_LEN)
p[lo] = BB_MAKE(a, BB_MAX_LEN, ack);
s = a + BB_MAX_LEN;
}
sectors = e - s;
}
}
if (sectors && hi < bb->count) {
/* 'hi' points to the first range that starts after 's'.
* Maybe we can merge with the start of that range */
sector_t a = BB_OFFSET(p[hi]);
sector_t e = a + BB_LEN(p[hi]);
int ack = BB_ACK(p[hi]);
if (a <= s + sectors) {
/* merging is possible */
if (e <= s + sectors) {
/* full overlap */
e = s + sectors;
ack = acknowledged;
} else
ack = ack && acknowledged;
a = s;
if (e - a <= BB_MAX_LEN) {
p[hi] = BB_MAKE(a, e-a, ack);
s = e;
} else {
p[hi] = BB_MAKE(a, BB_MAX_LEN, ack);
s = a + BB_MAX_LEN;
}
sectors = e - s;
lo = hi;
hi++;
}
}
if (sectors == 0 && hi < bb->count) {
/* we might be able to combine lo and hi */
/* Note: 's' is at the end of 'lo' */
sector_t a = BB_OFFSET(p[hi]);
int lolen = BB_LEN(p[lo]);
int hilen = BB_LEN(p[hi]);
int newlen = lolen + hilen - (s - a);
if (s >= a && newlen < BB_MAX_LEN) {
/* yes, we can combine them */
int ack = BB_ACK(p[lo]) && BB_ACK(p[hi]);
p[lo] = BB_MAKE(BB_OFFSET(p[lo]), newlen, ack);
memmove(p + hi, p + hi + 1,
(bb->count - hi - 1) * 8);
bb->count--;
}
}
while (sectors) {
		/* didn't merge all of it.
		 * Need to add a range just before 'hi' */
if (bb->count >= MD_MAX_BADBLOCKS) {
/* No room for more */
rv = 0;
break;
} else {
int this_sectors = sectors;
memmove(p + hi + 1, p + hi,
(bb->count - hi) * 8);
bb->count++;
if (this_sectors > BB_MAX_LEN)
this_sectors = BB_MAX_LEN;
p[hi] = BB_MAKE(s, this_sectors, acknowledged);
sectors -= this_sectors;
s += this_sectors;
}
}
bb->changed = 1;
if (!acknowledged)
bb->unacked_exist = 1;
write_sequnlock_irqrestore(&bb->lock, flags);
return rv;
}
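/*
 * Worked example for the merge above (illustrative): with one existing
 * acknowledged entry covering sectors 100-119 (a=100, e=120), setting an
 * unacknowledged range s=110, sectors=30 finds e >= s, extends e to 140
 * and rewrites p[lo] as (start 100, len 40) with ack = 1 && 0 = 0. No
 * extra slot is consumed unless the combined length would exceed
 * BB_MAX_LEN (512 sectors).
 */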
int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
int is_new)
{
int rv;
if (is_new)
s += rdev->new_data_offset;
else
s += rdev->data_offset;
rv = md_set_badblocks(&rdev->badblocks,
s, sectors, 0);
if (rv) {
/* Make sure they get written out promptly */
sysfs_notify_dirent_safe(rdev->sysfs_state);
set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
md_wakeup_thread(rdev->mddev->thread);
}
return rv;
}
EXPORT_SYMBOL_GPL(rdev_set_badblocks);
/*
* Remove a range of bad blocks from the table.
 * This may involve extending the table if we split a region,
* but it must not fail. So if the table becomes full, we just
* drop the remove request.
*/
static int md_clear_badblocks(struct badblocks *bb, sector_t s, int sectors)
{
u64 *p;
int lo, hi;
sector_t target = s + sectors;
int rv = 0;
if (bb->shift > 0) {
/* When clearing we round the start up and the end down.
* This should not matter as the shift should align with
* the block size and no rounding should ever be needed.
		 * However it is better to think a block is bad when it
* isn't than to think a block is not bad when it is.
*/
s += (1<<bb->shift) - 1;
s >>= bb->shift;
target >>= bb->shift;
sectors = target - s;
}
write_seqlock_irq(&bb->lock);
p = bb->page;
lo = 0;
hi = bb->count;
/* Find the last range that starts before 'target' */
while (hi - lo > 1) {
int mid = (lo + hi) / 2;
sector_t a = BB_OFFSET(p[mid]);
if (a < target)
lo = mid;
else
hi = mid;
}
if (hi > lo) {
/* p[lo] is the last range that could overlap the
* current range. Earlier ranges could also overlap,
* but only this one can overlap the end of the range.
*/
if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) {
/* Partial overlap, leave the tail of this range */
int ack = BB_ACK(p[lo]);
sector_t a = BB_OFFSET(p[lo]);
sector_t end = a + BB_LEN(p[lo]);
if (a < s) {
/* we need to split this range */
if (bb->count >= MD_MAX_BADBLOCKS) {
rv = -ENOSPC;
goto out;
}
memmove(p+lo+1, p+lo, (bb->count - lo) * 8);
bb->count++;
p[lo] = BB_MAKE(a, s-a, ack);
lo++;
}
p[lo] = BB_MAKE(target, end - target, ack);
/* there is no longer an overlap */
hi = lo;
lo--;
}
while (lo >= 0 &&
BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
/* This range does overlap */
if (BB_OFFSET(p[lo]) < s) {
/* Keep the early parts of this range. */
int ack = BB_ACK(p[lo]);
sector_t start = BB_OFFSET(p[lo]);
p[lo] = BB_MAKE(start, s - start, ack);
				/* now lo doesn't overlap, so.. */
break;
}
lo--;
}
/* 'lo' is strictly before, 'hi' is strictly after,
* anything between needs to be discarded
*/
if (hi - lo > 1) {
memmove(p+lo+1, p+hi, (bb->count - hi) * 8);
bb->count -= (hi - lo - 1);
}
}
bb->changed = 1;
out:
write_sequnlock_irq(&bb->lock);
return rv;
}
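/*
 * Worked example for the split path above (illustrative): clearing
 * s=110, sectors=10 from a single entry covering sectors 100-129 leaves
 * two entries, (100, len 10) and (120, len 10), consuming one extra
 * slot; if the table is already at MD_MAX_BADBLOCKS the request is
 * dropped with -ENOSPC, as the header comment warns.
 */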
int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
int is_new)
{
if (is_new)
s += rdev->new_data_offset;
else
s += rdev->data_offset;
return md_clear_badblocks(&rdev->badblocks,
s, sectors);
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
/*
* Acknowledge all bad blocks in a list.
* This only succeeds if ->changed is clear. It is used by
* in-kernel metadata updates
*/
void md_ack_all_badblocks(struct badblocks *bb)
{
if (bb->page == NULL || bb->changed)
/* no point even trying */
return;
write_seqlock_irq(&bb->lock);
if (bb->changed == 0 && bb->unacked_exist) {
u64 *p = bb->page;
int i;
for (i = 0; i < bb->count ; i++) {
if (!BB_ACK(p[i])) {
sector_t start = BB_OFFSET(p[i]);
int len = BB_LEN(p[i]);
p[i] = BB_MAKE(start, len, 1);
}
}
bb->unacked_exist = 0;
}
write_sequnlock_irq(&bb->lock);
}
EXPORT_SYMBOL_GPL(md_ack_all_badblocks);
/* sysfs access to bad-blocks list.
* We present two files.
* 'bad-blocks' lists sector numbers and lengths of ranges that
* are recorded as bad. The list is truncated to fit within
* the one-page limit of sysfs.
* Writing "sector length" to this file adds an acknowledged
* bad block list.
* 'unacknowledged-bad-blocks' lists bad blocks that have not yet
* been acknowledged. Writing to this file adds bad blocks
* without acknowledging them. This is largely for testing.
*/
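/*
 * Illustrative sysfs usage for the two files described above (the
 * per-rdev paths and exact attribute names below are assumptions and may
 * vary by kernel version):
 *
 *   # record an acknowledged 8-sector bad range starting at sector 4096
 *   echo "4096 8" > /sys/block/md0/md/dev-sda1/bad_blocks
 *
 *   # list ranges that have not yet been acknowledged
 *   cat /sys/block/md0/md/dev-sda1/unacknowledged_bad_blocks
 */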
static ssize_t
badblocks_show(struct badblocks *bb, char *page, int unack)
{
size_t len;
int i;
u64 *p = bb->page;
unsigned seq;
if (bb->shift < 0)
return 0;
retry:
seq = read_seqbegin(&bb->lock);
len = 0;
i = 0;
while (len < PAGE_SIZE && i < bb->count) {
sector_t s = BB_OFFSET(p[i]);
unsigned int length = BB_LEN(p[i]);
int ack = BB_ACK(p[i]);
i++;
if (unack && ack)
continue;
len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n",
(unsigned long long)s << bb->shift,
length << bb->shift);
}
if (unack && len == 0)
bb->unacked_exist = 0;
if (read_seqretry(&bb->lock, seq))
goto retry;
return len;
}
#define DO_DEBUG 1
static ssize_t
badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack)
{
unsigned long long sector;
int length;
char newline;
#ifdef DO_DEBUG
/* Allow clearing via sysfs *only* for testing/debugging.
* Normally only a successful write may clear a badblock
*/
int clear = 0;
if (page[0] == '-') {
clear = 1;
page++;
}
#endif /* DO_DEBUG */
	switch (sscanf(page, "%llu %d%c", &sector, &length, &newline)) {
case 3:
if (newline != '\n')
return -EINVAL;
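		/* fall through */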
case 2:
if (length <= 0)
return -EINVAL;
break;
default:
return -EINVAL;
}
#ifdef DO_DEBUG
if (clear) {
md_clear_badblocks(bb, sector, length);
return len;
}
#endif /* DO_DEBUG */
if (md_set_badblocks(bb, sector, length, !unack))
return len;
else
return -ENOSPC;
}
static int md_notify_reboot(struct notifier_block *this,
unsigned long code, void *x)
{
struct list_head *tmp;
struct mddev *mddev;
int need_delay = 0;
for_each_mddev(mddev, tmp) {
if (mddev_trylock(mddev)) {
if (mddev->pers)
__md_stop_writes(mddev);
if (mddev->persistent)
mddev->safemode = 2;
mddev_unlock(mddev);
}
need_delay = 1;
}
/*
* certain more exotic SCSI devices are known to be
* volatile wrt too early system reboots. While the
* right place to handle this issue is the given
* driver, we do want to have a safe RAID driver ...
*/
if (need_delay)
mdelay(1000*1);
return NOTIFY_DONE;
}
static struct notifier_block md_notifier = {
.notifier_call = md_notify_reboot,
.next = NULL,
.priority = INT_MAX, /* before any real devices */
};
static void md_geninit(void)
{
pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}
static int __init md_init(void)
{
int ret = -ENOMEM;
md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
if (!md_wq)
goto err_wq;
md_misc_wq = alloc_workqueue("md_misc", 0, 0);
if (!md_misc_wq)
goto err_misc_wq;
if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
goto err_md;
if ((ret = register_blkdev(0, "mdp")) < 0)
goto err_mdp;
mdp_major = ret;
blk_register_region(MKDEV(MD_MAJOR, 0), 512, THIS_MODULE,
md_probe, NULL, NULL);
blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
md_probe, NULL, NULL);
register_reboot_notifier(&md_notifier);
raid_table_header = register_sysctl_table(raid_root_table);
md_geninit();
return 0;
err_mdp:
unregister_blkdev(MD_MAJOR, "md");
err_md:
destroy_workqueue(md_misc_wq);
err_misc_wq:
destroy_workqueue(md_wq);
err_wq:
return ret;
}
void md_reload_sb(struct mddev *mddev)
{
struct md_rdev *rdev, *tmp;
rdev_for_each_safe(rdev, tmp, mddev) {
rdev->sb_loaded = 0;
ClearPageUptodate(rdev->sb_page);
}
mddev->raid_disks = 0;
analyze_sbs(mddev);
rdev_for_each_safe(rdev, tmp, mddev) {
struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
/* since we don't write to faulty devices, we figure out if the
* disk is faulty by comparing events
*/
if (mddev->events > sb->events)
set_bit(Faulty, &rdev->flags);
}
}
EXPORT_SYMBOL(md_reload_sb);
#ifndef MODULE
/*
* Searches all registered partitions for autorun RAID arrays
* at boot time.
*/
static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
struct list_head list;
dev_t dev;
};
void md_autodetect_dev(dev_t dev)
{
struct detected_devices_node *node_detected_dev;
node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
if (node_detected_dev) {
node_detected_dev->dev = dev;
list_add_tail(&node_detected_dev->list, &all_detected_devices);
} else {
printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
}
}
static void autostart_arrays(int part)
{
struct md_rdev *rdev;
struct detected_devices_node *node_detected_dev;
dev_t dev;
int i_scanned, i_passed;
i_scanned = 0;
i_passed = 0;
printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
i_scanned++;
node_detected_dev = list_entry(all_detected_devices.next,
struct detected_devices_node, list);
list_del(&node_detected_dev->list);
dev = node_detected_dev->dev;
kfree(node_detected_dev);
rdev = md_import_device(dev,0, 90);
if (IS_ERR(rdev))
continue;
if (test_bit(Faulty, &rdev->flags))
continue;
set_bit(AutoDetected, &rdev->flags);
list_add(&rdev->same_set, &pending_raid_disks);
i_passed++;
}
printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
i_scanned, i_passed);
autorun_devices(part);
}
#endif /* !MODULE */
static __exit void md_exit(void)
{
struct mddev *mddev;
struct list_head *tmp;
int delay = 1;
blk_unregister_region(MKDEV(MD_MAJOR,0), 512);
blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
unregister_blkdev(MD_MAJOR,"md");
unregister_blkdev(mdp_major, "mdp");
unregister_reboot_notifier(&md_notifier);
unregister_sysctl_table(raid_table_header);
/* We cannot unload the modules while some process is
* waiting for us in select() or poll() - wake them up
*/
md_unloading = 1;
while (waitqueue_active(&md_event_waiters)) {
/* not safe to leave yet */
wake_up(&md_event_waiters);
msleep(delay);
delay += delay;
}
remove_proc_entry("mdstat", NULL);
for_each_mddev(mddev, tmp) {
export_array(mddev);
mddev->hold_active = 0;
}
destroy_workqueue(md_misc_wq);
destroy_workqueue(md_wq);
}
subsys_initcall(md_init);
module_exit(md_exit)
static int get_ro(char *buffer, struct kernel_param *kp)
{
return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
return kstrtouint(val, 10, (unsigned int *)&start_readonly);
}
module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD RAID framework");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);
| ./CrossVul/dataset_final_sorted/CWE-200/c/good_1694_0 |
crossvul-cpp_data_bad_335_0 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% X X BBBB M M %
% X X B B MM MM %
% X BBBB M M M %
% X X B B M M %
% X X BBBB M M %
% %
% %
% Read/Write X Windows System Bitmap Format %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/module.h"
#include "MagickCore/utility.h"
/*
Forward declarations.
*/
static MagickBooleanType
WriteXBMImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s X B M %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsXBM() returns MagickTrue if the image format type, identified by the
% magick string, is XBM.
%
% The format of the IsXBM method is:
%
% MagickBooleanType IsXBM(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsXBM(const unsigned char *magick,const size_t length)
{
if (length < 7)
return(MagickFalse);
if (memcmp(magick,"#define",7) == 0)
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d X B M I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadXBMImage() reads an X11 bitmap image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadXBMImage method is:
%
% Image *ReadXBMImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static int XBMInteger(Image *image,short int *hex_digits)
{
int
c;
unsigned int
value;
/*
Skip any leading whitespace.
*/
do
{
c=ReadBlobByte(image);
if (c == EOF)
return(-1);
} while ((c == ' ') || (c == '\t') || (c == '\n') || (c == '\r'));
/*
Evaluate number.
*/
value=0;
do
{
if (value > (unsigned int) (INT_MAX/10))
break;
value*=16;
c&=0xff;
if (value > (unsigned int) (INT_MAX-hex_digits[c]))
break;
value+=hex_digits[c];
c=ReadBlobByte(image);
if (c == EOF)
return(-1);
} while (hex_digits[c] >= 0);
return((int) value);
}
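/*
  Worked example for XBMInteger(): given the input "0x1F,", the loop
  consumes '0' (0), 'x' (mapped to 0 in hex_digits, so the "0x" prefix
  is effectively skipped), '1' (1), and 'F' (15), accumulating
  ((0*16+0)*16+1)*16+15 = 31; the ',' maps to -1 and ends the scan, so
  31 is returned.
*/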
static Image *ReadXBMImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
char
buffer[MagickPathExtent],
name[MagickPathExtent];
Image
*image;
int
c;
MagickBooleanType
status;
register ssize_t
i,
x;
register Quantum
*q;
register unsigned char
*p;
short int
hex_digits[256];
ssize_t
y;
unsigned char
*data;
unsigned int
bit,
byte,
bytes_per_line,
height,
length,
padding,
version,
width;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read X bitmap header.
*/
width=0;
height=0;
*name='\0';
while (ReadBlobString(image,buffer) != (char *) NULL)
if (sscanf(buffer,"#define %1024s %u",name,&width) == 2)
if ((strlen(name) >= 6) &&
(LocaleCompare(name+strlen(name)-6,"_width") == 0))
break;
while (ReadBlobString(image,buffer) != (char *) NULL)
if (sscanf(buffer,"#define %1024s %u",name,&height) == 2)
if ((strlen(name) >= 7) &&
(LocaleCompare(name+strlen(name)-7,"_height") == 0))
break;
image->columns=width;
image->rows=height;
image->depth=8;
image->storage_class=PseudoClass;
image->colors=2;
/*
Scan until hex digits.
*/
version=11;
while (ReadBlobString(image,buffer) != (char *) NULL)
{
if (sscanf(buffer,"static short %1024s = {",name) == 1)
version=10;
else
if (sscanf(buffer,"static unsigned char %1024s = {",name) == 1)
version=11;
else
if (sscanf(buffer,"static char %1024s = {",name) == 1)
version=11;
else
continue;
p=(unsigned char *) strrchr(name,'_');
if (p == (unsigned char *) NULL)
p=(unsigned char *) name;
else
p++;
if (LocaleCompare("bits[]",(char *) p) == 0)
break;
}
if ((image->columns == 0) || (image->rows == 0) ||
(EOFBlob(image) != MagickFalse))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Initialize image structure.
*/
if (AcquireImageColormap(image,image->colors,exception) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
/*
Initialize colormap.
*/
image->colormap[0].red=(MagickRealType) QuantumRange;
image->colormap[0].green=(MagickRealType) QuantumRange;
image->colormap[0].blue=(MagickRealType) QuantumRange;
image->colormap[1].red=0.0;
image->colormap[1].green=0.0;
image->colormap[1].blue=0.0;
if (image_info->ping != MagickFalse)
{
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
/*
Initialize hex values.
*/
for (i=0; i < (ssize_t) (sizeof(hex_digits)/sizeof(*hex_digits)); i++)
hex_digits[i]=(-1);
hex_digits[(int) '0']=0;
hex_digits[(int) '1']=1;
hex_digits[(int) '2']=2;
hex_digits[(int) '3']=3;
hex_digits[(int) '4']=4;
hex_digits[(int) '5']=5;
hex_digits[(int) '6']=6;
hex_digits[(int) '7']=7;
hex_digits[(int) '8']=8;
hex_digits[(int) '9']=9;
hex_digits[(int) 'A']=10;
hex_digits[(int) 'B']=11;
hex_digits[(int) 'C']=12;
hex_digits[(int) 'D']=13;
hex_digits[(int) 'E']=14;
hex_digits[(int) 'F']=15;
hex_digits[(int) 'a']=10;
hex_digits[(int) 'b']=11;
hex_digits[(int) 'c']=12;
hex_digits[(int) 'd']=13;
hex_digits[(int) 'e']=14;
hex_digits[(int) 'f']=15;
hex_digits[(int) 'x']=0;
hex_digits[(int) ' ']=(-1);
hex_digits[(int) ',']=(-1);
hex_digits[(int) '}']=(-1);
hex_digits[(int) '\n']=(-1);
hex_digits[(int) '\t']=(-1);
/*
Read hex image data.
*/
padding=0;
if (((image->columns % 16) != 0) && ((image->columns % 16) < 9) &&
(version == 10))
padding=1;
bytes_per_line=(unsigned int) (image->columns+7)/8+padding;
length=(unsigned int) image->rows;
data=(unsigned char *) AcquireQuantumMemory(length,bytes_per_line*
sizeof(*data));
if (data == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
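  /*
    Caution: AcquireQuantumMemory() does not zero this buffer.  A hex
    constant of 0x80000000 or larger can make XBMInteger() return a
    negative value without the blob reaching EOF, so the loops below
    may break early and leave the tail of 'data' uninitialized before
    it is converted to pixels.
  */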
p=data;
if (version == 10)
for (i=0; i < (ssize_t) (bytes_per_line*image->rows); (i+=2))
{
c=XBMInteger(image,hex_digits);
if (c < 0)
break;
*p++=(unsigned char) c;
if ((padding == 0) || (((i+2) % bytes_per_line) != 0))
*p++=(unsigned char) (c >> 8);
}
else
for (i=0; i < (ssize_t) (bytes_per_line*image->rows); i++)
{
c=XBMInteger(image,hex_digits);
if (c < 0)
break;
*p++=(unsigned char) c;
}
if (EOFBlob(image) != MagickFalse)
{
data=(unsigned char *) RelinquishMagickMemory(data);
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
}
/*
Convert X bitmap image to pixel packets.
*/
p=data;
for (y=0; y < (ssize_t) image->rows; y++)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
bit=0;
byte=0;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (bit == 0)
byte=(unsigned int) (*p++);
SetPixelIndex(image,(Quantum) ((byte & 0x01) != 0 ? 0x01 : 0x00),q);
bit++;
byte>>=1;
if (bit == 8)
bit=0;
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
data=(unsigned char *) RelinquishMagickMemory(data);
(void) SyncImage(image,exception);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r X B M I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterXBMImage() adds attributes for the XBM image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterXBMImage method is:
%
% size_t RegisterXBMImage(void)
%
*/
ModuleExport size_t RegisterXBMImage(void)
{
MagickInfo
*entry;
entry=AcquireMagickInfo("XBM","XBM",
"X Windows system bitmap (black and white)");
entry->decoder=(DecodeImageHandler *) ReadXBMImage;
entry->encoder=(EncodeImageHandler *) WriteXBMImage;
entry->magick=(IsImageFormatHandler *) IsXBM;
entry->flags^=CoderAdjoinFlag;
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r X B M I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterXBMImage() removes format registrations made by the
% XBM module from the list of supported formats.
%
% The format of the UnregisterXBMImage method is:
%
% UnregisterXBMImage(void)
%
*/
ModuleExport void UnregisterXBMImage(void)
{
(void) UnregisterMagickInfo("XBM");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e X B M I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteXBMImage() writes an image to a file in the X bitmap format.
%
% The format of the WriteXBMImage method is:
%
% MagickBooleanType WriteXBMImage(const ImageInfo *image_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WriteXBMImage(const ImageInfo *image_info,Image *image,
ExceptionInfo *exception)
{
char
basename[MagickPathExtent],
buffer[MagickPathExtent];
MagickBooleanType
status;
register const Quantum
*p;
register ssize_t
x;
size_t
bit,
byte;
ssize_t
count,
y;
/*
Open output image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
(void) TransformImageColorspace(image,sRGBColorspace,exception);
/*
Write X bitmap header.
*/
GetPathComponent(image->filename,BasePath,basename);
(void) FormatLocaleString(buffer,MagickPathExtent,"#define %s_width %.20g\n",
basename,(double) image->columns);
(void) WriteBlob(image,strlen(buffer),(unsigned char *) buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,"#define %s_height %.20g\n",
basename,(double) image->rows);
(void) WriteBlob(image,strlen(buffer),(unsigned char *) buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,
"static char %s_bits[] = {\n",basename);
(void) WriteBlob(image,strlen(buffer),(unsigned char *) buffer);
(void) CopyMagickString(buffer," ",MagickPathExtent);
(void) WriteBlob(image,strlen(buffer),(unsigned char *) buffer);
/*
Convert MIFF to X bitmap pixels.
*/
(void) SetImageType(image,BilevelType,exception);
bit=0;
byte=0;
count=0;
x=0;
y=0;
(void) CopyMagickString(buffer," ",MagickPathExtent);
(void) WriteBlob(image,strlen(buffer),(unsigned char *) buffer);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
byte>>=1;
if (GetPixelLuma(image,p) < (QuantumRange/2))
byte|=0x80;
bit++;
if (bit == 8)
{
/*
Write a bitmap byte to the image file.
*/
(void) FormatLocaleString(buffer,MagickPathExtent,"0x%02X, ",
(unsigned int) (byte & 0xff));
(void) WriteBlob(image,strlen(buffer),(unsigned char *) buffer);
count++;
if (count == 12)
{
(void) CopyMagickString(buffer,"\n ",MagickPathExtent);
(void) WriteBlob(image,strlen(buffer),(unsigned char *) buffer);
count=0;
};
bit=0;
byte=0;
}
p+=GetPixelChannels(image);
}
if (bit != 0)
{
/*
Write a bitmap byte to the image file.
*/
byte>>=(8-bit);
(void) FormatLocaleString(buffer,MagickPathExtent,"0x%02X, ",
(unsigned int) (byte & 0xff));
(void) WriteBlob(image,strlen(buffer),(unsigned char *) buffer);
count++;
if (count == 12)
{
(void) CopyMagickString(buffer,"\n ",MagickPathExtent);
(void) WriteBlob(image,strlen(buffer),(unsigned char *) buffer);
count=0;
};
bit=0;
byte=0;
};
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
(void) CopyMagickString(buffer,"};\n",MagickPathExtent);
(void) WriteBlob(image,strlen(buffer),(unsigned char *) buffer);
(void) CloseBlob(image);
return(MagickTrue);
}
| ./CrossVul/dataset_final_sorted/CWE-200/c/bad_335_0 |
crossvul-cpp_data_bad_3351_7 | /*
* XDR support for nfsd
*
* Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
*/
#include "vfs.h"
#include "xdr.h"
#include "auth.h"
#define NFSDDBG_FACILITY NFSDDBG_XDR
/*
* Mapping of S_IF* types to NFS file types
*/
static u32 nfs_ftypes[] = {
NFNON, NFCHR, NFCHR, NFBAD,
NFDIR, NFBAD, NFBLK, NFBAD,
NFREG, NFBAD, NFLNK, NFBAD,
NFSOCK, NFBAD, NFLNK, NFBAD,
};
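/*
 * The table is indexed with (mode & S_IFMT) >> 12, so for example
 * S_IFREG (0100000) gives index 8 -> NFREG and S_IFDIR (0040000)
 * gives index 4 -> NFDIR; unmapped slots decode as NFBAD.
 */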
/*
* XDR functions for basic NFS types
*/
static __be32 *
decode_fh(__be32 *p, struct svc_fh *fhp)
{
fh_init(fhp, NFS_FHSIZE);
memcpy(&fhp->fh_handle.fh_base, p, NFS_FHSIZE);
fhp->fh_handle.fh_size = NFS_FHSIZE;
/* FIXME: Look up export pointer here and verify
* Sun Secure RPC if requested */
return p + (NFS_FHSIZE >> 2);
}
/* Helper function for NFSv2 ACL code */
__be32 *nfs2svc_decode_fh(__be32 *p, struct svc_fh *fhp)
{
return decode_fh(p, fhp);
}
static __be32 *
encode_fh(__be32 *p, struct svc_fh *fhp)
{
memcpy(p, &fhp->fh_handle.fh_base, NFS_FHSIZE);
return p + (NFS_FHSIZE>> 2);
}
/*
* Decode a file name and make sure that the path contains
* no slashes or null bytes.
*/
static __be32 *
decode_filename(__be32 *p, char **namp, unsigned int *lenp)
{
char *name;
unsigned int i;
if ((p = xdr_decode_string_inplace(p, namp, lenp, NFS_MAXNAMLEN)) != NULL) {
for (i = 0, name = *namp; i < *lenp; i++, name++) {
if (*name == '\0' || *name == '/')
return NULL;
}
}
return p;
}
static __be32 *
decode_pathname(__be32 *p, char **namp, unsigned int *lenp)
{
char *name;
unsigned int i;
if ((p = xdr_decode_string_inplace(p, namp, lenp, NFS_MAXPATHLEN)) != NULL) {
for (i = 0, name = *namp; i < *lenp; i++, name++) {
if (*name == '\0')
return NULL;
}
}
return p;
}
static __be32 *
decode_sattr(__be32 *p, struct iattr *iap)
{
u32 tmp, tmp1;
iap->ia_valid = 0;
/* Sun client bug compatibility check: some sun clients seem to
* put 0xffff in the mode field when they mean 0xffffffff.
* Quoting the 4.4BSD nfs server code: Nah nah nah nah na nah.
*/
if ((tmp = ntohl(*p++)) != (u32)-1 && tmp != 0xffff) {
iap->ia_valid |= ATTR_MODE;
iap->ia_mode = tmp;
}
if ((tmp = ntohl(*p++)) != (u32)-1) {
iap->ia_uid = make_kuid(&init_user_ns, tmp);
if (uid_valid(iap->ia_uid))
iap->ia_valid |= ATTR_UID;
}
if ((tmp = ntohl(*p++)) != (u32)-1) {
iap->ia_gid = make_kgid(&init_user_ns, tmp);
if (gid_valid(iap->ia_gid))
iap->ia_valid |= ATTR_GID;
}
if ((tmp = ntohl(*p++)) != (u32)-1) {
iap->ia_valid |= ATTR_SIZE;
iap->ia_size = tmp;
}
tmp = ntohl(*p++); tmp1 = ntohl(*p++);
if (tmp != (u32)-1 && tmp1 != (u32)-1) {
iap->ia_valid |= ATTR_ATIME | ATTR_ATIME_SET;
iap->ia_atime.tv_sec = tmp;
iap->ia_atime.tv_nsec = tmp1 * 1000;
}
tmp = ntohl(*p++); tmp1 = ntohl(*p++);
if (tmp != (u32)-1 && tmp1 != (u32)-1) {
iap->ia_valid |= ATTR_MTIME | ATTR_MTIME_SET;
iap->ia_mtime.tv_sec = tmp;
iap->ia_mtime.tv_nsec = tmp1 * 1000;
/*
* Passing the invalid value useconds=1000000 for mtime
* is a Sun convention for "set both mtime and atime to
* current server time". It's needed to make permissions
* checks for the "touch" program across v2 mounts to
* Solaris and Irix boxes work correctly. See description of
* sattr in section 6.1 of "NFS Illustrated" by
* Brent Callaghan, Addison-Wesley, ISBN 0-201-32750-5
*/
if (tmp1 == 1000000)
iap->ia_valid &= ~(ATTR_ATIME_SET|ATTR_MTIME_SET);
}
return p;
}
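/*
 * Worked example of the Sun convention above: a SETATTR carrying
 * mtime = { sec = S, usec = 1000000 } (together with a valid atime)
 * decodes to ia_valid containing ATTR_ATIME|ATTR_MTIME with both
 * *_SET bits cleared, so the VFS stamps the inode with the server's
 * current time rather than the client-supplied values.
 */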
static __be32 *
encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
struct kstat *stat)
{
struct dentry *dentry = fhp->fh_dentry;
int type;
struct timespec time;
u32 f;
type = (stat->mode & S_IFMT);
*p++ = htonl(nfs_ftypes[type >> 12]);
*p++ = htonl((u32) stat->mode);
*p++ = htonl((u32) stat->nlink);
*p++ = htonl((u32) from_kuid(&init_user_ns, stat->uid));
*p++ = htonl((u32) from_kgid(&init_user_ns, stat->gid));
if (S_ISLNK(type) && stat->size > NFS_MAXPATHLEN) {
*p++ = htonl(NFS_MAXPATHLEN);
} else {
*p++ = htonl((u32) stat->size);
}
*p++ = htonl((u32) stat->blksize);
if (S_ISCHR(type) || S_ISBLK(type))
*p++ = htonl(new_encode_dev(stat->rdev));
else
*p++ = htonl(0xffffffff);
*p++ = htonl((u32) stat->blocks);
switch (fsid_source(fhp)) {
default:
case FSIDSOURCE_DEV:
*p++ = htonl(new_encode_dev(stat->dev));
break;
case FSIDSOURCE_FSID:
*p++ = htonl((u32) fhp->fh_export->ex_fsid);
break;
case FSIDSOURCE_UUID:
f = ((u32*)fhp->fh_export->ex_uuid)[0];
f ^= ((u32*)fhp->fh_export->ex_uuid)[1];
f ^= ((u32*)fhp->fh_export->ex_uuid)[2];
f ^= ((u32*)fhp->fh_export->ex_uuid)[3];
*p++ = htonl(f);
break;
}
*p++ = htonl((u32) stat->ino);
*p++ = htonl((u32) stat->atime.tv_sec);
*p++ = htonl(stat->atime.tv_nsec ? stat->atime.tv_nsec / 1000 : 0);
lease_get_mtime(d_inode(dentry), &time);
*p++ = htonl((u32) time.tv_sec);
*p++ = htonl(time.tv_nsec ? time.tv_nsec / 1000 : 0);
*p++ = htonl((u32) stat->ctime.tv_sec);
*p++ = htonl(stat->ctime.tv_nsec ? stat->ctime.tv_nsec / 1000 : 0);
return p;
}
/* Helper function for NFSv2 ACL code */
__be32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp, struct kstat *stat)
{
return encode_fattr(rqstp, p, fhp, stat);
}
/*
* XDR decode functions
*/
int
nfssvc_decode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
{
return xdr_argsize_check(rqstp, p);
}
int
nfssvc_decode_fhandle(struct svc_rqst *rqstp, __be32 *p, struct nfsd_fhandle *args)
{
p = decode_fh(p, &args->fh);
if (!p)
return 0;
return xdr_argsize_check(rqstp, p);
}
int
nfssvc_decode_sattrargs(struct svc_rqst *rqstp, __be32 *p,
struct nfsd_sattrargs *args)
{
p = decode_fh(p, &args->fh);
if (!p)
return 0;
p = decode_sattr(p, &args->attrs);
return xdr_argsize_check(rqstp, p);
}
int
nfssvc_decode_diropargs(struct svc_rqst *rqstp, __be32 *p,
struct nfsd_diropargs *args)
{
if (!(p = decode_fh(p, &args->fh))
|| !(p = decode_filename(p, &args->name, &args->len)))
return 0;
return xdr_argsize_check(rqstp, p);
}
int
nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
struct nfsd_readargs *args)
{
unsigned int len;
int v;
p = decode_fh(p, &args->fh);
if (!p)
return 0;
args->offset = ntohl(*p++);
len = args->count = ntohl(*p++);
p++; /* totalcount - unused */
len = min_t(unsigned int, len, NFSSVC_MAXBLKSIZE_V2);
/* set up somewhere to store response.
* We take pages, put them on reslist and include in iovec
*/
v=0;
while (len > 0) {
struct page *p = *(rqstp->rq_next_page++);
rqstp->rq_vec[v].iov_base = page_address(p);
rqstp->rq_vec[v].iov_len = min_t(unsigned int, len, PAGE_SIZE);
len -= rqstp->rq_vec[v].iov_len;
v++;
}
args->vlen = v;
return xdr_argsize_check(rqstp, p);
}
int
nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
struct nfsd_writeargs *args)
{
unsigned int len, hdr, dlen;
struct kvec *head = rqstp->rq_arg.head;
int v;
p = decode_fh(p, &args->fh);
if (!p)
return 0;
p++; /* beginoffset */
args->offset = ntohl(*p++); /* offset */
p++; /* totalcount */
len = args->len = ntohl(*p++);
/*
* The protocol specifies a maximum of 8192 bytes.
*/
if (len > NFSSVC_MAXBLKSIZE_V2)
return 0;
/*
* Check to make sure that we got the right number of
* bytes.
*/
hdr = (void*)p - head->iov_base;
if (hdr > head->iov_len)
return 0;
dlen = head->iov_len + rqstp->rq_arg.page_len - hdr;
/*
* Round the length of the data which was specified up to
* the next multiple of XDR units and then compare that
* against the length which was actually received.
* Note that when RPCSEC/GSS (for example) is used, the
* data buffer can be padded so dlen might be larger
* than required. It must never be smaller.
*/
if (dlen < XDR_QUADLEN(len)*4)
return 0;
rqstp->rq_vec[0].iov_base = (void*)p;
rqstp->rq_vec[0].iov_len = head->iov_len - hdr;
v = 0;
while (len > rqstp->rq_vec[v].iov_len) {
len -= rqstp->rq_vec[v].iov_len;
v++;
rqstp->rq_vec[v].iov_base = page_address(rqstp->rq_pages[v]);
rqstp->rq_vec[v].iov_len = PAGE_SIZE;
}
rqstp->rq_vec[v].iov_len = len;
args->vlen = v + 1;
return 1;
}
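/*
 * Example of the length check above: a write of len = 5 occupies
 * XDR_QUADLEN(5) * 4 = 8 bytes on the wire (the payload rounded up to
 * a whole XDR unit), so a request whose remaining buffer dlen is
 * shorter than 8 bytes is rejected even though only 5 are used.
 */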
int
nfssvc_decode_createargs(struct svc_rqst *rqstp, __be32 *p,
struct nfsd_createargs *args)
{
if ( !(p = decode_fh(p, &args->fh))
|| !(p = decode_filename(p, &args->name, &args->len)))
return 0;
p = decode_sattr(p, &args->attrs);
return xdr_argsize_check(rqstp, p);
}
int
nfssvc_decode_renameargs(struct svc_rqst *rqstp, __be32 *p,
struct nfsd_renameargs *args)
{
if (!(p = decode_fh(p, &args->ffh))
|| !(p = decode_filename(p, &args->fname, &args->flen))
|| !(p = decode_fh(p, &args->tfh))
|| !(p = decode_filename(p, &args->tname, &args->tlen)))
return 0;
return xdr_argsize_check(rqstp, p);
}
int
nfssvc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readlinkargs *args)
{
p = decode_fh(p, &args->fh);
if (!p)
return 0;
args->buffer = page_address(*(rqstp->rq_next_page++));
return xdr_argsize_check(rqstp, p);
}
int
nfssvc_decode_linkargs(struct svc_rqst *rqstp, __be32 *p,
struct nfsd_linkargs *args)
{
if (!(p = decode_fh(p, &args->ffh))
|| !(p = decode_fh(p, &args->tfh))
|| !(p = decode_filename(p, &args->tname, &args->tlen)))
return 0;
return xdr_argsize_check(rqstp, p);
}
int
nfssvc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p,
struct nfsd_symlinkargs *args)
{
if ( !(p = decode_fh(p, &args->ffh))
|| !(p = decode_filename(p, &args->fname, &args->flen))
|| !(p = decode_pathname(p, &args->tname, &args->tlen)))
return 0;
p = decode_sattr(p, &args->attrs);
return xdr_argsize_check(rqstp, p);
}
int
nfssvc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p,
struct nfsd_readdirargs *args)
{
p = decode_fh(p, &args->fh);
if (!p)
return 0;
args->cookie = ntohl(*p++);
args->count = ntohl(*p++);
args->count = min_t(u32, args->count, PAGE_SIZE);
args->buffer = page_address(*(rqstp->rq_next_page++));
return xdr_argsize_check(rqstp, p);
}
/*
* XDR encode functions
*/
int
nfssvc_encode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
{
return xdr_ressize_check(rqstp, p);
}
int
nfssvc_encode_attrstat(struct svc_rqst *rqstp, __be32 *p,
struct nfsd_attrstat *resp)
{
p = encode_fattr(rqstp, p, &resp->fh, &resp->stat);
return xdr_ressize_check(rqstp, p);
}
int
nfssvc_encode_diropres(struct svc_rqst *rqstp, __be32 *p,
struct nfsd_diropres *resp)
{
p = encode_fh(p, &resp->fh);
p = encode_fattr(rqstp, p, &resp->fh, &resp->stat);
return xdr_ressize_check(rqstp, p);
}
int
nfssvc_encode_readlinkres(struct svc_rqst *rqstp, __be32 *p,
struct nfsd_readlinkres *resp)
{
*p++ = htonl(resp->len);
xdr_ressize_check(rqstp, p);
rqstp->rq_res.page_len = resp->len;
if (resp->len & 3) {
/* need to pad the tail */
rqstp->rq_res.tail[0].iov_base = p;
*p = 0;
rqstp->rq_res.tail[0].iov_len = 4 - (resp->len&3);
}
return 1;
}
int
nfssvc_encode_readres(struct svc_rqst *rqstp, __be32 *p,
struct nfsd_readres *resp)
{
p = encode_fattr(rqstp, p, &resp->fh, &resp->stat);
*p++ = htonl(resp->count);
xdr_ressize_check(rqstp, p);
/* now update rqstp->rq_res to reflect data as well */
rqstp->rq_res.page_len = resp->count;
if (resp->count & 3) {
/* need to pad the tail */
rqstp->rq_res.tail[0].iov_base = p;
*p = 0;
rqstp->rq_res.tail[0].iov_len = 4 - (resp->count&3);
}
return 1;
}
int
nfssvc_encode_readdirres(struct svc_rqst *rqstp, __be32 *p,
struct nfsd_readdirres *resp)
{
xdr_ressize_check(rqstp, p);
p = resp->buffer;
*p++ = 0; /* no more entries */
*p++ = htonl((resp->common.err == nfserr_eof));
rqstp->rq_res.page_len = (((unsigned long)p-1) & ~PAGE_MASK)+1;
return 1;
}
int
nfssvc_encode_statfsres(struct svc_rqst *rqstp, __be32 *p,
struct nfsd_statfsres *resp)
{
struct kstatfs *stat = &resp->stats;
*p++ = htonl(NFSSVC_MAXBLKSIZE_V2); /* max transfer size */
*p++ = htonl(stat->f_bsize);
*p++ = htonl(stat->f_blocks);
*p++ = htonl(stat->f_bfree);
*p++ = htonl(stat->f_bavail);
return xdr_ressize_check(rqstp, p);
}
int
nfssvc_encode_entry(void *ccdv, const char *name,
int namlen, loff_t offset, u64 ino, unsigned int d_type)
{
struct readdir_cd *ccd = ccdv;
struct nfsd_readdirres *cd = container_of(ccd, struct nfsd_readdirres, common);
__be32 *p = cd->buffer;
int buflen, slen;
/*
dprintk("nfsd: entry(%.*s off %ld ino %ld)\n",
namlen, name, offset, ino);
*/
if (offset > ~((u32) 0)) {
cd->common.err = nfserr_fbig;
return -EINVAL;
}
if (cd->offset)
*cd->offset = htonl(offset);
/* truncate filename */
namlen = min(namlen, NFS2_MAXNAMLEN);
slen = XDR_QUADLEN(namlen);
if ((buflen = cd->buflen - slen - 4) < 0) {
cd->common.err = nfserr_toosmall;
return -EINVAL;
}
if (ino > ~((u32) 0)) {
cd->common.err = nfserr_fbig;
return -EINVAL;
}
*p++ = xdr_one; /* mark entry present */
*p++ = htonl((u32) ino); /* file id */
p = xdr_encode_array(p, name, namlen);/* name length & name */
cd->offset = p; /* remember pointer */
*p++ = htonl(~0U); /* offset of next entry */
cd->buflen = buflen;
cd->buffer = p;
cd->common.err = nfs_ok;
return 0;
}
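/*
 * Each entry produced above is laid out on the wire as
 *
 *   | one | fileid | namelen | name (padded) | next-entry cookie |
 *
 * with the cookie written as ~0 first and patched through cd->offset
 * once the following entry (or the end-of-list marker) is emitted.
 */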
/*
* XDR release functions
*/
int
nfssvc_release_fhandle(struct svc_rqst *rqstp, __be32 *p,
struct nfsd_fhandle *resp)
{
fh_put(&resp->fh);
return 1;
}
| ./CrossVul/dataset_final_sorted/CWE-404/c/bad_3351_7 |
crossvul-cpp_data_good_3351_5 | /*
* Copyright (c) 2001 The Regents of the University of Michigan.
* All rights reserved.
*
* Kendrick Smith <kmsmith@umich.edu>
* Andy Adamson <kandros@umich.edu>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/jhash.h>
#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
#include "current_stateid.h"
#include "netns.h"
#include "pnfs.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
.si_generation = ~0,
.si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
/* all fields zero */
};
static const stateid_t currentstateid = {
.si_generation = 1,
};
static u64 current_sessionid = 1;
#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
/* Locking: */
/*
* Currently used for the del_recall_lru and file hash table. In an
* effort to decrease the scope of the client_mutex, this spinlock may
* eventually cover more:
*/
static DEFINE_SPINLOCK(state_lock);
/*
* A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
* the refcount on the open stateid to drop.
*/
static DECLARE_WAIT_QUEUE_HEAD(close_wq);
static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;
static struct kmem_cache *odstate_slab;
static void free_session(struct nfsd4_session *);
static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;
static bool is_session_dead(struct nfsd4_session *ses)
{
return ses->se_flags & NFS4_SESSION_DEAD;
}
static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
if (atomic_read(&ses->se_ref) > ref_held_by_me)
return nfserr_jukebox;
ses->se_flags |= NFS4_SESSION_DEAD;
return nfs_ok;
}
static bool is_client_expired(struct nfs4_client *clp)
{
return clp->cl_time == 0;
}
static __be32 get_client_locked(struct nfs4_client *clp)
{
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
lockdep_assert_held(&nn->client_lock);
if (is_client_expired(clp))
return nfserr_expired;
atomic_inc(&clp->cl_refcount);
return nfs_ok;
}
/* must be called under the client_lock */
static inline void
renew_client_locked(struct nfs4_client *clp)
{
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
if (is_client_expired(clp)) {
WARN_ON(1);
printk("%s: client (clientid %08x/%08x) already expired\n",
__func__,
clp->cl_clientid.cl_boot,
clp->cl_clientid.cl_id);
return;
}
dprintk("renewing client (clientid %08x/%08x)\n",
clp->cl_clientid.cl_boot,
clp->cl_clientid.cl_id);
list_move_tail(&clp->cl_lru, &nn->client_lru);
clp->cl_time = get_seconds();
}
static void put_client_renew_locked(struct nfs4_client *clp)
{
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
lockdep_assert_held(&nn->client_lock);
if (!atomic_dec_and_test(&clp->cl_refcount))
return;
if (!is_client_expired(clp))
renew_client_locked(clp);
}
static void put_client_renew(struct nfs4_client *clp)
{
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
return;
if (!is_client_expired(clp))
renew_client_locked(clp);
spin_unlock(&nn->client_lock);
}
static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
__be32 status;
if (is_session_dead(ses))
return nfserr_badsession;
status = get_client_locked(ses->se_client);
if (status)
return status;
atomic_inc(&ses->se_ref);
return nfs_ok;
}
static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
struct nfs4_client *clp = ses->se_client;
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
lockdep_assert_held(&nn->client_lock);
if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
free_session(ses);
put_client_renew_locked(clp);
}
static void nfsd4_put_session(struct nfsd4_session *ses)
{
struct nfs4_client *clp = ses->se_client;
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
spin_lock(&nn->client_lock);
nfsd4_put_session_locked(ses);
spin_unlock(&nn->client_lock);
}
static struct nfsd4_blocked_lock *
find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
struct nfsd_net *nn)
{
struct nfsd4_blocked_lock *cur, *found = NULL;
spin_lock(&nn->blocked_locks_lock);
list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
if (fh_match(fh, &cur->nbl_fh)) {
list_del_init(&cur->nbl_list);
list_del_init(&cur->nbl_lru);
found = cur;
break;
}
}
spin_unlock(&nn->blocked_locks_lock);
if (found)
posix_unblock_lock(&found->nbl_lock);
return found;
}
static struct nfsd4_blocked_lock *
find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
struct nfsd_net *nn)
{
struct nfsd4_blocked_lock *nbl;
nbl = find_blocked_lock(lo, fh, nn);
if (!nbl) {
nbl= kmalloc(sizeof(*nbl), GFP_KERNEL);
if (nbl) {
fh_copy_shallow(&nbl->nbl_fh, fh);
locks_init_lock(&nbl->nbl_lock);
nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
&nfsd4_cb_notify_lock_ops,
NFSPROC4_CLNT_CB_NOTIFY_LOCK);
}
}
return nbl;
}
static void
free_blocked_lock(struct nfsd4_blocked_lock *nbl)
{
locks_release_private(&nbl->nbl_lock);
kfree(nbl);
}
static int
nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
/*
* Since this is just an optimization, we don't try very hard if it
* turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
* just quit trying on anything else.
*/
switch (task->tk_status) {
case -NFS4ERR_DELAY:
rpc_delay(task, 1 * HZ);
return 0;
default:
return 1;
}
}
static void
nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
{
struct nfsd4_blocked_lock *nbl = container_of(cb,
struct nfsd4_blocked_lock, nbl_cb);
free_blocked_lock(nbl);
}
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
.done = nfsd4_cb_notify_lock_done,
.release = nfsd4_cb_notify_lock_release,
};
static inline struct nfs4_stateowner *
nfs4_get_stateowner(struct nfs4_stateowner *sop)
{
atomic_inc(&sop->so_count);
return sop;
}
static int
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
{
return (sop->so_owner.len == owner->len) &&
0 == memcmp(sop->so_owner.data, owner->data, owner->len);
}
static struct nfs4_openowner *
find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
struct nfs4_client *clp)
{
struct nfs4_stateowner *so;
lockdep_assert_held(&clp->cl_lock);
list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
so_strhash) {
if (!so->so_is_open_owner)
continue;
if (same_owner_str(so, &open->op_owner))
return openowner(nfs4_get_stateowner(so));
}
return NULL;
}
static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
struct nfs4_client *clp)
{
struct nfs4_openowner *oo;
spin_lock(&clp->cl_lock);
oo = find_openstateowner_str_locked(hashval, open, clp);
spin_unlock(&clp->cl_lock);
return oo;
}
static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
unsigned char *cptr = (unsigned char *) ptr;
u32 x = 0;
while (nbytes--) {
x *= 37;
x += *cptr++;
}
return x;
}
static void nfsd4_free_file_rcu(struct rcu_head *rcu)
{
struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);
kmem_cache_free(file_slab, fp);
}
void
put_nfs4_file(struct nfs4_file *fi)
{
might_lock(&state_lock);
if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
hlist_del_rcu(&fi->fi_hash);
spin_unlock(&state_lock);
WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
}
}
static struct file *
__nfs4_get_fd(struct nfs4_file *f, int oflag)
{
if (f->fi_fds[oflag])
return get_file(f->fi_fds[oflag]);
return NULL;
}
static struct file *
find_writeable_file_locked(struct nfs4_file *f)
{
struct file *ret;
lockdep_assert_held(&f->fi_lock);
ret = __nfs4_get_fd(f, O_WRONLY);
if (!ret)
ret = __nfs4_get_fd(f, O_RDWR);
return ret;
}
static struct file *
find_writeable_file(struct nfs4_file *f)
{
struct file *ret;
spin_lock(&f->fi_lock);
ret = find_writeable_file_locked(f);
spin_unlock(&f->fi_lock);
return ret;
}
static struct file *find_readable_file_locked(struct nfs4_file *f)
{
struct file *ret;
lockdep_assert_held(&f->fi_lock);
ret = __nfs4_get_fd(f, O_RDONLY);
if (!ret)
ret = __nfs4_get_fd(f, O_RDWR);
return ret;
}
static struct file *
find_readable_file(struct nfs4_file *f)
{
struct file *ret;
spin_lock(&f->fi_lock);
ret = find_readable_file_locked(f);
spin_unlock(&f->fi_lock);
return ret;
}
struct file *
find_any_file(struct nfs4_file *f)
{
struct file *ret;
spin_lock(&f->fi_lock);
ret = __nfs4_get_fd(f, O_RDWR);
if (!ret) {
ret = __nfs4_get_fd(f, O_WRONLY);
if (!ret)
ret = __nfs4_get_fd(f, O_RDONLY);
}
spin_unlock(&f->fi_lock);
return ret;
}
static atomic_long_t num_delegations;
unsigned long max_delegations;
/*
* Open owner state (share locks)
*/
/* hash tables for lock and open owners */
#define OWNER_HASH_BITS 8
#define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1)
static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
{
unsigned int ret;
ret = opaque_hashval(ownername->data, ownername->len);
return ret & OWNER_HASH_MASK;
}
/* hash table for nfs4_file */
#define FILE_HASH_BITS 8
#define FILE_HASH_SIZE (1 << FILE_HASH_BITS)
static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
{
return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
}
static unsigned int file_hashval(struct knfsd_fh *fh)
{
return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
}
static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
lockdep_assert_held(&fp->fi_lock);
if (access & NFS4_SHARE_ACCESS_WRITE)
atomic_inc(&fp->fi_access[O_WRONLY]);
if (access & NFS4_SHARE_ACCESS_READ)
atomic_inc(&fp->fi_access[O_RDONLY]);
}
static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
lockdep_assert_held(&fp->fi_lock);
/* Does this access mode make sense? */
if (access & ~NFS4_SHARE_ACCESS_BOTH)
return nfserr_inval;
/* Does it conflict with a deny mode already set? */
if ((access & fp->fi_share_deny) != 0)
return nfserr_share_denied;
__nfs4_file_get_access(fp, access);
return nfs_ok;
}
static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
{
/* Common case is that there is no deny mode. */
if (deny) {
/* Does this deny mode make sense? */
if (deny & ~NFS4_SHARE_DENY_BOTH)
return nfserr_inval;
if ((deny & NFS4_SHARE_DENY_READ) &&
atomic_read(&fp->fi_access[O_RDONLY]))
return nfserr_share_denied;
if ((deny & NFS4_SHARE_DENY_WRITE) &&
atomic_read(&fp->fi_access[O_WRONLY]))
return nfserr_share_denied;
}
return nfs_ok;
}
static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
might_lock(&fp->fi_lock);
if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
struct file *f1 = NULL;
struct file *f2 = NULL;
swap(f1, fp->fi_fds[oflag]);
if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
swap(f2, fp->fi_fds[O_RDWR]);
spin_unlock(&fp->fi_lock);
if (f1)
fput(f1);
if (f2)
fput(f2);
}
}
static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);
if (access & NFS4_SHARE_ACCESS_WRITE)
__nfs4_file_put_access(fp, O_WRONLY);
if (access & NFS4_SHARE_ACCESS_READ)
__nfs4_file_put_access(fp, O_RDONLY);
}
/*
* Allocate a new open/delegation state counter. This is needed for
* pNFS for proper return on close semantics.
*
* Note that we only allocate it for pNFS-enabled exports, otherwise
* all pointers to struct nfs4_clnt_odstate are always NULL.
*/
static struct nfs4_clnt_odstate *
alloc_clnt_odstate(struct nfs4_client *clp)
{
struct nfs4_clnt_odstate *co;
co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
if (co) {
co->co_client = clp;
atomic_set(&co->co_odcount, 1);
}
return co;
}
static void
hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
{
struct nfs4_file *fp = co->co_file;
lockdep_assert_held(&fp->fi_lock);
list_add(&co->co_perfile, &fp->fi_clnt_odstate);
}
static inline void
get_clnt_odstate(struct nfs4_clnt_odstate *co)
{
if (co)
atomic_inc(&co->co_odcount);
}
static void
put_clnt_odstate(struct nfs4_clnt_odstate *co)
{
struct nfs4_file *fp;
if (!co)
return;
fp = co->co_file;
if (atomic_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
list_del(&co->co_perfile);
spin_unlock(&fp->fi_lock);
nfsd4_return_all_file_layouts(co->co_client, fp);
kmem_cache_free(odstate_slab, co);
}
}
static struct nfs4_clnt_odstate *
find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
{
struct nfs4_clnt_odstate *co;
struct nfs4_client *cl;
if (!new)
return NULL;
cl = new->co_client;
spin_lock(&fp->fi_lock);
list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
if (co->co_client == cl) {
get_clnt_odstate(co);
goto out;
}
}
co = new;
co->co_file = fp;
hash_clnt_odstate_locked(new);
out:
spin_unlock(&fp->fi_lock);
return co;
}
struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
void (*sc_free)(struct nfs4_stid *))
{
struct nfs4_stid *stid;
int new_id;
stid = kmem_cache_zalloc(slab, GFP_KERNEL);
if (!stid)
return NULL;
idr_preload(GFP_KERNEL);
spin_lock(&cl->cl_lock);
new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT);
spin_unlock(&cl->cl_lock);
idr_preload_end();
if (new_id < 0)
goto out_free;
stid->sc_free = sc_free;
stid->sc_client = cl;
stid->sc_stateid.si_opaque.so_id = new_id;
stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
/* Will be incremented before return to client: */
atomic_set(&stid->sc_count, 1);
spin_lock_init(&stid->sc_lock);
/*
* It shouldn't be a problem to reuse an opaque stateid value.
* I don't think it is for 4.1. But with 4.0 I worry that, for
* example, a stray write retransmission could be accepted by
* the server when it should have been rejected. Therefore,
* adopt a trick from the sctp code to attempt to maximize the
* amount of time until an id is reused, by ensuring they always
* "increase" (mod INT_MAX):
*/
return stid;
out_free:
kmem_cache_free(slab, stid);
return NULL;
}
static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
{
struct nfs4_stid *stid;
stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
if (!stid)
return NULL;
return openlockstateid(stid);
}
static void nfs4_free_deleg(struct nfs4_stid *stid)
{
kmem_cache_free(deleg_slab, stid);
atomic_long_dec(&num_delegations);
}
/*
* When we recall a delegation, we should be careful not to hand it
* out again straight away.
* To ensure this we keep a pair of bloom filters ('new' and 'old')
* in which the filehandles of recalled delegations are "stored".
 * If a filehandle appears in either filter, a delegation is blocked.
* When a delegation is recalled, the filehandle is stored in the "new"
* filter.
* Every 30 seconds we swap the filters and clear the "new" one,
* unless both are empty of course.
*
 * Each filter is 256 bits.  We hash the filehandle to 32 bits and use the
* low 3 bytes as hash-table indices.
*
* 'blocked_delegations_lock', which is always taken in block_delegations(),
* is used to manage concurrent access. Testing does not need the lock
* except when swapping the two filters.
*/
static DEFINE_SPINLOCK(blocked_delegations_lock);
static struct bloom_pair {
int entries, old_entries;
time_t swap_time;
int new; /* index into 'set' */
DECLARE_BITMAP(set[2], 256);
} blocked_delegations;
static int delegation_blocked(struct knfsd_fh *fh)
{
u32 hash;
struct bloom_pair *bd = &blocked_delegations;
if (bd->entries == 0)
return 0;
if (seconds_since_boot() - bd->swap_time > 30) {
spin_lock(&blocked_delegations_lock);
if (seconds_since_boot() - bd->swap_time > 30) {
bd->entries -= bd->old_entries;
bd->old_entries = bd->entries;
memset(bd->set[bd->new], 0,
sizeof(bd->set[0]));
bd->new = 1-bd->new;
bd->swap_time = seconds_since_boot();
}
spin_unlock(&blocked_delegations_lock);
}
hash = jhash(&fh->fh_base, fh->fh_size, 0);
if (test_bit(hash&255, bd->set[0]) &&
test_bit((hash>>8)&255, bd->set[0]) &&
test_bit((hash>>16)&255, bd->set[0]))
return 1;
if (test_bit(hash&255, bd->set[1]) &&
test_bit((hash>>8)&255, bd->set[1]) &&
test_bit((hash>>16)&255, bd->set[1]))
return 1;
return 0;
}
static void block_delegations(struct knfsd_fh *fh)
{
u32 hash;
struct bloom_pair *bd = &blocked_delegations;
hash = jhash(&fh->fh_base, fh->fh_size, 0);
spin_lock(&blocked_delegations_lock);
__set_bit(hash&255, bd->set[bd->new]);
__set_bit((hash>>8)&255, bd->set[bd->new]);
__set_bit((hash>>16)&255, bd->set[bd->new]);
if (bd->entries == 0)
bd->swap_time = seconds_since_boot();
bd->entries += 1;
spin_unlock(&blocked_delegations_lock);
}
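/*
 * Example of the triple-hash scheme above: for jhash() == 0x00c0ffee
 * the bits set/tested are 0xee, 0xff and 0xc0, i.e. the low three
 * bytes of the 32-bit hash each index one of a filter's 256 bits.
 * A filehandle is reported as blocked only when all three bits are
 * set in the same filter, which keeps false positives rare while the
 * entry count stays small.
 */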
static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh,
struct nfs4_clnt_odstate *odstate)
{
struct nfs4_delegation *dp;
long n;
dprintk("NFSD alloc_init_deleg\n");
n = atomic_long_inc_return(&num_delegations);
if (n < 0 || n > max_delegations)
goto out_dec;
	if (delegation_blocked(&current_fh->fh_handle))
goto out_dec;
dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
if (dp == NULL)
goto out_dec;
/*
* delegation seqid's are never incremented. The 4.1 special
* meaning of seqid 0 isn't meaningful, really, but let's avoid
* 0 anyway just for consistency and use 1:
*/
dp->dl_stid.sc_stateid.si_generation = 1;
INIT_LIST_HEAD(&dp->dl_perfile);
INIT_LIST_HEAD(&dp->dl_perclnt);
INIT_LIST_HEAD(&dp->dl_recall_lru);
dp->dl_clnt_odstate = odstate;
get_clnt_odstate(odstate);
dp->dl_type = NFS4_OPEN_DELEGATE_READ;
dp->dl_retries = 1;
nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
&nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
return dp;
out_dec:
atomic_long_dec(&num_delegations);
return NULL;
}
void
nfs4_put_stid(struct nfs4_stid *s)
{
struct nfs4_file *fp = s->sc_file;
struct nfs4_client *clp = s->sc_client;
might_lock(&clp->cl_lock);
if (!atomic_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
wake_up_all(&close_wq);
return;
}
idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
spin_unlock(&clp->cl_lock);
s->sc_free(s);
if (fp)
put_nfs4_file(fp);
}
void
nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
{
stateid_t *src = &stid->sc_stateid;
spin_lock(&stid->sc_lock);
if (unlikely(++src->si_generation == 0))
src->si_generation = 1;
memcpy(dst, src, sizeof(*dst));
spin_unlock(&stid->sc_lock);
}
static void nfs4_put_deleg_lease(struct nfs4_file *fp)
{
struct file *filp = NULL;
spin_lock(&fp->fi_lock);
if (fp->fi_deleg_file && --fp->fi_delegees == 0)
swap(filp, fp->fi_deleg_file);
spin_unlock(&fp->fi_lock);
if (filp) {
vfs_setlease(filp, F_UNLCK, NULL, (void **)&fp);
fput(filp);
}
}
void nfs4_unhash_stid(struct nfs4_stid *s)
{
s->sc_type = 0;
}
/**
* nfs4_get_existing_delegation - Discover if this delegation already exists
* @clp: a pointer to the nfs4_client we're granting a delegation to
* @fp: a pointer to the nfs4_file we're granting a delegation on
*
* Return:
 * On success: 0 if an existing delegation was not found.
*
* On error: -EAGAIN if one was previously granted to this nfs4_client
* for this nfs4_file.
*
*/
static int
nfs4_get_existing_delegation(struct nfs4_client *clp, struct nfs4_file *fp)
{
struct nfs4_delegation *searchdp = NULL;
struct nfs4_client *searchclp = NULL;
lockdep_assert_held(&state_lock);
lockdep_assert_held(&fp->fi_lock);
list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
searchclp = searchdp->dl_stid.sc_client;
if (clp == searchclp) {
return -EAGAIN;
}
}
return 0;
}
/**
* hash_delegation_locked - Add a delegation to the appropriate lists
* @dp: a pointer to the nfs4_delegation we are adding.
* @fp: a pointer to the nfs4_file we're granting a delegation on
*
* Return:
 * On success: 0 if the delegation was successfully hashed.
*
* On error: -EAGAIN if one was previously granted to this
* nfs4_client for this nfs4_file. Delegation is not hashed.
*
*/
static int
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
int status;
struct nfs4_client *clp = dp->dl_stid.sc_client;
lockdep_assert_held(&state_lock);
lockdep_assert_held(&fp->fi_lock);
status = nfs4_get_existing_delegation(clp, fp);
if (status)
return status;
++fp->fi_delegees;
atomic_inc(&dp->dl_stid.sc_count);
dp->dl_stid.sc_type = NFS4_DELEG_STID;
list_add(&dp->dl_perfile, &fp->fi_delegations);
list_add(&dp->dl_perclnt, &clp->cl_delegations);
return 0;
}
static bool
unhash_delegation_locked(struct nfs4_delegation *dp)
{
struct nfs4_file *fp = dp->dl_stid.sc_file;
lockdep_assert_held(&state_lock);
if (list_empty(&dp->dl_perfile))
return false;
dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
/* Ensure that deleg break won't try to requeue it */
++dp->dl_time;
spin_lock(&fp->fi_lock);
list_del_init(&dp->dl_perclnt);
list_del_init(&dp->dl_recall_lru);
list_del_init(&dp->dl_perfile);
spin_unlock(&fp->fi_lock);
return true;
}
static void destroy_delegation(struct nfs4_delegation *dp)
{
bool unhashed;
spin_lock(&state_lock);
unhashed = unhash_delegation_locked(dp);
spin_unlock(&state_lock);
if (unhashed) {
put_clnt_odstate(dp->dl_clnt_odstate);
nfs4_put_deleg_lease(dp->dl_stid.sc_file);
nfs4_put_stid(&dp->dl_stid);
}
}
static void revoke_delegation(struct nfs4_delegation *dp)
{
struct nfs4_client *clp = dp->dl_stid.sc_client;
WARN_ON(!list_empty(&dp->dl_recall_lru));
put_clnt_odstate(dp->dl_clnt_odstate);
nfs4_put_deleg_lease(dp->dl_stid.sc_file);
if (clp->cl_minorversion == 0)
nfs4_put_stid(&dp->dl_stid);
else {
dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
spin_lock(&clp->cl_lock);
list_add(&dp->dl_recall_lru, &clp->cl_revoked);
spin_unlock(&clp->cl_lock);
}
}
/*
* SETCLIENTID state
*/
static unsigned int clientid_hashval(u32 id)
{
return id & CLIENT_HASH_MASK;
}
static unsigned int clientstr_hashval(const char *name)
{
return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
}
/*
* We store the NONE, READ, WRITE, and BOTH bits separately in the
* st_{access,deny}_bmap field of the stateid, in order to track not
* only what share bits are currently in force, but also what
* combinations of share bits previous opens have used. This allows us
* to enforce the recommendation of rfc 3530 14.2.19 that the server
 * return an error if the client attempts to downgrade to a combination
* of share bits not explicable by closing some of its previous opens.
*
* XXX: This enforcement is actually incomplete, since we don't keep
* track of access/deny bit combinations; so, e.g., we allow:
*
* OPEN allow read, deny write
* OPEN allow both, deny none
* DOWNGRADE allow read, deny none
*
* which we should reject.
*/
static unsigned int
bmap_to_share_mode(unsigned long bmap) {
int i;
unsigned int access = 0;
for (i = 1; i < 4; i++) {
if (test_bit(i, &bmap))
access |= i;
}
return access;
}
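/*
 * Example: a stateid first opened with NFS4_SHARE_ACCESS_READ (bit 1)
 * and later upgraded with NFS4_SHARE_ACCESS_BOTH (bit 3) has an
 * access bmap of 0b1010; bmap_to_share_mode() then returns 1 | 3 ==
 * NFS4_SHARE_ACCESS_BOTH, the union of everything ever granted.
 */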
/* set share access for a given stateid */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
unsigned char mask = 1 << access;
WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
stp->st_access_bmap |= mask;
}
/* clear share access for a given stateid */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
unsigned char mask = 1 << access;
WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
stp->st_access_bmap &= ~mask;
}
/* test whether a given stateid has access */
static inline bool
test_access(u32 access, struct nfs4_ol_stateid *stp)
{
unsigned char mask = 1 << access;
return (bool)(stp->st_access_bmap & mask);
}
/* set share deny for a given stateid */
static inline void
set_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
unsigned char mask = 1 << deny;
WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
stp->st_deny_bmap |= mask;
}
/* clear share deny for a given stateid */
static inline void
clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
unsigned char mask = 1 << deny;
WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
stp->st_deny_bmap &= ~mask;
}
/* test whether a given stateid is denying specific access */
static inline bool
test_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
unsigned char mask = 1 << deny;
return (bool)(stp->st_deny_bmap & mask);
}
static int nfs4_access_to_omode(u32 access)
{
switch (access & NFS4_SHARE_ACCESS_BOTH) {
case NFS4_SHARE_ACCESS_READ:
return O_RDONLY;
case NFS4_SHARE_ACCESS_WRITE:
return O_WRONLY;
case NFS4_SHARE_ACCESS_BOTH:
return O_RDWR;
}
WARN_ON_ONCE(1);
return O_RDONLY;
}
/*
* A stateid that had a deny mode associated with it is being released
* or downgraded. Recalculate the deny mode on the file.
*/
static void
recalculate_deny_mode(struct nfs4_file *fp)
{
struct nfs4_ol_stateid *stp;
spin_lock(&fp->fi_lock);
fp->fi_share_deny = 0;
list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
spin_unlock(&fp->fi_lock);
}
static void
reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
int i;
bool change = false;
for (i = 1; i < 4; i++) {
if ((i & deny) != i) {
change = true;
clear_deny(i, stp);
}
}
/* Recalculate per-file deny mode if there was a change */
if (change)
recalculate_deny_mode(stp->st_stid.sc_file);
}
/* release all access and file references for a given stateid */
static void
release_all_access(struct nfs4_ol_stateid *stp)
{
int i;
struct nfs4_file *fp = stp->st_stid.sc_file;
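	/*
	 * Callers are expected to have already unhashed the stateid from
	 * fp->fi_stateids, so recalculating here drops this stateid's deny
	 * bits from the per-file union.
	 */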
if (fp && stp->st_deny_bmap != 0)
recalculate_deny_mode(fp);
for (i = 1; i < 4; i++) {
if (test_access(i, stp))
nfs4_file_put_access(stp->st_stid.sc_file, i);
clear_access(i, stp);
}
}
static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
{
kfree(sop->so_owner.data);
sop->so_ops->so_free(sop);
}
static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
{
struct nfs4_client *clp = sop->so_client;
might_lock(&clp->cl_lock);
if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
return;
sop->so_ops->so_unhash(sop);
spin_unlock(&clp->cl_lock);
nfs4_free_stateowner(sop);
}
static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
{
struct nfs4_file *fp = stp->st_stid.sc_file;
lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
if (list_empty(&stp->st_perfile))
return false;
spin_lock(&fp->fi_lock);
list_del_init(&stp->st_perfile);
spin_unlock(&fp->fi_lock);
list_del(&stp->st_perstateowner);
return true;
}
static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
{
struct nfs4_ol_stateid *stp = openlockstateid(stid);
put_clnt_odstate(stp->st_clnt_odstate);
release_all_access(stp);
if (stp->st_stateowner)
nfs4_put_stateowner(stp->st_stateowner);
kmem_cache_free(stateid_slab, stid);
}
static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
{
struct nfs4_ol_stateid *stp = openlockstateid(stid);
struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
struct file *file;
file = find_any_file(stp->st_stid.sc_file);
if (file)
filp_close(file, (fl_owner_t)lo);
nfs4_free_ol_stateid(stid);
}
/*
* Put the persistent reference to an already unhashed generic stateid, while
* holding the cl_lock. If it's the last reference, then put it onto the
* reaplist for later destruction.
*/
static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
struct list_head *reaplist)
{
struct nfs4_stid *s = &stp->st_stid;
struct nfs4_client *clp = s->sc_client;
lockdep_assert_held(&clp->cl_lock);
WARN_ON_ONCE(!list_empty(&stp->st_locks));
if (!atomic_dec_and_test(&s->sc_count)) {
wake_up_all(&close_wq);
return;
}
idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
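	/* st_locks is verified empty above, so reuse it as the reaplist linkage */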
list_add(&stp->st_locks, reaplist);
}
static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
list_del_init(&stp->st_locks);
nfs4_unhash_stid(&stp->st_stid);
return unhash_ol_stateid(stp);
}
static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
struct nfs4_client *clp = stp->st_stid.sc_client;
bool unhashed;
spin_lock(&clp->cl_lock);
unhashed = unhash_lock_stateid(stp);
spin_unlock(&clp->cl_lock);
if (unhashed)
nfs4_put_stid(&stp->st_stid);
}
static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
{
struct nfs4_client *clp = lo->lo_owner.so_client;
lockdep_assert_held(&clp->cl_lock);
list_del_init(&lo->lo_owner.so_strhash);
}
/*
* Free a list of generic stateids that were collected earlier after being
* fully unhashed.
*/
static void
free_ol_stateid_reaplist(struct list_head *reaplist)
{
struct nfs4_ol_stateid *stp;
struct nfs4_file *fp;
might_sleep();
while (!list_empty(reaplist)) {
stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
st_locks);
list_del(&stp->st_locks);
fp = stp->st_stid.sc_file;
stp->st_stid.sc_free(&stp->st_stid);
if (fp)
put_nfs4_file(fp);
}
}
static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
struct list_head *reaplist)
{
struct nfs4_ol_stateid *stp;
lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);
while (!list_empty(&open_stp->st_locks)) {
stp = list_entry(open_stp->st_locks.next,
struct nfs4_ol_stateid, st_locks);
WARN_ON(!unhash_lock_stateid(stp));
put_ol_stateid_locked(stp, reaplist);
}
}
static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
struct list_head *reaplist)
{
bool unhashed;
lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
unhashed = unhash_ol_stateid(stp);
release_open_stateid_locks(stp, reaplist);
return unhashed;
}
static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
LIST_HEAD(reaplist);
spin_lock(&stp->st_stid.sc_client->cl_lock);
if (unhash_open_stateid(stp, &reaplist))
put_ol_stateid_locked(stp, &reaplist);
spin_unlock(&stp->st_stid.sc_client->cl_lock);
free_ol_stateid_reaplist(&reaplist);
}
static void unhash_openowner_locked(struct nfs4_openowner *oo)
{
struct nfs4_client *clp = oo->oo_owner.so_client;
lockdep_assert_held(&clp->cl_lock);
list_del_init(&oo->oo_owner.so_strhash);
list_del_init(&oo->oo_perclient);
}
static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
nfsd_net_id);
struct nfs4_ol_stateid *s;
spin_lock(&nn->client_lock);
s = oo->oo_last_closed_stid;
if (s) {
list_del_init(&oo->oo_close_lru);
oo->oo_last_closed_stid = NULL;
}
spin_unlock(&nn->client_lock);
if (s)
nfs4_put_stid(&s->st_stid);
}
static void release_openowner(struct nfs4_openowner *oo)
{
struct nfs4_ol_stateid *stp;
struct nfs4_client *clp = oo->oo_owner.so_client;
struct list_head reaplist;
INIT_LIST_HEAD(&reaplist);
spin_lock(&clp->cl_lock);
unhash_openowner_locked(oo);
while (!list_empty(&oo->oo_owner.so_stateids)) {
stp = list_first_entry(&oo->oo_owner.so_stateids,
struct nfs4_ol_stateid, st_perstateowner);
if (unhash_open_stateid(stp, &reaplist))
put_ol_stateid_locked(stp, &reaplist);
}
spin_unlock(&clp->cl_lock);
free_ol_stateid_reaplist(&reaplist);
release_last_closed_stateid(oo);
nfs4_put_stateowner(&oo->oo_owner);
}
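/*
 * Sessionids are hashed on their sequence field; see gen_sessionid() below
 * for the layout (clientid, sequence, reserved).
 */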
static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;
return sid->sequence % SESSION_HASH_SIZE;
}
#ifdef CONFIG_SUNRPC_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
u32 *ptr = (u32 *)(&sessionid->data[0]);
dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif
/*
* Bump the seqid on cstate->replay_owner, and clear replay_owner if it
* won't be used for replay.
*/
void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
{
struct nfs4_stateowner *so = cstate->replay_owner;
if (nfserr == nfserr_replay_me)
return;
if (!seqid_mutating_err(ntohl(nfserr))) {
nfsd4_cstate_clear_replay(cstate);
return;
}
if (!so)
return;
if (so->so_is_open_owner)
release_last_closed_stateid(openowner(so));
so->so_seqid++;
return;
}
static void
gen_sessionid(struct nfsd4_session *ses)
{
struct nfs4_client *clp = ses->se_client;
struct nfsd4_sessionid *sid;
sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
sid->clientid = clp->cl_clientid;
sid->sequence = current_sessionid++;
sid->reserved = 0;
}
/*
 * The protocol defines ca_maxresponsesize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time. Therefore we can advertise a ca_maxresponsesize_cached
* value that is the number of bytes in our cache plus a few additional
* bytes. In order to stay on the safe side, and not promise more than
* we can cache, those additional bytes must be the minimum possible: 24
* bytes of rpc header (xid through accept state, with AUTH_NULL
* verifier), 12 for the compound header (with zero-length tag), and 44
* for the SEQUENCE op response:
*/
#define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44)
static void
free_session_slots(struct nfsd4_session *ses)
{
int i;
for (i = 0; i < ses->se_fchannel.maxreqs; i++)
kfree(ses->se_slots[i]);
}
/*
* We don't actually need to cache the rpc and session headers, so we
* can allocate a little less for each slot:
*/
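/*
 * E.g. with NFSD_MIN_HDR_SEQ_SZ of 80 bytes, a negotiated maxresp_cached of
 * 2128 leaves 2048 bytes of reply data to cache per slot, plus room for the
 * struct nfsd4_slot bookkeeping itself.
 */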
static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
{
u32 size;
if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
size = 0;
else
size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
return size + sizeof(struct nfsd4_slot);
}
/*
* XXX: If we run out of reserved DRC memory we could (up to a point)
* re-negotiate active sessions and reduce their slot usage to make
* room for new connections. For now we just fail the create session.
*/
static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
{
u32 slotsize = slot_bytes(ca);
u32 num = ca->maxreqs;
int avail;
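	/*
	 * Clamp the request to the smaller of the per-session cap and the
	 * DRC memory still unreserved server-wide.
	 */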
spin_lock(&nfsd_drc_lock);
avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
nfsd_drc_max_mem - nfsd_drc_mem_used);
num = min_t(int, num, avail / slotsize);
nfsd_drc_mem_used += num * slotsize;
spin_unlock(&nfsd_drc_lock);
return num;
}
static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
{
int slotsize = slot_bytes(ca);
spin_lock(&nfsd_drc_lock);
nfsd_drc_mem_used -= slotsize * ca->maxreqs;
spin_unlock(&nfsd_drc_lock);
}
static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
struct nfsd4_channel_attrs *battrs)
{
int numslots = fattrs->maxreqs;
int slotsize = slot_bytes(fattrs);
struct nfsd4_session *new;
int mem, i;
BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
+ sizeof(struct nfsd4_session) > PAGE_SIZE);
mem = numslots * sizeof(struct nfsd4_slot *);
new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
if (!new)
return NULL;
/* allocate each struct nfsd4_slot and data cache in one piece */
for (i = 0; i < numslots; i++) {
new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
if (!new->se_slots[i])
goto out_free;
}
memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
return new;
out_free:
while (i--)
kfree(new->se_slots[i]);
kfree(new);
return NULL;
}
static void free_conn(struct nfsd4_conn *c)
{
svc_xprt_put(c->cn_xprt);
kfree(c);
}
static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
struct nfs4_client *clp = c->cn_session->se_client;
spin_lock(&clp->cl_lock);
if (!list_empty(&c->cn_persession)) {
list_del(&c->cn_persession);
free_conn(c);
}
nfsd4_probe_callback(clp);
spin_unlock(&clp->cl_lock);
}
static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
{
struct nfsd4_conn *conn;
conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
if (!conn)
return NULL;
svc_xprt_get(rqstp->rq_xprt);
conn->cn_xprt = rqstp->rq_xprt;
conn->cn_flags = flags;
INIT_LIST_HEAD(&conn->cn_xpt_user.list);
return conn;
}
static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
conn->cn_session = ses;
list_add(&conn->cn_persession, &ses->se_conns);
}
static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
struct nfs4_client *clp = ses->se_client;
spin_lock(&clp->cl_lock);
__nfsd4_hash_conn(conn, ses);
spin_unlock(&clp->cl_lock);
}
static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
conn->cn_xpt_user.callback = nfsd4_conn_lost;
return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}
static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
int ret;
nfsd4_hash_conn(conn, ses);
ret = nfsd4_register_conn(conn);
if (ret)
/* oops; xprt is already down: */
nfsd4_conn_lost(&conn->cn_xpt_user);
/* We may have gained or lost a callback channel: */
nfsd4_probe_callback_sync(ses->se_client);
}
static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
{
u32 dir = NFS4_CDFC4_FORE;
if (cses->flags & SESSION4_BACK_CHAN)
dir |= NFS4_CDFC4_BACK;
return alloc_conn(rqstp, dir);
}
/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
struct nfs4_client *clp = s->se_client;
struct nfsd4_conn *c;
spin_lock(&clp->cl_lock);
while (!list_empty(&s->se_conns)) {
c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
list_del_init(&c->cn_persession);
spin_unlock(&clp->cl_lock);
unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
free_conn(c);
spin_lock(&clp->cl_lock);
}
spin_unlock(&clp->cl_lock);
}
static void __free_session(struct nfsd4_session *ses)
{
free_session_slots(ses);
kfree(ses);
}
static void free_session(struct nfsd4_session *ses)
{
nfsd4_del_conns(ses);
nfsd4_put_drc_mem(&ses->se_fchannel);
__free_session(ses);
}
static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
int idx;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
new->se_client = clp;
gen_sessionid(new);
INIT_LIST_HEAD(&new->se_conns);
new->se_cb_seq_nr = 1;
new->se_flags = cses->flags;
new->se_cb_prog = cses->callback_prog;
new->se_cb_sec = cses->cb_sec;
atomic_set(&new->se_ref, 0);
idx = hash_sessionid(&new->se_sessionid);
list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
spin_lock(&clp->cl_lock);
list_add(&new->se_perclnt, &clp->cl_sessions);
spin_unlock(&clp->cl_lock);
{
struct sockaddr *sa = svc_addr(rqstp);
/*
* This is a little silly; with sessions there's no real
* use for the callback address. Use the peer address
* as a reasonable default for now, but consider fixing
* the rpc client not to require an address in the
* future:
*/
rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
}
}
/* caller must hold client_lock */
static struct nfsd4_session *
__find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
{
struct nfsd4_session *elem;
int idx;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
lockdep_assert_held(&nn->client_lock);
dump_sessionid(__func__, sessionid);
idx = hash_sessionid(sessionid);
/* Search in the appropriate list */
list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
if (!memcmp(elem->se_sessionid.data, sessionid->data,
NFS4_MAX_SESSIONID_LEN)) {
return elem;
}
}
dprintk("%s: session not found\n", __func__);
return NULL;
}
static struct nfsd4_session *
find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
__be32 *ret)
{
struct nfsd4_session *session;
__be32 status = nfserr_badsession;
session = __find_in_sessionid_hashtbl(sessionid, net);
if (!session)
goto out;
status = nfsd4_get_session_locked(session);
if (status)
session = NULL;
out:
*ret = status;
return session;
}
/* caller must hold client_lock */
static void
unhash_session(struct nfsd4_session *ses)
{
struct nfs4_client *clp = ses->se_client;
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
lockdep_assert_held(&nn->client_lock);
list_del(&ses->se_hash);
spin_lock(&ses->se_client->cl_lock);
list_del(&ses->se_perclnt);
spin_unlock(&ses->se_client->cl_lock);
}
/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
static int
STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
{
/*
* We're assuming the clid was not given out from a boot
* precisely 2^32 (about 136 years) before this one. That seems
* a safe assumption:
*/
if (clid->cl_boot == (u32)nn->boot_time)
return 0;
dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
clid->cl_boot, clid->cl_id, nn->boot_time);
return 1;
}
/*
 * XXX Should we use a slab cache?
* This type of memory management is somewhat inefficient, but we use it
* anyway since SETCLIENTID is not a common operation.
*/
static struct nfs4_client *alloc_client(struct xdr_netobj name)
{
struct nfs4_client *clp;
int i;
clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
if (clp == NULL)
return NULL;
clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
if (clp->cl_name.data == NULL)
goto err_no_name;
clp->cl_ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
OWNER_HASH_SIZE, GFP_KERNEL);
if (!clp->cl_ownerstr_hashtbl)
goto err_no_hashtbl;
for (i = 0; i < OWNER_HASH_SIZE; i++)
INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
clp->cl_name.len = name.len;
INIT_LIST_HEAD(&clp->cl_sessions);
idr_init(&clp->cl_stateids);
atomic_set(&clp->cl_refcount, 0);
clp->cl_cb_state = NFSD4_CB_UNKNOWN;
INIT_LIST_HEAD(&clp->cl_idhash);
INIT_LIST_HEAD(&clp->cl_openowners);
INIT_LIST_HEAD(&clp->cl_delegations);
INIT_LIST_HEAD(&clp->cl_lru);
INIT_LIST_HEAD(&clp->cl_revoked);
#ifdef CONFIG_NFSD_PNFS
INIT_LIST_HEAD(&clp->cl_lo_states);
#endif
spin_lock_init(&clp->cl_lock);
rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
return clp;
err_no_hashtbl:
kfree(clp->cl_name.data);
err_no_name:
kfree(clp);
return NULL;
}
static void
free_client(struct nfs4_client *clp)
{
while (!list_empty(&clp->cl_sessions)) {
struct nfsd4_session *ses;
ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
se_perclnt);
list_del(&ses->se_perclnt);
WARN_ON_ONCE(atomic_read(&ses->se_ref));
free_session(ses);
}
rpc_destroy_wait_queue(&clp->cl_cb_waitq);
free_svc_cred(&clp->cl_cred);
kfree(clp->cl_ownerstr_hashtbl);
kfree(clp->cl_name.data);
idr_destroy(&clp->cl_stateids);
kfree(clp);
}
/* must be called under the client_lock */
static void
unhash_client_locked(struct nfs4_client *clp)
{
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
struct nfsd4_session *ses;
lockdep_assert_held(&nn->client_lock);
/* Mark the client as expired! */
clp->cl_time = 0;
/* Make it invisible */
if (!list_empty(&clp->cl_idhash)) {
list_del_init(&clp->cl_idhash);
if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
else
rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
}
list_del_init(&clp->cl_lru);
spin_lock(&clp->cl_lock);
list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
list_del_init(&ses->se_hash);
spin_unlock(&clp->cl_lock);
}
static void
unhash_client(struct nfs4_client *clp)
{
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
spin_lock(&nn->client_lock);
unhash_client_locked(clp);
spin_unlock(&nn->client_lock);
}
static __be32 mark_client_expired_locked(struct nfs4_client *clp)
{
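	/*
	 * Someone still holds a reference; have the caller return
	 * nfserr_jukebox (NFS4ERR_DELAY) rather than tear the client
	 * down underneath them.
	 */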
if (atomic_read(&clp->cl_refcount))
return nfserr_jukebox;
unhash_client_locked(clp);
return nfs_ok;
}
static void
__destroy_client(struct nfs4_client *clp)
{
struct nfs4_openowner *oo;
struct nfs4_delegation *dp;
struct list_head reaplist;
INIT_LIST_HEAD(&reaplist);
spin_lock(&state_lock);
while (!list_empty(&clp->cl_delegations)) {
dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
WARN_ON(!unhash_delegation_locked(dp));
list_add(&dp->dl_recall_lru, &reaplist);
}
spin_unlock(&state_lock);
while (!list_empty(&reaplist)) {
dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
list_del_init(&dp->dl_recall_lru);
put_clnt_odstate(dp->dl_clnt_odstate);
nfs4_put_deleg_lease(dp->dl_stid.sc_file);
nfs4_put_stid(&dp->dl_stid);
}
while (!list_empty(&clp->cl_revoked)) {
dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
list_del_init(&dp->dl_recall_lru);
nfs4_put_stid(&dp->dl_stid);
}
while (!list_empty(&clp->cl_openowners)) {
oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
nfs4_get_stateowner(&oo->oo_owner);
release_openowner(oo);
}
nfsd4_return_all_client_layouts(clp);
nfsd4_shutdown_callback(clp);
if (clp->cl_cb_conn.cb_xprt)
svc_xprt_put(clp->cl_cb_conn.cb_xprt);
free_client(clp);
}
static void
destroy_client(struct nfs4_client *clp)
{
unhash_client(clp);
__destroy_client(clp);
}
static void expire_client(struct nfs4_client *clp)
{
unhash_client(clp);
nfsd4_client_record_remove(clp);
__destroy_client(clp);
}
static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
memcpy(target->cl_verifier.data, source->data,
sizeof(target->cl_verifier.data));
}
static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
{
target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
target->cl_clientid.cl_id = source->cl_clientid.cl_id;
}
static int copy_cred(struct svc_cred *target, struct svc_cred *source)
{
target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
target->cr_raw_principal = kstrdup(source->cr_raw_principal,
GFP_KERNEL);
if ((source->cr_principal && ! target->cr_principal) ||
(source->cr_raw_principal && ! target->cr_raw_principal))
return -ENOMEM;
target->cr_flavor = source->cr_flavor;
target->cr_uid = source->cr_uid;
target->cr_gid = source->cr_gid;
target->cr_group_info = source->cr_group_info;
get_group_info(target->cr_group_info);
target->cr_gss_mech = source->cr_gss_mech;
if (source->cr_gss_mech)
gss_mech_get(source->cr_gss_mech);
return 0;
}
static int
compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
{
if (o1->len < o2->len)
return -1;
if (o1->len > o2->len)
return 1;
return memcmp(o1->data, o2->data, o1->len);
}
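/* n1 and n2 are the fixed-length (HEXDIR_LEN) hex recovery-directory names */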
static int same_name(const char *n1, const char *n2)
{
return 0 == memcmp(n1, n2, HEXDIR_LEN);
}
static int
same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
{
return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
}
static int
same_clid(clientid_t *cl1, clientid_t *cl2)
{
return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
}
static bool groups_equal(struct group_info *g1, struct group_info *g2)
{
int i;
if (g1->ngroups != g2->ngroups)
return false;
for (i=0; i<g1->ngroups; i++)
if (!gid_eq(g1->gid[i], g2->gid[i]))
return false;
return true;
}
/*
 * RFC 3530 language requires clid_inuse be returned when the
 * "principal" associated with a request differs from that previously
 * used.  We use uid, gids, and the gss principal string as our best
* approximation. We also don't want to allow non-gss use of a client
* established using gss: in theory cr_principal should catch that
* change, but in practice cr_principal can be null even in the gss case
* since gssd doesn't always pass down a principal string.
*/
static bool is_gss_cred(struct svc_cred *cr)
{
/* Is cr_flavor one of the gss "pseudoflavors"?: */
return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
}
static bool
same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
{
if ((is_gss_cred(cr1) != is_gss_cred(cr2))
|| (!uid_eq(cr1->cr_uid, cr2->cr_uid))
|| (!gid_eq(cr1->cr_gid, cr2->cr_gid))
|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
return false;
if (cr1->cr_principal == cr2->cr_principal)
return true;
if (!cr1->cr_principal || !cr2->cr_principal)
return false;
return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
}
static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
{
struct svc_cred *cr = &rqstp->rq_cred;
u32 service;
if (!cr->cr_gss_mech)
return false;
service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
return service == RPC_GSS_SVC_INTEGRITY ||
service == RPC_GSS_SVC_PRIVACY;
}
bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
{
struct svc_cred *cr = &rqstp->rq_cred;
if (!cl->cl_mach_cred)
return true;
if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
return false;
if (!svc_rqst_integrity_protected(rqstp))
return false;
if (cl->cl_cred.cr_raw_principal)
return 0 == strcmp(cl->cl_cred.cr_raw_principal,
cr->cr_raw_principal);
if (!cr->cr_principal)
return false;
return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
}
static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
{
__be32 verf[2];
/*
* This is opaque to client, so no need to byte-swap. Use
* __force to keep sparse happy
*/
verf[0] = (__force __be32)get_seconds();
verf[1] = (__force __be32)nn->clverifier_counter++;
memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
}
static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
{
clp->cl_clientid.cl_boot = nn->boot_time;
clp->cl_clientid.cl_id = nn->clientid_counter++;
gen_confirm(clp, nn);
}
static struct nfs4_stid *
find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
{
struct nfs4_stid *ret;
ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
if (!ret || !ret->sc_type)
return NULL;
return ret;
}
static struct nfs4_stid *
find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
{
struct nfs4_stid *s;
spin_lock(&cl->cl_lock);
s = find_stateid_locked(cl, t);
if (s != NULL) {
if (typemask & s->sc_type)
atomic_inc(&s->sc_count);
else
s = NULL;
}
spin_unlock(&cl->cl_lock);
return s;
}
static struct nfs4_client *create_client(struct xdr_netobj name,
struct svc_rqst *rqstp, nfs4_verifier *verf)
{
struct nfs4_client *clp;
struct sockaddr *sa = svc_addr(rqstp);
int ret;
struct net *net = SVC_NET(rqstp);
clp = alloc_client(name);
if (clp == NULL)
return NULL;
ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
if (ret) {
free_client(clp);
return NULL;
}
nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
clp->cl_time = get_seconds();
clear_bit(0, &clp->cl_cb_slot_busy);
copy_verf(clp, verf);
rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
clp->cl_cb_session = NULL;
clp->net = net;
return clp;
}
static void
add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
{
struct rb_node **new = &(root->rb_node), *parent = NULL;
struct nfs4_client *clp;
while (*new) {
clp = rb_entry(*new, struct nfs4_client, cl_namenode);
parent = *new;
if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
new = &((*new)->rb_left);
else
new = &((*new)->rb_right);
}
rb_link_node(&new_clp->cl_namenode, parent, new);
rb_insert_color(&new_clp->cl_namenode, root);
}
static struct nfs4_client *
find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
{
int cmp;
struct rb_node *node = root->rb_node;
struct nfs4_client *clp;
while (node) {
clp = rb_entry(node, struct nfs4_client, cl_namenode);
cmp = compare_blob(&clp->cl_name, name);
if (cmp > 0)
node = node->rb_left;
else if (cmp < 0)
node = node->rb_right;
else
return clp;
}
return NULL;
}
static void
add_to_unconfirmed(struct nfs4_client *clp)
{
unsigned int idhashval;
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
lockdep_assert_held(&nn->client_lock);
clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
add_clp_to_name_tree(clp, &nn->unconf_name_tree);
idhashval = clientid_hashval(clp->cl_clientid.cl_id);
list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
renew_client_locked(clp);
}
static void
move_to_confirmed(struct nfs4_client *clp)
{
unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
lockdep_assert_held(&nn->client_lock);
dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
add_clp_to_name_tree(clp, &nn->conf_name_tree);
set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
renew_client_locked(clp);
}
static struct nfs4_client *
find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
{
struct nfs4_client *clp;
unsigned int idhashval = clientid_hashval(clid->cl_id);
list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
if (same_clid(&clp->cl_clientid, clid)) {
if ((bool)clp->cl_minorversion != sessions)
return NULL;
renew_client_locked(clp);
return clp;
}
}
return NULL;
}
static struct nfs4_client *
find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
struct list_head *tbl = nn->conf_id_hashtbl;
lockdep_assert_held(&nn->client_lock);
return find_client_in_id_table(tbl, clid, sessions);
}
static struct nfs4_client *
find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
struct list_head *tbl = nn->unconf_id_hashtbl;
lockdep_assert_held(&nn->client_lock);
return find_client_in_id_table(tbl, clid, sessions);
}
static bool clp_used_exchangeid(struct nfs4_client *clp)
{
return clp->cl_exchange_flags != 0;
}
static struct nfs4_client *
find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
lockdep_assert_held(&nn->client_lock);
return find_clp_in_name_tree(name, &nn->conf_name_tree);
}
static struct nfs4_client *
find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
lockdep_assert_held(&nn->client_lock);
return find_clp_in_name_tree(name, &nn->unconf_name_tree);
}
static void
gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
{
struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
struct sockaddr *sa = svc_addr(rqstp);
u32 scopeid = rpc_get_scope_id(sa);
unsigned short expected_family;
/* Currently, we only support tcp and tcp6 for the callback channel */
if (se->se_callback_netid_len == 3 &&
!memcmp(se->se_callback_netid_val, "tcp", 3))
expected_family = AF_INET;
else if (se->se_callback_netid_len == 4 &&
!memcmp(se->se_callback_netid_val, "tcp6", 4))
expected_family = AF_INET6;
else
goto out_err;
conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
se->se_callback_addr_len,
(struct sockaddr *)&conn->cb_addr,
sizeof(conn->cb_addr));
if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
goto out_err;
if (conn->cb_addr.ss_family == AF_INET6)
((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
conn->cb_prog = se->se_callback_prog;
conn->cb_ident = se->se_callback_ident;
memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
return;
out_err:
conn->cb_addr.ss_family = AF_UNSPEC;
conn->cb_addrlen = 0;
dprintk("NFSD: this client (clientid %08x/%08x) "
"will not receive delegations\n",
clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
return;
}
/*
* Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
*/
static void
nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
{
struct xdr_buf *buf = resp->xdr.buf;
struct nfsd4_slot *slot = resp->cstate.slot;
unsigned int base;
dprintk("--> %s slot %p\n", __func__, slot);
slot->sl_opcnt = resp->opcnt;
slot->sl_status = resp->cstate.status;
slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
if (nfsd4_not_cached(resp)) {
slot->sl_datalen = 0;
return;
}
base = resp->cstate.data_offset;
slot->sl_datalen = buf->len - base;
if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
WARN(1, "%s: sessions DRC could not cache compound\n",
__func__);
return;
}
/*
* Encode the replay sequence operation from the slot values.
 * If cachethis is FALSE, encode the uncached rep error on the next
 * operation, which sets resp->p and increments resp->opcnt for
* nfs4svc_encode_compoundres.
*
*/
static __be32
nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
struct nfsd4_compoundres *resp)
{
struct nfsd4_op *op;
struct nfsd4_slot *slot = resp->cstate.slot;
/* Encode the replayed sequence operation */
op = &args->ops[resp->opcnt - 1];
nfsd4_encode_operation(resp, op);
/* Return nfserr_retry_uncached_rep in next operation. */
if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
op = &args->ops[resp->opcnt++];
op->status = nfserr_retry_uncached_rep;
nfsd4_encode_operation(resp, op);
}
return op->status;
}
/*
* The sequence operation is not cached because we can use the slot and
* session values.
*/
static __be32
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
struct nfsd4_sequence *seq)
{
struct nfsd4_slot *slot = resp->cstate.slot;
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
__be32 status;
dprintk("--> %s slot %p\n", __func__, slot);
status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
if (status)
return status;
p = xdr_reserve_space(xdr, slot->sl_datalen);
if (!p) {
WARN_ON_ONCE(1);
return nfserr_serverfault;
}
xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
xdr_commit_encode(xdr);
resp->opcnt = slot->sl_opcnt;
return slot->sl_status;
}
/*
* Set the exchange_id flags returned by the server.
*/
static void
nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
{
#ifdef CONFIG_NFSD_PNFS
new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
#else
new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
#endif
/* Referrals are supported, Migration is not. */
new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
/* set the wire flags to return to client. */
clid->flags = new->cl_exchange_flags;
}
static bool client_has_openowners(struct nfs4_client *clp)
{
struct nfs4_openowner *oo;
list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
if (!list_empty(&oo->oo_owner.so_stateids))
return true;
}
return false;
}
static bool client_has_state(struct nfs4_client *clp)
{
return client_has_openowners(clp)
#ifdef CONFIG_NFSD_PNFS
|| !list_empty(&clp->cl_lo_states)
#endif
|| !list_empty(&clp->cl_delegations)
|| !list_empty(&clp->cl_sessions);
}
__be32
nfsd4_exchange_id(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
struct nfsd4_exchange_id *exid)
{
struct nfs4_client *conf, *new;
struct nfs4_client *unconf = NULL;
__be32 status;
char addr_str[INET6_ADDRSTRLEN];
nfs4_verifier verf = exid->verifier;
struct sockaddr *sa = svc_addr(rqstp);
bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
rpc_ntop(sa, addr_str, sizeof(addr_str));
dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
"ip_addr=%s flags %x, spa_how %d\n",
__func__, rqstp, exid, exid->clname.len, exid->clname.data,
addr_str, exid->flags, exid->spa_how);
if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
return nfserr_inval;
new = create_client(exid->clname, rqstp, &verf);
if (new == NULL)
return nfserr_jukebox;
switch (exid->spa_how) {
case SP4_MACH_CRED:
exid->spo_must_enforce[0] = 0;
exid->spo_must_enforce[1] = (
1 << (OP_BIND_CONN_TO_SESSION - 32) |
1 << (OP_EXCHANGE_ID - 32) |
1 << (OP_CREATE_SESSION - 32) |
1 << (OP_DESTROY_SESSION - 32) |
1 << (OP_DESTROY_CLIENTID - 32));
exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
1 << (OP_OPEN_DOWNGRADE) |
1 << (OP_LOCKU) |
1 << (OP_DELEGRETURN));
exid->spo_must_allow[1] &= (
1 << (OP_TEST_STATEID - 32) |
1 << (OP_FREE_STATEID - 32));
if (!svc_rqst_integrity_protected(rqstp)) {
status = nfserr_inval;
goto out_nolock;
}
/*
* Sometimes userspace doesn't give us a principal.
* Which is a bug, really. Anyway, we can't enforce
* MACH_CRED in that case, better to give up now:
*/
if (!new->cl_cred.cr_principal &&
!new->cl_cred.cr_raw_principal) {
status = nfserr_serverfault;
goto out_nolock;
}
new->cl_mach_cred = true;
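		/* fallthrough: SP4_MACH_CRED otherwise proceeds like SP4_NONE */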
case SP4_NONE:
break;
default: /* checked by xdr code */
WARN_ON_ONCE(1);
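		/* fallthrough: treat an unknown spa_how like unsupported SP4_SSV */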
case SP4_SSV:
status = nfserr_encr_alg_unsupp;
goto out_nolock;
}
/* Cases below refer to rfc 5661 section 18.35.4: */
spin_lock(&nn->client_lock);
conf = find_confirmed_client_by_name(&exid->clname, nn);
if (conf) {
bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
bool verfs_match = same_verf(&verf, &conf->cl_verifier);
if (update) {
if (!clp_used_exchangeid(conf)) { /* buggy client */
status = nfserr_inval;
goto out;
}
if (!nfsd4_mach_creds_match(conf, rqstp)) {
status = nfserr_wrong_cred;
goto out;
}
if (!creds_match) { /* case 9 */
status = nfserr_perm;
goto out;
}
if (!verfs_match) { /* case 8 */
status = nfserr_not_same;
goto out;
}
/* case 6 */
exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
goto out_copy;
}
if (!creds_match) { /* case 3 */
if (client_has_state(conf)) {
status = nfserr_clid_inuse;
goto out;
}
goto out_new;
}
if (verfs_match) { /* case 2 */
conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
goto out_copy;
}
/* case 5, client reboot */
conf = NULL;
goto out_new;
}
if (update) { /* case 7 */
status = nfserr_noent;
goto out;
}
unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
if (unconf) /* case 4, possible retry or client restart */
unhash_client_locked(unconf);
/* case 1 (normal case) */
out_new:
if (conf) {
status = mark_client_expired_locked(conf);
if (status)
goto out;
}
new->cl_minorversion = cstate->minorversion;
new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];
gen_clid(new, nn);
add_to_unconfirmed(new);
swap(new, conf);
out_copy:
exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
exid->clientid.cl_id = conf->cl_clientid.cl_id;
exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
nfsd4_set_ex_flags(conf, exid);
dprintk("nfsd4_exchange_id seqid %d flags %x\n",
conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
status = nfs_ok;
out:
spin_unlock(&nn->client_lock);
out_nolock:
if (new)
expire_client(new);
if (unconf)
expire_client(unconf);
return status;
}
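/*
 * Slot seqid rules (cf. RFC 5661 2.10.6.1): seqid == slot_seqid + 1 is the
 * next new request; seqid == slot_seqid is a replay (or, if the slot is
 * still in use, a retransmit of a request still being processed); anything
 * else is misordered.
 */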
static __be32
check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
{
dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
slot_seqid);
/* The slot is in use, and no response has been sent. */
if (slot_inuse) {
if (seqid == slot_seqid)
return nfserr_jukebox;
else
return nfserr_seq_misordered;
}
/* Note unsigned 32-bit arithmetic handles wraparound: */
if (likely(seqid == slot_seqid + 1))
return nfs_ok;
if (seqid == slot_seqid)
return nfserr_replay_cache;
return nfserr_seq_misordered;
}
/*
* Cache the create session result into the create session single DRC
* slot cache by saving the xdr structure. sl_seqid has been set.
* Do this for solo or embedded create session operations.
*/
static void
nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
struct nfsd4_clid_slot *slot, __be32 nfserr)
{
slot->sl_status = nfserr;
memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
}
static __be32
nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
struct nfsd4_clid_slot *slot)
{
memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
return slot->sl_status;
}
#define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
1 + /* MIN tag is length with zero, only length */ \
3 + /* version, opcount, opcode */ \
XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
/* seqid, slotID, slotID, cache */ \
4 ) * sizeof(__be32))
#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
2 + /* verifier: AUTH_NULL, length 0 */\
1 + /* status */ \
1 + /* MIN tag is length with zero, only length */ \
3 + /* opcount, opcode, opstatus*/ \
XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
/* seqid, slotID, slotID, slotID, status */ \
5 ) * sizeof(__be32))
static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{
u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
return nfserr_toosmall;
if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
return nfserr_toosmall;
ca->headerpadsz = 0;
ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
/*
	 * Note that decreasing the slot size below the client's request may
	 * make it difficult for the client to function correctly, whereas
* decreasing the number of slots will (just?) affect
* performance. When short on memory we therefore prefer to
* decrease number of slots instead of their size. Clients that
* request larger slots than they need will get poor results:
*/
ca->maxreqs = nfsd4_get_drc_mem(ca);
if (!ca->maxreqs)
return nfserr_jukebox;
return nfs_ok;
}
/*
* Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
* These are based on similar macros in linux/sunrpc/msg_prot.h .
*/
#define RPC_MAX_HEADER_WITH_AUTH_SYS \
(RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))
#define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
(RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))
#define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \
RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
#define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \
RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
sizeof(__be32))
static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
{
ca->headerpadsz = 0;
if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
return nfserr_toosmall;
if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
return nfserr_toosmall;
ca->maxresp_cached = 0;
if (ca->maxops < 2)
return nfserr_toosmall;
return nfs_ok;
}
static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
{
switch (cbs->flavor) {
case RPC_AUTH_NULL:
case RPC_AUTH_UNIX:
return nfs_ok;
default:
/*
* GSS case: the spec doesn't allow us to return this
* error. But it also doesn't allow us not to support
* GSS.
* I'd rather this fail hard than return some error the
* client might think it can already handle:
*/
return nfserr_encr_alg_unsupp;
}
}
__be32
nfsd4_create_session(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
struct nfsd4_create_session *cr_ses)
{
struct sockaddr *sa = svc_addr(rqstp);
struct nfs4_client *conf, *unconf;
struct nfs4_client *old = NULL;
struct nfsd4_session *new;
struct nfsd4_conn *conn;
struct nfsd4_clid_slot *cs_slot = NULL;
__be32 status = 0;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
return nfserr_inval;
status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
if (status)
return status;
status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
if (status)
return status;
status = check_backchannel_attrs(&cr_ses->back_channel);
if (status)
goto out_release_drc_mem;
status = nfserr_jukebox;
new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
if (!new)
goto out_release_drc_mem;
conn = alloc_conn_from_crses(rqstp, cr_ses);
if (!conn)
goto out_free_session;
spin_lock(&nn->client_lock);
unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
conf = find_confirmed_client(&cr_ses->clientid, true, nn);
WARN_ON_ONCE(conf && unconf);
if (conf) {
status = nfserr_wrong_cred;
if (!nfsd4_mach_creds_match(conf, rqstp))
goto out_free_conn;
cs_slot = &conf->cl_cs_slot;
status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
if (status) {
if (status == nfserr_replay_cache)
status = nfsd4_replay_create_session(cr_ses, cs_slot);
goto out_free_conn;
}
} else if (unconf) {
if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
!rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
status = nfserr_clid_inuse;
goto out_free_conn;
}
status = nfserr_wrong_cred;
if (!nfsd4_mach_creds_match(unconf, rqstp))
goto out_free_conn;
cs_slot = &unconf->cl_cs_slot;
status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
if (status) {
/* an unconfirmed replay returns misordered */
status = nfserr_seq_misordered;
goto out_free_conn;
}
old = find_confirmed_client_by_name(&unconf->cl_name, nn);
if (old) {
status = mark_client_expired_locked(old);
if (status) {
old = NULL;
goto out_free_conn;
}
}
move_to_confirmed(unconf);
conf = unconf;
} else {
status = nfserr_stale_clientid;
goto out_free_conn;
}
status = nfs_ok;
/* Persistent sessions are not supported */
cr_ses->flags &= ~SESSION4_PERSIST;
/* Upshifting from TCP to RDMA is not supported */
cr_ses->flags &= ~SESSION4_RDMA;
init_session(rqstp, new, conf, cr_ses);
nfsd4_get_session_locked(new);
memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
NFS4_MAX_SESSIONID_LEN);
cs_slot->sl_seqid++;
cr_ses->seqid = cs_slot->sl_seqid;
/* cache solo and embedded create sessions under the client_lock */
nfsd4_cache_create_session(cr_ses, cs_slot, status);
spin_unlock(&nn->client_lock);
/* init connection and backchannel */
nfsd4_init_conn(rqstp, conn, new);
nfsd4_put_session(new);
if (old)
expire_client(old);
return status;
out_free_conn:
spin_unlock(&nn->client_lock);
free_conn(conn);
if (old)
expire_client(old);
out_free_session:
__free_session(new);
out_release_drc_mem:
nfsd4_put_drc_mem(&cr_ses->fore_channel);
return status;
}
static __be32 nfsd4_map_bcts_dir(u32 *dir)
{
switch (*dir) {
case NFS4_CDFC4_FORE:
case NFS4_CDFC4_BACK:
return nfs_ok;
case NFS4_CDFC4_FORE_OR_BOTH:
case NFS4_CDFC4_BACK_OR_BOTH:
*dir = NFS4_CDFC4_BOTH;
return nfs_ok;
	}
return nfserr_inval;
}
__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_backchannel_ctl *bc)
{
struct nfsd4_session *session = cstate->session;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
__be32 status;
status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
if (status)
return status;
spin_lock(&nn->client_lock);
session->se_cb_prog = bc->bc_cb_program;
session->se_cb_sec = bc->bc_cb_sec;
spin_unlock(&nn->client_lock);
nfsd4_probe_callback(session->se_client);
return nfs_ok;
}
__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
struct nfsd4_bind_conn_to_session *bcts)
{
__be32 status;
struct nfsd4_conn *conn;
struct nfsd4_session *session;
struct net *net = SVC_NET(rqstp);
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
if (!nfsd4_last_compound_op(rqstp))
return nfserr_not_only_op;
spin_lock(&nn->client_lock);
session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
spin_unlock(&nn->client_lock);
if (!session)
goto out_no_session;
status = nfserr_wrong_cred;
if (!nfsd4_mach_creds_match(session->se_client, rqstp))
goto out;
status = nfsd4_map_bcts_dir(&bcts->dir);
if (status)
goto out;
conn = alloc_conn(rqstp, bcts->dir);
status = nfserr_jukebox;
if (!conn)
goto out;
nfsd4_init_conn(rqstp, conn, session);
status = nfs_ok;
out:
nfsd4_put_session(session);
out_no_session:
return status;
}
static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
{
if (!session)
		return false;
return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
}
__be32
nfsd4_destroy_session(struct svc_rqst *r,
struct nfsd4_compound_state *cstate,
struct nfsd4_destroy_session *sessionid)
{
struct nfsd4_session *ses;
__be32 status;
int ref_held_by_me = 0;
struct net *net = SVC_NET(r);
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
status = nfserr_not_only_op;
if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
if (!nfsd4_last_compound_op(r))
goto out;
ref_held_by_me++;
}
dump_sessionid(__func__, &sessionid->sessionid);
spin_lock(&nn->client_lock);
ses = find_in_sessionid_hashtbl(&sessionid->sessionid, net, &status);
if (!ses)
goto out_client_lock;
status = nfserr_wrong_cred;
if (!nfsd4_mach_creds_match(ses->se_client, r))
goto out_put_session;
status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
if (status)
goto out_put_session;
unhash_session(ses);
spin_unlock(&nn->client_lock);
nfsd4_probe_callback_sync(ses->se_client);
spin_lock(&nn->client_lock);
status = nfs_ok;
out_put_session:
nfsd4_put_session_locked(ses);
out_client_lock:
spin_unlock(&nn->client_lock);
out:
return status;
}
static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
{
struct nfsd4_conn *c;
list_for_each_entry(c, &s->se_conns, cn_persession) {
if (c->cn_xprt == xpt) {
return c;
}
}
return NULL;
}
static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
{
struct nfs4_client *clp = ses->se_client;
struct nfsd4_conn *c;
__be32 status = nfs_ok;
int ret;
spin_lock(&clp->cl_lock);
c = __nfsd4_find_conn(new->cn_xprt, ses);
if (c)
goto out_free;
status = nfserr_conn_not_bound_to_session;
if (clp->cl_mach_cred)
goto out_free;
__nfsd4_hash_conn(new, ses);
spin_unlock(&clp->cl_lock);
ret = nfsd4_register_conn(new);
if (ret)
/* oops; xprt is already down: */
nfsd4_conn_lost(&new->cn_xpt_user);
return nfs_ok;
out_free:
spin_unlock(&clp->cl_lock);
free_conn(new);
return status;
}
static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
{
struct nfsd4_compoundargs *args = rqstp->rq_argp;
return args->opcnt > session->se_fchannel.maxops;
}
static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
struct nfsd4_session *session)
{
struct xdr_buf *xb = &rqstp->rq_arg;
return xb->len > session->se_fchannel.maxreq_sz;
}
__be32
nfsd4_sequence(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
struct nfsd4_sequence *seq)
{
struct nfsd4_compoundres *resp = rqstp->rq_resp;
struct xdr_stream *xdr = &resp->xdr;
struct nfsd4_session *session;
struct nfs4_client *clp;
struct nfsd4_slot *slot;
struct nfsd4_conn *conn;
__be32 status;
int buflen;
struct net *net = SVC_NET(rqstp);
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
if (resp->opcnt != 1)
return nfserr_sequence_pos;
/*
* Will be either used or freed by nfsd4_sequence_check_conn
* below.
*/
conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
if (!conn)
return nfserr_jukebox;
spin_lock(&nn->client_lock);
session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
if (!session)
goto out_no_session;
clp = session->se_client;
status = nfserr_too_many_ops;
if (nfsd4_session_too_many_ops(rqstp, session))
goto out_put_session;
status = nfserr_req_too_big;
if (nfsd4_request_too_big(rqstp, session))
goto out_put_session;
status = nfserr_badslot;
if (seq->slotid >= session->se_fchannel.maxreqs)
goto out_put_session;
slot = session->se_slots[seq->slotid];
dprintk("%s: slotid %d\n", __func__, seq->slotid);
	/* We do not negotiate the number of slots yet, so set the
	 * maxslots to the session maxreqs, which is used to encode
	 * sr_highest_slotid and sr_target_slotid to maxslots */
seq->maxslots = session->se_fchannel.maxreqs;
status = check_slot_seqid(seq->seqid, slot->sl_seqid,
slot->sl_flags & NFSD4_SLOT_INUSE);
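	/*
	 * A replay is served straight from the slot's cached reply; the slot
	 * must have been filled in by a previous, completed SEQUENCE.
	 */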
if (status == nfserr_replay_cache) {
status = nfserr_seq_misordered;
if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
goto out_put_session;
cstate->slot = slot;
cstate->session = session;
cstate->clp = clp;
/* Return the cached reply status and set cstate->status
* for nfsd4_proc_compound processing */
status = nfsd4_replay_cache_entry(resp, seq);
cstate->status = nfserr_replay_cache;
goto out;
}
if (status)
goto out_put_session;
status = nfsd4_sequence_check_conn(conn, session);
conn = NULL;
if (status)
goto out_put_session;
buflen = (seq->cachethis) ?
session->se_fchannel.maxresp_cached :
session->se_fchannel.maxresp_sz;
status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
nfserr_rep_too_big;
if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
goto out_put_session;
svc_reserve(rqstp, buflen);
status = nfs_ok;
/* Success! bump slot seqid */
slot->sl_seqid = seq->seqid;
slot->sl_flags |= NFSD4_SLOT_INUSE;
if (seq->cachethis)
slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
else
slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
cstate->slot = slot;
cstate->session = session;
cstate->clp = clp;
out:
switch (clp->cl_cb_state) {
case NFSD4_CB_DOWN:
seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
break;
case NFSD4_CB_FAULT:
seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
break;
default:
seq->status_flags = 0;
}
if (!list_empty(&clp->cl_revoked))
seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
out_no_session:
if (conn)
free_conn(conn);
spin_unlock(&nn->client_lock);
return status;
out_put_session:
nfsd4_put_session_locked(session);
goto out_no_session;
}
void
nfsd4_sequence_done(struct nfsd4_compoundres *resp)
{
struct nfsd4_compound_state *cs = &resp->cstate;
if (nfsd4_has_session(cs)) {
if (cs->status != nfserr_replay_cache) {
nfsd4_store_cache_entry(resp);
cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
}
/* Drop session reference that was taken in nfsd4_sequence() */
nfsd4_put_session(cs->session);
} else if (cs->clp)
put_client_renew(cs->clp);
}
__be32
nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
{
struct nfs4_client *conf, *unconf;
struct nfs4_client *clp = NULL;
__be32 status = 0;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
spin_lock(&nn->client_lock);
unconf = find_unconfirmed_client(&dc->clientid, true, nn);
conf = find_confirmed_client(&dc->clientid, true, nn);
WARN_ON_ONCE(conf && unconf);
if (conf) {
if (client_has_state(conf)) {
status = nfserr_clientid_busy;
goto out;
}
status = mark_client_expired_locked(conf);
if (status)
goto out;
clp = conf;
} else if (unconf)
clp = unconf;
else {
status = nfserr_stale_clientid;
goto out;
}
if (!nfsd4_mach_creds_match(clp, rqstp)) {
clp = NULL;
status = nfserr_wrong_cred;
goto out;
}
unhash_client_locked(clp);
out:
spin_unlock(&nn->client_lock);
if (clp)
expire_client(clp);
return status;
}
__be32
nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
{
__be32 status = 0;
if (rc->rca_one_fs) {
if (!cstate->current_fh.fh_dentry)
return nfserr_nofilehandle;
/*
* We don't take advantage of the rca_one_fs case.
* That's OK, it's optional, we can safely ignore it.
*/
return nfs_ok;
}
status = nfserr_complete_already;
if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
&cstate->session->se_client->cl_flags))
goto out;
status = nfserr_stale_clientid;
if (is_client_expired(cstate->session->se_client))
/*
* The following error isn't really legal.
* But we only get here if the client just explicitly
		 * destroyed itself.  Surely it no longer cares what
* error it gets back on an operation for the dead
* client.
*/
goto out;
status = nfs_ok;
nfsd4_client_record_create(cstate->session->se_client);
out:
return status;
}
__be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_setclientid *setclid)
{
struct xdr_netobj clname = setclid->se_name;
nfs4_verifier clverifier = setclid->se_verf;
struct nfs4_client *conf, *new;
struct nfs4_client *unconf = NULL;
__be32 status;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
new = create_client(clname, rqstp, &clverifier);
if (new == NULL)
return nfserr_jukebox;
/* Cases below refer to rfc 3530 section 14.2.33: */
spin_lock(&nn->client_lock);
conf = find_confirmed_client_by_name(&clname, nn);
if (conf && client_has_state(conf)) {
/* case 0: */
status = nfserr_clid_inuse;
if (clp_used_exchangeid(conf))
goto out;
if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
char addr_str[INET6_ADDRSTRLEN];
rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
sizeof(addr_str));
dprintk("NFSD: setclientid: string in use by client "
"at %s\n", addr_str);
goto out;
}
}
unconf = find_unconfirmed_client_by_name(&clname, nn);
if (unconf)
unhash_client_locked(unconf);
if (conf && same_verf(&conf->cl_verifier, &clverifier)) {
/* case 1: probable callback update */
copy_clid(new, conf);
gen_confirm(new, nn);
} else /* case 4 (new client) or cases 2, 3 (client reboot): */
gen_clid(new, nn);
new->cl_minorversion = 0;
gen_callback(new, setclid, rqstp);
add_to_unconfirmed(new);
setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
new = NULL;
status = nfs_ok;
out:
spin_unlock(&nn->client_lock);
if (new)
free_client(new);
if (unconf)
expire_client(unconf);
return status;
}
__be32
nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
struct nfsd4_setclientid_confirm *setclientid_confirm)
{
struct nfs4_client *conf, *unconf;
struct nfs4_client *old = NULL;
nfs4_verifier confirm = setclientid_confirm->sc_confirm;
clientid_t * clid = &setclientid_confirm->sc_clientid;
__be32 status;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
if (STALE_CLIENTID(clid, nn))
return nfserr_stale_clientid;
spin_lock(&nn->client_lock);
conf = find_confirmed_client(clid, false, nn);
unconf = find_unconfirmed_client(clid, false, nn);
/*
	 * We try hard to give out unique clientids, so if we get an
* attempt to confirm the same clientid with a different cred,
* the client may be buggy; this should never happen.
*
* Nevertheless, RFC 7530 recommends INUSE for this case:
*/
status = nfserr_clid_inuse;
if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
goto out;
if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
goto out;
/* cases below refer to rfc 3530 section 14.2.34: */
if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
if (conf && same_verf(&confirm, &conf->cl_confirm)) {
/* case 2: probable retransmit */
status = nfs_ok;
} else /* case 4: client hasn't noticed we rebooted yet? */
status = nfserr_stale_clientid;
goto out;
}
status = nfs_ok;
if (conf) { /* case 1: callback update */
old = unconf;
unhash_client_locked(old);
nfsd4_change_callback(conf, &unconf->cl_cb_conn);
} else { /* case 3: normal case; new or rebooted client */
old = find_confirmed_client_by_name(&unconf->cl_name, nn);
if (old) {
status = nfserr_clid_inuse;
if (client_has_state(old)
&& !same_creds(&unconf->cl_cred,
&old->cl_cred))
goto out;
status = mark_client_expired_locked(old);
if (status) {
old = NULL;
goto out;
}
}
move_to_confirmed(unconf);
conf = unconf;
}
get_client_locked(conf);
spin_unlock(&nn->client_lock);
nfsd4_probe_callback(conf);
spin_lock(&nn->client_lock);
put_client_renew_locked(conf);
out:
spin_unlock(&nn->client_lock);
if (old)
expire_client(old);
return status;
}
static struct nfs4_file *nfsd4_alloc_file(void)
{
return kmem_cache_alloc(file_slab, GFP_KERNEL);
}
/* OPEN Share state helper functions */
static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
struct nfs4_file *fp)
{
lockdep_assert_held(&state_lock);
atomic_set(&fp->fi_ref, 1);
spin_lock_init(&fp->fi_lock);
INIT_LIST_HEAD(&fp->fi_stateids);
INIT_LIST_HEAD(&fp->fi_delegations);
INIT_LIST_HEAD(&fp->fi_clnt_odstate);
fh_copy_shallow(&fp->fi_fhandle, fh);
fp->fi_deleg_file = NULL;
fp->fi_had_conflict = false;
fp->fi_share_deny = 0;
memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
memset(fp->fi_access, 0, sizeof(fp->fi_access));
#ifdef CONFIG_NFSD_PNFS
INIT_LIST_HEAD(&fp->fi_lo_states);
atomic_set(&fp->fi_lo_recalls, 0);
#endif
hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
}
void
nfsd4_free_slabs(void)
{
kmem_cache_destroy(odstate_slab);
kmem_cache_destroy(openowner_slab);
kmem_cache_destroy(lockowner_slab);
kmem_cache_destroy(file_slab);
kmem_cache_destroy(stateid_slab);
kmem_cache_destroy(deleg_slab);
}
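/*
 * Create the slab caches backing the nfsv4 state objects; on failure,
 * destroy whatever was created so far in reverse order.
 */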
int
nfsd4_init_slabs(void)
{
openowner_slab = kmem_cache_create("nfsd4_openowners",
sizeof(struct nfs4_openowner), 0, 0, NULL);
if (openowner_slab == NULL)
goto out;
lockowner_slab = kmem_cache_create("nfsd4_lockowners",
sizeof(struct nfs4_lockowner), 0, 0, NULL);
if (lockowner_slab == NULL)
goto out_free_openowner_slab;
file_slab = kmem_cache_create("nfsd4_files",
sizeof(struct nfs4_file), 0, 0, NULL);
if (file_slab == NULL)
goto out_free_lockowner_slab;
stateid_slab = kmem_cache_create("nfsd4_stateids",
sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
if (stateid_slab == NULL)
goto out_free_file_slab;
deleg_slab = kmem_cache_create("nfsd4_delegations",
sizeof(struct nfs4_delegation), 0, 0, NULL);
if (deleg_slab == NULL)
goto out_free_stateid_slab;
odstate_slab = kmem_cache_create("nfsd4_odstate",
sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
if (odstate_slab == NULL)
goto out_free_deleg_slab;
return 0;
out_free_deleg_slab:
kmem_cache_destroy(deleg_slab);
out_free_stateid_slab:
kmem_cache_destroy(stateid_slab);
out_free_file_slab:
kmem_cache_destroy(file_slab);
out_free_lockowner_slab:
kmem_cache_destroy(lockowner_slab);
out_free_openowner_slab:
kmem_cache_destroy(openowner_slab);
out:
dprintk("nfsd4: out of memory while initializing nfsv4\n");
return -ENOMEM;
}
static void init_nfs4_replay(struct nfs4_replay *rp)
{
rp->rp_status = nfserr_serverfault;
rp->rp_buflen = 0;
rp->rp_buf = rp->rp_ibuf;
mutex_init(&rp->rp_mutex);
}
static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
struct nfs4_stateowner *so)
{
if (!nfsd4_has_session(cstate)) {
mutex_lock(&so->so_replay.rp_mutex);
cstate->replay_owner = nfs4_get_stateowner(so);
}
}
void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
{
struct nfs4_stateowner *so = cstate->replay_owner;
if (so != NULL) {
cstate->replay_owner = NULL;
mutex_unlock(&so->so_replay.rp_mutex);
nfs4_put_stateowner(so);
}
}
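/*
 * Allocate a stateowner from @slab and duplicate the client-supplied
 * opaque owner string; the caller fills in the type-specific fields.
 * Returns NULL on allocation failure.
 */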
static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
{
struct nfs4_stateowner *sop;
sop = kmem_cache_alloc(slab, GFP_KERNEL);
if (!sop)
return NULL;
sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
if (!sop->so_owner.data) {
kmem_cache_free(slab, sop);
return NULL;
}
sop->so_owner.len = owner->len;
INIT_LIST_HEAD(&sop->so_stateids);
sop->so_client = clp;
init_nfs4_replay(&sop->so_replay);
atomic_set(&sop->so_count, 1);
return sop;
}
static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
{
lockdep_assert_held(&clp->cl_lock);
list_add(&oo->oo_owner.so_strhash,
&clp->cl_ownerstr_hashtbl[strhashval]);
list_add(&oo->oo_perclient, &clp->cl_openowners);
}
static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
{
unhash_openowner_locked(openowner(so));
}
static void nfs4_free_openowner(struct nfs4_stateowner *so)
{
struct nfs4_openowner *oo = openowner(so);
kmem_cache_free(openowner_slab, oo);
}
static const struct nfs4_stateowner_operations openowner_ops = {
.so_unhash = nfs4_unhash_openowner,
.so_free = nfs4_free_openowner,
};
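/*
 * Look for an open stateid on @fp belonging to this open's openowner,
 * taking a reference on any match. Caller must hold fp->fi_lock.
 */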
static struct nfs4_ol_stateid *
nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
{
struct nfs4_ol_stateid *local, *ret = NULL;
struct nfs4_openowner *oo = open->op_openowner;
lockdep_assert_held(&fp->fi_lock);
list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
/* ignore lock owners */
if (local->st_stateowner->so_is_open_owner == 0)
continue;
if (local->st_stateowner == &oo->oo_owner) {
ret = local;
atomic_inc(&ret->st_stid.sc_count);
break;
}
}
return ret;
}
static struct nfs4_openowner *
alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
struct nfsd4_compound_state *cstate)
{
struct nfs4_client *clp = cstate->clp;
struct nfs4_openowner *oo, *ret;
oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
if (!oo)
return NULL;
oo->oo_owner.so_ops = &openowner_ops;
oo->oo_owner.so_is_open_owner = 1;
oo->oo_owner.so_seqid = open->op_seqid;
oo->oo_flags = 0;
if (nfsd4_has_session(cstate))
oo->oo_flags |= NFS4_OO_CONFIRMED;
oo->oo_time = 0;
oo->oo_last_closed_stid = NULL;
INIT_LIST_HEAD(&oo->oo_close_lru);
spin_lock(&clp->cl_lock);
ret = find_openstateowner_str_locked(strhashval, open, clp);
if (ret == NULL) {
hash_openowner(oo, clp, strhashval);
ret = oo;
} else
nfs4_free_stateowner(&oo->oo_owner);
spin_unlock(&clp->cl_lock);
return ret;
}
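/*
 * Initialize and hash the preallocated open stateid, or return an existing
 * one if another thread raced us on the same file and openowner. The
 * stateid's st_mutex is initialized and taken outside the spinlocks, and
 * the returned stateid is locked either way.
 */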
static struct nfs4_ol_stateid *
init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
{
struct nfs4_openowner *oo = open->op_openowner;
struct nfs4_ol_stateid *retstp = NULL;
struct nfs4_ol_stateid *stp;
stp = open->op_stp;
/* We are moving these outside of the spinlocks to avoid the warnings */
mutex_init(&stp->st_mutex);
mutex_lock(&stp->st_mutex);
spin_lock(&oo->oo_owner.so_client->cl_lock);
spin_lock(&fp->fi_lock);
retstp = nfsd4_find_existing_open(fp, open);
if (retstp)
goto out_unlock;
open->op_stp = NULL;
atomic_inc(&stp->st_stid.sc_count);
stp->st_stid.sc_type = NFS4_OPEN_STID;
INIT_LIST_HEAD(&stp->st_locks);
stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
get_nfs4_file(fp);
stp->st_stid.sc_file = fp;
stp->st_access_bmap = 0;
stp->st_deny_bmap = 0;
stp->st_openstp = NULL;
list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
list_add(&stp->st_perfile, &fp->fi_stateids);
out_unlock:
spin_unlock(&fp->fi_lock);
spin_unlock(&oo->oo_owner.so_client->cl_lock);
if (retstp) {
mutex_lock(&retstp->st_mutex);
/* To keep mutex tracking happy */
mutex_unlock(&stp->st_mutex);
stp = retstp;
}
return stp;
}
/*
* In the 4.0 case we need to keep the owners around a little while to handle
* CLOSE replay. We still do need to release any file access that is held by
* them before returning however.
*/
static void
move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
{
struct nfs4_ol_stateid *last;
struct nfs4_openowner *oo = openowner(s->st_stateowner);
struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
nfsd_net_id);
dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
/*
* We know that we hold one reference via nfsd4_close, and another
* "persistent" reference for the client. If the refcount is higher
* than 2, then there are still calls in progress that are using this
* stateid. We can't put the sc_file reference until they are finished.
* Wait for the refcount to drop to 2. Since it has been unhashed,
* there should be no danger of the refcount going back up again at
* this point.
*/
wait_event(close_wq, atomic_read(&s->st_stid.sc_count) == 2);
release_all_access(s);
if (s->st_stid.sc_file) {
put_nfs4_file(s->st_stid.sc_file);
s->st_stid.sc_file = NULL;
}
spin_lock(&nn->client_lock);
last = oo->oo_last_closed_stid;
oo->oo_last_closed_stid = s;
list_move_tail(&oo->oo_close_lru, &nn->close_lru);
oo->oo_time = get_seconds();
spin_unlock(&nn->client_lock);
if (last)
nfs4_put_stid(&last->st_stid);
}
/* search file_hashtbl[] for file */
static struct nfs4_file *
find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
{
struct nfs4_file *fp;
hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
if (fh_match(&fp->fi_fhandle, fh)) {
if (atomic_inc_not_zero(&fp->fi_ref))
return fp;
}
}
return NULL;
}
struct nfs4_file *
find_file(struct knfsd_fh *fh)
{
struct nfs4_file *fp;
unsigned int hashval = file_hashval(fh);
rcu_read_lock();
fp = find_file_locked(fh, hashval);
rcu_read_unlock();
return fp;
}
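/*
 * Look the file up under RCU first; if it isn't hashed yet, recheck under
 * the state lock and insert @new if it's still absent.
 */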
static struct nfs4_file *
find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
{
struct nfs4_file *fp;
unsigned int hashval = file_hashval(fh);
rcu_read_lock();
fp = find_file_locked(fh, hashval);
rcu_read_unlock();
if (fp)
return fp;
spin_lock(&state_lock);
fp = find_file_locked(fh, hashval);
if (likely(fp == NULL)) {
nfsd4_init_file(fh, hashval, new);
fp = new;
}
spin_unlock(&state_lock);
return fp;
}
/*
* Called to check deny when READ with all zero stateid or
* WRITE with all zero or all one stateid
*/
static __be32
nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
{
struct nfs4_file *fp;
__be32 ret = nfs_ok;
fp = find_file(&current_fh->fh_handle);
if (!fp)
return ret;
/* Check for conflicting share reservations */
spin_lock(&fp->fi_lock);
if (fp->fi_share_deny & deny_type)
ret = nfserr_locked;
spin_unlock(&fp->fi_lock);
put_nfs4_file(fp);
return ret;
}
static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
{
struct nfs4_delegation *dp = cb_to_delegation(cb);
struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
nfsd_net_id);
block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
/*
* We can't do this in nfsd_break_deleg_cb because it is
* already holding inode->i_lock.
*
* If the dl_time != 0, then we know that it has already been
* queued for a lease break. Don't queue it again.
*/
spin_lock(&state_lock);
if (dp->dl_time == 0) {
dp->dl_time = get_seconds();
list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
}
spin_unlock(&state_lock);
}
static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
struct rpc_task *task)
{
struct nfs4_delegation *dp = cb_to_delegation(cb);
if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID)
return 1;
switch (task->tk_status) {
case 0:
return 1;
case -EBADHANDLE:
case -NFS4ERR_BAD_STATEID:
/*
* Race: client probably got cb_recall before open reply
* granting delegation.
*/
if (dp->dl_retries--) {
rpc_delay(task, 2 * HZ);
return 0;
}
/*FALLTHRU*/
default:
return -1;
}
}
static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
{
struct nfs4_delegation *dp = cb_to_delegation(cb);
nfs4_put_stid(&dp->dl_stid);
}
static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
.prepare = nfsd4_cb_recall_prepare,
.done = nfsd4_cb_recall_done,
.release = nfsd4_cb_recall_release,
};
static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
{
/*
* We're assuming the state code never drops its reference
* without first removing the lease. Since we're in this lease
* callback (and since the lease code is serialized by the kernel
* lock) we know the server hasn't removed the lease yet, we know
* it's safe to take a reference.
*/
atomic_inc(&dp->dl_stid.sc_count);
nfsd4_run_cb(&dp->dl_recall);
}
/* Called from break_lease() with i_lock held. */
static bool
nfsd_break_deleg_cb(struct file_lock *fl)
{
bool ret = false;
struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
struct nfs4_delegation *dp;
if (!fp) {
WARN(1, "(%p)->fl_owner NULL\n", fl);
return ret;
}
if (fp->fi_had_conflict) {
WARN(1, "duplicate break on %p\n", fp);
return ret;
}
/*
* We don't want the locks code to timeout the lease for us;
* we'll remove it ourself if a delegation isn't returned
* in time:
*/
fl->fl_break_time = 0;
spin_lock(&fp->fi_lock);
fp->fi_had_conflict = true;
/*
* If there are no delegations on the list, then return true
* so that the lease code will go ahead and delete it.
*/
if (list_empty(&fp->fi_delegations))
ret = true;
else
list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
nfsd_break_one_deleg(dp);
spin_unlock(&fp->fi_lock);
return ret;
}
static int
nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
struct list_head *dispose)
{
if (arg & F_UNLCK)
return lease_modify(onlist, arg, dispose);
else
return -EAGAIN;
}
static const struct lock_manager_operations nfsd_lease_mng_ops = {
.lm_break = nfsd_break_deleg_cb,
.lm_change = nfsd_change_deleg_cb,
};
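/*
 * v4.0 seqid check: the owner's current seqid is the expected next
 * request, the previous one indicates a replay, anything else is bad.
 * With sessions (v4.1+), owner seqids are not checked.
 */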
static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
{
if (nfsd4_has_session(cstate))
return nfs_ok;
if (seqid == so->so_seqid - 1)
return nfserr_replay_me;
if (seqid == so->so_seqid)
return nfs_ok;
return nfserr_bad_seqid;
}
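/*
 * Look up the confirmed client for @clid and cache it in the compound
 * state with a reference held. For v4.1+ the client was already found by
 * the SEQUENCE op, so the lookup only happens for v4.0.
 */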
static __be32 lookup_clientid(clientid_t *clid,
struct nfsd4_compound_state *cstate,
struct nfsd_net *nn)
{
struct nfs4_client *found;
if (cstate->clp) {
found = cstate->clp;
if (!same_clid(&found->cl_clientid, clid))
return nfserr_stale_clientid;
return nfs_ok;
}
if (STALE_CLIENTID(clid, nn))
return nfserr_stale_clientid;
/*
* For v4.1+ we get the client in the SEQUENCE op. If we don't have one
* cached already then we know this is for v4.0 and "sessions"
* will be false.
*/
WARN_ON_ONCE(cstate->session);
spin_lock(&nn->client_lock);
found = find_confirmed_client(clid, false, nn);
if (!found) {
spin_unlock(&nn->client_lock);
return nfserr_expired;
}
atomic_inc(&found->cl_refcount);
spin_unlock(&nn->client_lock);
/* Cache the nfs4_client in cstate! */
cstate->clp = found;
return nfs_ok;
}
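/*
 * First pass of OPEN processing: find or create the openowner, perform
 * the v4.0 seqid check, and preallocate the file, stateid and (for
 * sessions on pNFS exports) odstate objects so that later failures are
 * easier to unwind.
 */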
__be32
nfsd4_process_open1(struct nfsd4_compound_state *cstate,
struct nfsd4_open *open, struct nfsd_net *nn)
{
clientid_t *clientid = &open->op_clientid;
struct nfs4_client *clp = NULL;
unsigned int strhashval;
struct nfs4_openowner *oo = NULL;
__be32 status;
if (STALE_CLIENTID(&open->op_clientid, nn))
return nfserr_stale_clientid;
/*
* In case we need it later, after we've already created the
* file and don't want to risk a further failure:
*/
open->op_file = nfsd4_alloc_file();
if (open->op_file == NULL)
return nfserr_jukebox;
status = lookup_clientid(clientid, cstate, nn);
if (status)
return status;
clp = cstate->clp;
strhashval = ownerstr_hashval(&open->op_owner);
oo = find_openstateowner_str(strhashval, open, clp);
open->op_openowner = oo;
if (!oo) {
goto new_owner;
}
if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
/* Replace unconfirmed owners without checking for replay. */
release_openowner(oo);
open->op_openowner = NULL;
goto new_owner;
}
status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
if (status)
return status;
goto alloc_stateid;
new_owner:
oo = alloc_init_open_stateowner(strhashval, open, cstate);
if (oo == NULL)
return nfserr_jukebox;
open->op_openowner = oo;
alloc_stateid:
open->op_stp = nfs4_alloc_open_stateid(clp);
if (!open->op_stp)
return nfserr_jukebox;
if (nfsd4_has_session(cstate) &&
(cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
open->op_odstate = alloc_clnt_odstate(clp);
if (!open->op_odstate)
return nfserr_jukebox;
}
return nfs_ok;
}
static inline __be32
nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
{
if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
return nfserr_openmode;
else
return nfs_ok;
}
static int share_access_to_flags(u32 share_access)
{
return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
}
static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
{
struct nfs4_stid *ret;
ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
if (!ret)
return NULL;
return delegstateid(ret);
}
static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
{
return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
}
static __be32
nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
struct nfs4_delegation **dp)
{
int flags;
__be32 status = nfserr_bad_stateid;
struct nfs4_delegation *deleg;
deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
if (deleg == NULL)
goto out;
flags = share_access_to_flags(open->op_share_access);
status = nfs4_check_delegmode(deleg, flags);
if (status) {
nfs4_put_stid(&deleg->dl_stid);
goto out;
}
*dp = deleg;
out:
if (!nfsd4_is_deleg_cur(open))
return nfs_ok;
if (status)
return status;
open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
return nfs_ok;
}
static inline int nfs4_access_to_access(u32 nfs4_access)
{
int flags = 0;
if (nfs4_access & NFS4_SHARE_ACCESS_READ)
flags |= NFSD_MAY_READ;
if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
flags |= NFSD_MAY_WRITE;
return flags;
}
static inline __be32
nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
struct nfsd4_open *open)
{
struct iattr iattr = {
.ia_valid = ATTR_SIZE,
.ia_size = 0,
};
if (!open->op_truncate)
return 0;
if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
return nfserr_inval;
return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
}
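/*
 * Check the requested deny mode against current access, record the new
 * access and deny bits in the stateid, open a struct file for the
 * requested access mode if we don't hold one yet, and apply any truncate;
 * roll the bitmap and access changes back on failure.
 */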
static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
struct nfsd4_open *open)
{
struct file *filp = NULL;
__be32 status;
int oflag = nfs4_access_to_omode(open->op_share_access);
int access = nfs4_access_to_access(open->op_share_access);
unsigned char old_access_bmap, old_deny_bmap;
spin_lock(&fp->fi_lock);
/*
* Are we trying to set a deny mode that would conflict with
* current access?
*/
status = nfs4_file_check_deny(fp, open->op_share_deny);
if (status != nfs_ok) {
spin_unlock(&fp->fi_lock);
goto out;
}
/* set access to the file */
status = nfs4_file_get_access(fp, open->op_share_access);
if (status != nfs_ok) {
spin_unlock(&fp->fi_lock);
goto out;
}
/* Set access bits in stateid */
old_access_bmap = stp->st_access_bmap;
set_access(open->op_share_access, stp);
/* Set new deny mask */
old_deny_bmap = stp->st_deny_bmap;
set_deny(open->op_share_deny, stp);
fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
if (!fp->fi_fds[oflag]) {
spin_unlock(&fp->fi_lock);
status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &filp);
if (status)
goto out_put_access;
spin_lock(&fp->fi_lock);
if (!fp->fi_fds[oflag]) {
fp->fi_fds[oflag] = filp;
filp = NULL;
}
}
spin_unlock(&fp->fi_lock);
if (filp)
fput(filp);
status = nfsd4_truncate(rqstp, cur_fh, open);
if (status)
goto out_put_access;
out:
return status;
out_put_access:
stp->st_access_bmap = old_access_bmap;
nfs4_file_put_access(fp, open->op_share_access);
reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
goto out;
}
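/*
 * OPEN upgrade: add any newly requested access and deny bits to an
 * existing open stateid, undoing the deny change if the truncate fails.
 */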
static __be32
nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
{
__be32 status;
unsigned char old_deny_bmap = stp->st_deny_bmap;
if (!test_access(open->op_share_access, stp))
return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);
/* test and set deny mode */
spin_lock(&fp->fi_lock);
status = nfs4_file_check_deny(fp, open->op_share_deny);
if (status == nfs_ok) {
set_deny(open->op_share_deny, stp);
fp->fi_share_deny |=
(open->op_share_deny & NFS4_SHARE_DENY_BOTH);
}
spin_unlock(&fp->fi_lock);
if (status != nfs_ok)
return status;
status = nfsd4_truncate(rqstp, cur_fh, open);
if (status != nfs_ok)
reset_union_bmap_deny(old_deny_bmap, stp);
return status;
}
/* Should we give out recallable state?: */
static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
{
if (clp->cl_cb_state == NFSD4_CB_UP)
return true;
/*
* In the sessions case, since we don't have to establish a
* separate connection for callbacks, we assume it's OK
* until we hear otherwise:
*/
return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
}
static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag)
{
struct file_lock *fl;
fl = locks_alloc_lock();
if (!fl)
return NULL;
fl->fl_lmops = &nfsd_lease_mng_ops;
fl->fl_flags = FL_DELEG;
fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
fl->fl_end = OFFSET_MAX;
fl->fl_owner = (fl_owner_t)fp;
fl->fl_pid = current->tgid;
return fl;
}
/**
* nfs4_setlease - Obtain a delegation by requesting lease from vfs layer
* @dp: a pointer to the nfs4_delegation we're adding.
*
* Return:
* On success: Return code will be 0 on success.
*
* On error: -EAGAIN if there was an existing delegation.
* nonzero if there is an error in other cases.
*
*/
static int nfs4_setlease(struct nfs4_delegation *dp)
{
struct nfs4_file *fp = dp->dl_stid.sc_file;
struct file_lock *fl;
struct file *filp;
int status = 0;
fl = nfs4_alloc_init_lease(fp, NFS4_OPEN_DELEGATE_READ);
if (!fl)
return -ENOMEM;
filp = find_readable_file(fp);
if (!filp) {
/* We should always have a readable file here */
WARN_ON_ONCE(1);
locks_free_lock(fl);
return -EBADF;
}
fl->fl_file = filp;
status = vfs_setlease(filp, fl->fl_type, &fl, NULL);
if (fl)
locks_free_lock(fl);
if (status)
goto out_fput;
spin_lock(&state_lock);
spin_lock(&fp->fi_lock);
/* Did the lease get broken before we took the lock? */
status = -EAGAIN;
if (fp->fi_had_conflict)
goto out_unlock;
/* Race breaker */
if (fp->fi_deleg_file) {
status = hash_delegation_locked(dp, fp);
goto out_unlock;
}
fp->fi_deleg_file = filp;
fp->fi_delegees = 0;
status = hash_delegation_locked(dp, fp);
spin_unlock(&fp->fi_lock);
spin_unlock(&state_lock);
if (status) {
/* Should never happen, this is a new fi_deleg_file */
WARN_ON_ONCE(1);
goto out_fput;
}
return 0;
out_unlock:
spin_unlock(&fp->fi_lock);
spin_unlock(&state_lock);
out_fput:
fput(filp);
return status;
}
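/*
 * Try to establish a read delegation: bail out if nfs4_get_existing_delegation
 * reports one already or a conflict has been seen, otherwise acquire a
 * lease from the vfs (or piggyback on the existing deleg file) and hash
 * the new delegation.
 */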
static struct nfs4_delegation *
nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
{
int status;
struct nfs4_delegation *dp;
if (fp->fi_had_conflict)
return ERR_PTR(-EAGAIN);
spin_lock(&state_lock);
spin_lock(&fp->fi_lock);
status = nfs4_get_existing_delegation(clp, fp);
spin_unlock(&fp->fi_lock);
spin_unlock(&state_lock);
if (status)
return ERR_PTR(status);
dp = alloc_init_deleg(clp, fh, odstate);
if (!dp)
return ERR_PTR(-ENOMEM);
get_nfs4_file(fp);
spin_lock(&state_lock);
spin_lock(&fp->fi_lock);
dp->dl_stid.sc_file = fp;
if (!fp->fi_deleg_file) {
spin_unlock(&fp->fi_lock);
spin_unlock(&state_lock);
status = nfs4_setlease(dp);
goto out;
}
if (fp->fi_had_conflict) {
status = -EAGAIN;
goto out_unlock;
}
status = hash_delegation_locked(dp, fp);
out_unlock:
spin_unlock(&fp->fi_lock);
spin_unlock(&state_lock);
out:
if (status) {
put_clnt_odstate(dp->dl_clnt_odstate);
nfs4_put_stid(&dp->dl_stid);
return ERR_PTR(status);
}
return dp;
}
static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
{
open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
if (status == -EAGAIN)
open->op_why_no_deleg = WND4_CONTENTION;
else {
open->op_why_no_deleg = WND4_RESOURCE;
switch (open->op_deleg_want) {
case NFS4_SHARE_WANT_READ_DELEG:
case NFS4_SHARE_WANT_WRITE_DELEG:
case NFS4_SHARE_WANT_ANY_DELEG:
break;
case NFS4_SHARE_WANT_CANCEL:
open->op_why_no_deleg = WND4_CANCELLED;
break;
case NFS4_SHARE_WANT_NO_DELEG:
WARN_ON_ONCE(1);
}
}
}
/*
* Attempt to hand out a delegation.
*
* Note we don't support write delegations, and won't until the vfs has
* proper support for them.
*/
static void
nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
struct nfs4_ol_stateid *stp)
{
struct nfs4_delegation *dp;
struct nfs4_openowner *oo = openowner(stp->st_stateowner);
struct nfs4_client *clp = stp->st_stid.sc_client;
int cb_up;
int status = 0;
cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
open->op_recall = 0;
switch (open->op_claim_type) {
case NFS4_OPEN_CLAIM_PREVIOUS:
if (!cb_up)
open->op_recall = 1;
if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
goto out_no_deleg;
break;
case NFS4_OPEN_CLAIM_NULL:
case NFS4_OPEN_CLAIM_FH:
/*
* Let's not give out any delegations till everyone's
* had the chance to reclaim theirs, *and* until
* NLM locks have all been reclaimed:
*/
if (locks_in_grace(clp->net))
goto out_no_deleg;
if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
goto out_no_deleg;
/*
* Also, if the file was opened for write or
* create, there's a good chance the client's
* about to write to it, resulting in an
* immediate recall (since we don't support
* write delegations):
*/
if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
goto out_no_deleg;
if (open->op_create == NFS4_OPEN_CREATE)
goto out_no_deleg;
break;
default:
goto out_no_deleg;
}
dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
if (IS_ERR(dp))
goto out_no_deleg;
memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
STATEID_VAL(&dp->dl_stid.sc_stateid));
open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
nfs4_put_stid(&dp->dl_stid);
return;
out_no_deleg:
open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
dprintk("NFSD: WARNING: refusing delegation reclaim\n");
open->op_recall = 1;
}
/* 4.1 client asking for a delegation? */
if (open->op_deleg_want)
nfsd4_open_deleg_none_ext(open, status);
return;
}
static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
struct nfs4_delegation *dp)
{
if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
} else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
}
/* Otherwise the client must be confused wanting a delegation
* it already has, therefore we don't return
* NFS4_OPEN_DELEGATE_NONE_EXT and reason.
*/
}
__be32
nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{
struct nfsd4_compoundres *resp = rqstp->rq_resp;
struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
struct nfs4_file *fp = NULL;
struct nfs4_ol_stateid *stp = NULL;
struct nfs4_delegation *dp = NULL;
__be32 status;
/*
* Lookup file; if found, lookup stateid and check open request,
* and check for delegations in the process of being recalled.
* If not found, create the nfs4_file struct
*/
fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
if (fp != open->op_file) {
status = nfs4_check_deleg(cl, open, &dp);
if (status)
goto out;
spin_lock(&fp->fi_lock);
stp = nfsd4_find_existing_open(fp, open);
spin_unlock(&fp->fi_lock);
} else {
open->op_file = NULL;
status = nfserr_bad_stateid;
if (nfsd4_is_deleg_cur(open))
goto out;
}
/*
* OPEN the file, or upgrade an existing OPEN.
* If truncate fails, the OPEN fails.
*/
if (stp) {
/* Stateid was found, this is an OPEN upgrade */
mutex_lock(&stp->st_mutex);
status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
if (status) {
mutex_unlock(&stp->st_mutex);
goto out;
}
} else {
/* stp is returned locked. */
stp = init_open_stateid(fp, open);
/* See if we lost the race to some other thread */
if (stp->st_access_bmap != 0) {
status = nfs4_upgrade_open(rqstp, fp, current_fh,
stp, open);
if (status) {
mutex_unlock(&stp->st_mutex);
goto out;
}
goto upgrade_out;
}
status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
if (status) {
mutex_unlock(&stp->st_mutex);
release_open_stateid(stp);
goto out;
}
stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
open->op_odstate);
if (stp->st_clnt_odstate == open->op_odstate)
open->op_odstate = NULL;
}
upgrade_out:
nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
mutex_unlock(&stp->st_mutex);
if (nfsd4_has_session(&resp->cstate)) {
if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
open->op_why_no_deleg = WND4_NOT_WANTED;
goto nodeleg;
}
}
/*
* Attempt to hand out a delegation. No error return, because the
* OPEN succeeds even if we fail.
*/
nfs4_open_delegation(current_fh, open, stp);
nodeleg:
status = nfs_ok;
dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
STATEID_VAL(&stp->st_stid.sc_stateid));
out:
/* 4.1 client trying to upgrade/downgrade delegation? */
if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
open->op_deleg_want)
nfsd4_deleg_xgrade_none_ext(open, dp);
if (fp)
put_nfs4_file(fp);
if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
/*
* To finish the open response, we just need to set the rflags.
*/
open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
if (nfsd4_has_session(&resp->cstate))
open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
if (dp)
nfs4_put_stid(&dp->dl_stid);
if (stp)
nfs4_put_stid(&stp->st_stid);
return status;
}
void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
struct nfsd4_open *open)
{
if (open->op_openowner) {
struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
nfsd4_cstate_assign_replay(cstate, so);
nfs4_put_stateowner(so);
}
if (open->op_file)
kmem_cache_free(file_slab, open->op_file);
if (open->op_stp)
nfs4_put_stid(&open->op_stp->st_stid);
if (open->op_odstate)
kmem_cache_free(odstate_slab, open->op_odstate);
}
__be32
nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
clientid_t *clid)
{
struct nfs4_client *clp;
__be32 status;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
dprintk("process_renew(%08x/%08x): starting\n",
clid->cl_boot, clid->cl_id);
status = lookup_clientid(clid, cstate, nn);
if (status)
goto out;
clp = cstate->clp;
status = nfserr_cb_path_down;
if (!list_empty(&clp->cl_delegations)
&& clp->cl_cb_state != NFSD4_CB_UP)
goto out;
status = nfs_ok;
out:
return status;
}
void
nfsd4_end_grace(struct nfsd_net *nn)
{
/* do nothing if grace period already ended */
if (nn->grace_ended)
return;
dprintk("NFSD: end of grace period\n");
nn->grace_ended = true;
/*
* If the server goes down again right now, an NFSv4
* client will still be allowed to reclaim after it comes back up,
* even if it hasn't yet had a chance to reclaim state this time.
*
*/
nfsd4_record_grace_done(nn);
/*
* At this point, NFSv4 clients can still reclaim. But if the
* server crashes, any that have not yet reclaimed will be out
* of luck on the next boot.
*
* (NFSv4.1+ clients are considered to have reclaimed once they
* call RECLAIM_COMPLETE. NFSv4.0 clients are considered to
* have reclaimed after their first OPEN.)
*/
locks_end_grace(&nn->nfsd4_manager);
/*
* At this point, and once lockd and/or any other containers
* exit their grace period, further reclaims will fail and
* regular locking can resume.
*/
}
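/*
 * The laundromat: expire clients, delegations, closed-owner stateids and
 * blocked locks whose lease time has passed, and return the number of
 * seconds until the next run is due.
 */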
static time_t
nfs4_laundromat(struct nfsd_net *nn)
{
struct nfs4_client *clp;
struct nfs4_openowner *oo;
struct nfs4_delegation *dp;
struct nfs4_ol_stateid *stp;
struct nfsd4_blocked_lock *nbl;
struct list_head *pos, *next, reaplist;
time_t cutoff = get_seconds() - nn->nfsd4_lease;
time_t t, new_timeo = nn->nfsd4_lease;
dprintk("NFSD: laundromat service - starting\n");
nfsd4_end_grace(nn);
INIT_LIST_HEAD(&reaplist);
spin_lock(&nn->client_lock);
list_for_each_safe(pos, next, &nn->client_lru) {
clp = list_entry(pos, struct nfs4_client, cl_lru);
if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
t = clp->cl_time - cutoff;
new_timeo = min(new_timeo, t);
break;
}
if (mark_client_expired_locked(clp)) {
dprintk("NFSD: client in use (clientid %08x)\n",
clp->cl_clientid.cl_id);
continue;
}
list_add(&clp->cl_lru, &reaplist);
}
spin_unlock(&nn->client_lock);
list_for_each_safe(pos, next, &reaplist) {
clp = list_entry(pos, struct nfs4_client, cl_lru);
dprintk("NFSD: purging unused client (clientid %08x)\n",
clp->cl_clientid.cl_id);
list_del_init(&clp->cl_lru);
expire_client(clp);
}
spin_lock(&state_lock);
list_for_each_safe(pos, next, &nn->del_recall_lru) {
dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
t = dp->dl_time - cutoff;
new_timeo = min(new_timeo, t);
break;
}
WARN_ON(!unhash_delegation_locked(dp));
list_add(&dp->dl_recall_lru, &reaplist);
}
spin_unlock(&state_lock);
while (!list_empty(&reaplist)) {
dp = list_first_entry(&reaplist, struct nfs4_delegation,
dl_recall_lru);
list_del_init(&dp->dl_recall_lru);
revoke_delegation(dp);
}
spin_lock(&nn->client_lock);
while (!list_empty(&nn->close_lru)) {
oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
oo_close_lru);
if (time_after((unsigned long)oo->oo_time,
(unsigned long)cutoff)) {
t = oo->oo_time - cutoff;
new_timeo = min(new_timeo, t);
break;
}
list_del_init(&oo->oo_close_lru);
stp = oo->oo_last_closed_stid;
oo->oo_last_closed_stid = NULL;
spin_unlock(&nn->client_lock);
nfs4_put_stid(&stp->st_stid);
spin_lock(&nn->client_lock);
}
spin_unlock(&nn->client_lock);
/*
* It's possible for a client to try and acquire an already held lock
* that is being held for a long time, and then lose interest in it.
* So, we clean out any un-revisited request after a lease period
* under the assumption that the client is no longer interested.
*
* RFC5661, sec. 9.6 states that the client must not rely on getting
* notifications and must continue to poll for locks, even when the
* server supports them. Thus this shouldn't lead to clients blocking
* indefinitely once the lock does become free.
*/
BUG_ON(!list_empty(&reaplist));
spin_lock(&nn->blocked_locks_lock);
while (!list_empty(&nn->blocked_locks_lru)) {
nbl = list_first_entry(&nn->blocked_locks_lru,
struct nfsd4_blocked_lock, nbl_lru);
if (time_after((unsigned long)nbl->nbl_time,
(unsigned long)cutoff)) {
t = nbl->nbl_time - cutoff;
new_timeo = min(new_timeo, t);
break;
}
list_move(&nbl->nbl_lru, &reaplist);
list_del_init(&nbl->nbl_list);
}
spin_unlock(&nn->blocked_locks_lock);
while (!list_empty(&reaplist)) {
nbl = list_first_entry(&reaplist,
struct nfsd4_blocked_lock, nbl_lru);
list_del_init(&nbl->nbl_lru);
posix_unblock_lock(&nbl->nbl_lock);
free_blocked_lock(nbl);
}
new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
return new_timeo;
}
static struct workqueue_struct *laundry_wq;
static void laundromat_main(struct work_struct *);
static void
laundromat_main(struct work_struct *laundry)
{
time_t t;
struct delayed_work *dwork = to_delayed_work(laundry);
struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
laundromat_work);
t = nfs4_laundromat(nn);
dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
}
static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
{
if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
return nfserr_bad_stateid;
return nfs_ok;
}
static inline int
access_permit_read(struct nfs4_ol_stateid *stp)
{
return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
test_access(NFS4_SHARE_ACCESS_WRITE, stp);
}
static inline int
access_permit_write(struct nfs4_ol_stateid *stp)
{
return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
test_access(NFS4_SHARE_ACCESS_BOTH, stp);
}
static
__be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
{
__be32 status = nfserr_openmode;
/* For lock stateid's, we test the parent open, not the lock: */
if (stp->st_openstp)
stp = stp->st_openstp;
if ((flags & WR_STATE) && !access_permit_write(stp))
goto out;
if ((flags & RD_STATE) && !access_permit_read(stp))
goto out;
status = nfs_ok;
out:
return status;
}
static inline __be32
check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
{
if (ONE_STATEID(stateid) && (flags & RD_STATE))
return nfs_ok;
else if (opens_in_grace(net)) {
/* Answer in remaining cases depends on existence of
* conflicting state; so we must wait out the grace period. */
return nfserr_grace;
} else if (flags & WR_STATE)
return nfs4_share_conflict(current_fh,
NFS4_SHARE_DENY_WRITE);
else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
return nfs4_share_conflict(current_fh,
NFS4_SHARE_DENY_READ);
}
/*
* Allow READ/WRITE during grace period on recovered state only for files
* that are not able to provide mandatory locking.
*/
static inline int
grace_disallows_io(struct net *net, struct inode *inode)
{
return opens_in_grace(net) && mandatory_lock(inode);
}
static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
{
/*
* When sessions are used the stateid generation number is ignored
* when it is zero.
*/
if (has_session && in->si_generation == 0)
return nfs_ok;
if (in->si_generation == ref->si_generation)
return nfs_ok;
/* If the client sends us a stateid from the future, it's buggy: */
if (nfsd4_stateid_generation_after(in, ref))
return nfserr_bad_stateid;
/*
* However, we could see a stateid from the past, even from a
* non-buggy client. For example, if the client sends a lock
* while some IO is outstanding, the lock may bump si_generation
* while the IO is still in flight. The client could avoid that
* situation by waiting for responses on all the IO requests,
* but better performance may result in retrying IO that
* receives an old_stateid error if requests are rarely
* reordered in flight:
*/
return nfserr_old_stateid;
}
static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
{
if (ols->st_stateowner->so_is_open_owner &&
!(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
return nfserr_bad_stateid;
return nfs_ok;
}
static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
{
struct nfs4_stid *s;
__be32 status = nfserr_bad_stateid;
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
return status;
/* Client debugging aid. */
if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
char addr_str[INET6_ADDRSTRLEN];
rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
sizeof(addr_str));
pr_warn_ratelimited("NFSD: client %s testing state ID "
"with incorrect client ID\n", addr_str);
return status;
}
spin_lock(&cl->cl_lock);
s = find_stateid_locked(cl, stateid);
if (!s)
goto out_unlock;
status = check_stateid_generation(stateid, &s->sc_stateid, 1);
if (status)
goto out_unlock;
switch (s->sc_type) {
case NFS4_DELEG_STID:
status = nfs_ok;
break;
case NFS4_REVOKED_DELEG_STID:
status = nfserr_deleg_revoked;
break;
case NFS4_OPEN_STID:
case NFS4_LOCK_STID:
status = nfsd4_check_openowner_confirmed(openlockstateid(s));
break;
default:
printk("unknown stateid type %x\n", s->sc_type);
/* Fallthrough */
case NFS4_CLOSED_STID:
case NFS4_CLOSED_DELEG_STID:
status = nfserr_bad_stateid;
}
out_unlock:
spin_unlock(&cl->cl_lock);
return status;
}
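/*
 * Look up a stateid matching @typemask for the calling client, returning
 * it in @s with a reference held. The special all-zero and all-one
 * stateids are rejected here.
 */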
__be32
nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
stateid_t *stateid, unsigned char typemask,
struct nfs4_stid **s, struct nfsd_net *nn)
{
__be32 status;
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
return nfserr_bad_stateid;
status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
if (status == nfserr_stale_clientid) {
if (cstate->session)
return nfserr_bad_stateid;
return nfserr_stale_stateid;
}
if (status)
return status;
*s = find_stateid_by_type(cstate->clp, stateid, typemask);
if (!*s)
return nfserr_bad_stateid;
return nfs_ok;
}
static struct file *
nfs4_find_file(struct nfs4_stid *s, int flags)
{
if (!s)
return NULL;
switch (s->sc_type) {
case NFS4_DELEG_STID:
if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
return NULL;
return get_file(s->sc_file->fi_deleg_file);
case NFS4_OPEN_STID:
case NFS4_LOCK_STID:
if (flags & RD_STATE)
return find_readable_file(s->sc_file);
else
return find_writeable_file(s->sc_file);
break;
}
return NULL;
}
static __be32
nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags)
{
__be32 status;
status = nfsd4_check_openowner_confirmed(ols);
if (status)
return status;
return nfs4_check_openmode(ols, flags);
}
static __be32
nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
struct file **filpp, bool *tmp_file, int flags)
{
int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
struct file *file;
__be32 status;
file = nfs4_find_file(s, flags);
if (file) {
status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
acc | NFSD_MAY_OWNER_OVERRIDE);
if (status) {
fput(file);
return status;
}
*filpp = file;
} else {
status = nfsd_open(rqstp, fhp, S_IFREG, acc, filpp);
if (status)
return status;
if (tmp_file)
*tmp_file = true;
}
return 0;
}
/*
* Checks for stateid operations
*/
__be32
nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
stateid_t *stateid, int flags, struct file **filpp, bool *tmp_file)
{
struct inode *ino = d_inode(fhp->fh_dentry);
struct net *net = SVC_NET(rqstp);
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct nfs4_stid *s = NULL;
__be32 status;
if (filpp)
*filpp = NULL;
if (tmp_file)
*tmp_file = false;
if (grace_disallows_io(net, ino))
return nfserr_grace;
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
status = check_special_stateids(net, fhp, stateid, flags);
goto done;
}
status = nfsd4_lookup_stateid(cstate, stateid,
NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
&s, nn);
if (status)
return status;
status = check_stateid_generation(stateid, &s->sc_stateid,
nfsd4_has_session(cstate));
if (status)
goto out;
switch (s->sc_type) {
case NFS4_DELEG_STID:
status = nfs4_check_delegmode(delegstateid(s), flags);
break;
case NFS4_OPEN_STID:
case NFS4_LOCK_STID:
status = nfs4_check_olstateid(fhp, openlockstateid(s), flags);
break;
default:
status = nfserr_bad_stateid;
break;
}
if (status)
goto out;
status = nfs4_check_fh(fhp, s);
done:
if (!status && filpp)
status = nfs4_check_file(rqstp, fhp, s, filpp, tmp_file, flags);
out:
if (s)
nfs4_put_stid(s);
return status;
}
/*
* Test if the stateid is valid
*/
__be32
nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_test_stateid *test_stateid)
{
struct nfsd4_test_stateid_id *stateid;
struct nfs4_client *cl = cstate->session->se_client;
list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
stateid->ts_id_status =
nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
return nfs_ok;
}
static __be32
nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
{
struct nfs4_ol_stateid *stp = openlockstateid(s);
__be32 ret;
mutex_lock(&stp->st_mutex);
ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
if (ret)
goto out;
ret = nfserr_locks_held;
if (check_for_locks(stp->st_stid.sc_file,
lockowner(stp->st_stateowner)))
goto out;
release_lock_stateid(stp);
ret = nfs_ok;
out:
mutex_unlock(&stp->st_mutex);
nfs4_put_stid(s);
return ret;
}
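/*
 * FREE_STATEID: only lock stateids with no locks outstanding and revoked
 * delegations can be freed here; open stateids and live delegations
 * report nfserr_locks_held.
 */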
__be32
nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_free_stateid *free_stateid)
{
stateid_t *stateid = &free_stateid->fr_stateid;
struct nfs4_stid *s;
struct nfs4_delegation *dp;
struct nfs4_client *cl = cstate->session->se_client;
__be32 ret = nfserr_bad_stateid;
spin_lock(&cl->cl_lock);
s = find_stateid_locked(cl, stateid);
if (!s)
goto out_unlock;
switch (s->sc_type) {
case NFS4_DELEG_STID:
ret = nfserr_locks_held;
break;
case NFS4_OPEN_STID:
ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
if (ret)
break;
ret = nfserr_locks_held;
break;
case NFS4_LOCK_STID:
atomic_inc(&s->sc_count);
spin_unlock(&cl->cl_lock);
ret = nfsd4_free_lock_stateid(stateid, s);
goto out;
case NFS4_REVOKED_DELEG_STID:
dp = delegstateid(s);
list_del_init(&dp->dl_recall_lru);
spin_unlock(&cl->cl_lock);
nfs4_put_stid(s);
ret = nfs_ok;
goto out;
/* Default falls through and returns nfserr_bad_stateid */
}
out_unlock:
spin_unlock(&cl->cl_lock);
out:
return ret;
}
static inline int
setlkflg (int type)
{
return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
RD_STATE : WR_STATE;
}
static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
{
struct svc_fh *current_fh = &cstate->current_fh;
struct nfs4_stateowner *sop = stp->st_stateowner;
__be32 status;
status = nfsd4_check_seqid(cstate, sop, seqid);
if (status)
return status;
if (stp->st_stid.sc_type == NFS4_CLOSED_STID
|| stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID)
/*
* "Closed" stateid's exist *only* to return
* nfserr_replay_me from the previous step, and
* revoked delegations are kept only for free_stateid.
*/
return nfserr_bad_stateid;
mutex_lock(&stp->st_mutex);
status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
if (status == nfs_ok)
status = nfs4_check_fh(current_fh, &stp->st_stid);
if (status != nfs_ok)
mutex_unlock(&stp->st_mutex);
return status;
}
/*
* Checks for sequence id mutating operations.
*/
static __be32
nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
stateid_t *stateid, char typemask,
struct nfs4_ol_stateid **stpp,
struct nfsd_net *nn)
{
__be32 status;
struct nfs4_stid *s;
struct nfs4_ol_stateid *stp = NULL;
dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
seqid, STATEID_VAL(stateid));
*stpp = NULL;
status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
if (status)
return status;
stp = openlockstateid(s);
nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
if (!status)
*stpp = stp;
else
nfs4_put_stid(&stp->st_stid);
return status;
}
static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
{
__be32 status;
struct nfs4_openowner *oo;
struct nfs4_ol_stateid *stp;
status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
NFS4_OPEN_STID, &stp, nn);
if (status)
return status;
oo = openowner(stp->st_stateowner);
if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
mutex_unlock(&stp->st_mutex);
nfs4_put_stid(&stp->st_stid);
return nfserr_bad_stateid;
}
*stpp = stp;
return nfs_ok;
}
__be32
nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_open_confirm *oc)
{
__be32 status;
struct nfs4_openowner *oo;
struct nfs4_ol_stateid *stp;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
cstate->current_fh.fh_dentry);
status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
if (status)
return status;
status = nfs4_preprocess_seqid_op(cstate,
oc->oc_seqid, &oc->oc_req_stateid,
NFS4_OPEN_STID, &stp, nn);
if (status)
goto out;
oo = openowner(stp->st_stateowner);
status = nfserr_bad_stateid;
if (oo->oo_flags & NFS4_OO_CONFIRMED) {
mutex_unlock(&stp->st_mutex);
goto put_stateid;
}
oo->oo_flags |= NFS4_OO_CONFIRMED;
nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
mutex_unlock(&stp->st_mutex);
dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
__func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
nfsd4_client_record_create(oo->oo_owner.so_client);
status = nfs_ok;
put_stateid:
nfs4_put_stid(&stp->st_stid);
out:
nfsd4_bump_seqid(cstate, status);
return status;
}
static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
{
if (!test_access(access, stp))
return;
nfs4_file_put_access(stp->st_stid.sc_file, access);
clear_access(access, stp);
}
static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
{
switch (to_access) {
case NFS4_SHARE_ACCESS_READ:
nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
break;
case NFS4_SHARE_ACCESS_WRITE:
nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
break;
case NFS4_SHARE_ACCESS_BOTH:
break;
default:
WARN_ON_ONCE(1);
}
}
__be32
nfsd4_open_downgrade(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
struct nfsd4_open_downgrade *od)
{
__be32 status;
struct nfs4_ol_stateid *stp;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
cstate->current_fh.fh_dentry);
/* We don't yet support WANT bits: */
if (od->od_deleg_want)
dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
od->od_deleg_want);
status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
&od->od_stateid, &stp, nn);
if (status)
goto out;
status = nfserr_inval;
if (!test_access(od->od_share_access, stp)) {
dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
stp->st_access_bmap, od->od_share_access);
goto put_stateid;
}
if (!test_deny(od->od_share_deny, stp)) {
dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
stp->st_deny_bmap, od->od_share_deny);
goto put_stateid;
}
nfs4_stateid_downgrade(stp, od->od_share_access);
reset_union_bmap_deny(od->od_share_deny, stp);
nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
status = nfs_ok;
put_stateid:
mutex_unlock(&stp->st_mutex);
nfs4_put_stid(&stp->st_stid);
out:
nfsd4_bump_seqid(cstate, status);
return status;
}
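/*
 * Unhash a closed open stateid. For v4.1+ we can put it immediately; for
 * v4.0 it moves to the close LRU so that a CLOSE replay can still be
 * answered for a while.
 */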
static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
{
struct nfs4_client *clp = s->st_stid.sc_client;
bool unhashed;
LIST_HEAD(reaplist);
s->st_stid.sc_type = NFS4_CLOSED_STID;
spin_lock(&clp->cl_lock);
unhashed = unhash_open_stateid(s, &reaplist);
if (clp->cl_minorversion) {
if (unhashed)
put_ol_stateid_locked(s, &reaplist);
spin_unlock(&clp->cl_lock);
free_ol_stateid_reaplist(&reaplist);
} else {
spin_unlock(&clp->cl_lock);
free_ol_stateid_reaplist(&reaplist);
if (unhashed)
move_to_close_lru(s, clp->net);
}
}
/*
* nfs4_unlock_state() called after encode
*/
__be32
nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_close *close)
{
__be32 status;
struct nfs4_ol_stateid *stp;
struct net *net = SVC_NET(rqstp);
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
dprintk("NFSD: nfsd4_close on file %pd\n",
cstate->current_fh.fh_dentry);
status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
&close->cl_stateid,
NFS4_OPEN_STID|NFS4_CLOSED_STID,
&stp, nn);
nfsd4_bump_seqid(cstate, status);
if (status)
goto out;
nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
mutex_unlock(&stp->st_mutex);
nfsd4_close_open_stateid(stp);
/* put reference from nfs4_preprocess_seqid_op */
nfs4_put_stid(&stp->st_stid);
out:
return status;
}
__be32
nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_delegreturn *dr)
{
struct nfs4_delegation *dp;
stateid_t *stateid = &dr->dr_stateid;
struct nfs4_stid *s;
__be32 status;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
return status;
status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
if (status)
goto out;
dp = delegstateid(s);
status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
if (status)
goto put_stateid;
destroy_delegation(dp);
put_stateid:
nfs4_put_stid(&dp->dl_stid);
out:
return status;
}
static inline u64
end_offset(u64 start, u64 len)
{
u64 end;
end = start + len;
return end >= start ? end: NFS4_MAX_UINT64;
}
/* last octet in a range */
static inline u64
last_byte_offset(u64 start, u64 len)
{
u64 end;
WARN_ON_ONCE(!len);
end = start + len;
return end > start ? end - 1: NFS4_MAX_UINT64;
}
/*
* TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
* we can't properly handle lock requests that go beyond the (2^63 - 1)-th
* byte, because of sign extension problems. Since NFSv4 calls for 64-bit
* locking, this prevents us from being completely protocol-compliant. The
* real solution to this problem is to start using unsigned file offsets in
* the VFS, but this is a very deep change!
*/
static inline void
nfs4_transform_lock_offset(struct file_lock *lock)
{
if (lock->fl_start < 0)
lock->fl_start = OFFSET_MAX;
if (lock->fl_end < 0)
lock->fl_end = OFFSET_MAX;
}
static fl_owner_t
nfsd4_fl_get_owner(fl_owner_t owner)
{
struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
nfs4_get_stateowner(&lo->lo_owner);
return owner;
}
static void
nfsd4_fl_put_owner(fl_owner_t owner)
{
struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
if (lo)
nfs4_put_stateowner(&lo->lo_owner);
}
static void
nfsd4_lm_notify(struct file_lock *fl)
{
struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner;
struct net *net = lo->lo_owner.so_client->net;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct nfsd4_blocked_lock *nbl = container_of(fl,
struct nfsd4_blocked_lock, nbl_lock);
bool queue = false;
/* An empty list means that something else is going to be using it */
spin_lock(&nn->blocked_locks_lock);
if (!list_empty(&nbl->nbl_list)) {
list_del_init(&nbl->nbl_list);
list_del_init(&nbl->nbl_lru);
queue = true;
}
spin_unlock(&nn->blocked_locks_lock);
if (queue)
nfsd4_run_cb(&nbl->nbl_cb);
}
static const struct lock_manager_operations nfsd_posix_mng_ops = {
.lm_notify = nfsd4_lm_notify,
.lm_get_owner = nfsd4_fl_get_owner,
.lm_put_owner = nfsd4_fl_put_owner,
};
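/*
 * Fill in the LOCK denied response from the conflicting file_lock. If the
 * conflict is held by one of our own lockowners we can report its owner
 * and clientid; otherwise (e.g. a local or lockd lock) report an
 * anonymous owner.
 */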
static inline void
nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
{
struct nfs4_lockowner *lo;
if (fl->fl_lmops == &nfsd_posix_mng_ops) {
lo = (struct nfs4_lockowner *) fl->fl_owner;
deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
lo->lo_owner.so_owner.len, GFP_KERNEL);
if (!deny->ld_owner.data)
/* We just don't care that much */
goto nevermind;
deny->ld_owner.len = lo->lo_owner.so_owner.len;
deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
} else {
nevermind:
deny->ld_owner.len = 0;
deny->ld_owner.data = NULL;
deny->ld_clientid.cl_boot = 0;
deny->ld_clientid.cl_id = 0;
}
deny->ld_start = fl->fl_start;
deny->ld_length = NFS4_MAX_UINT64;
if (fl->fl_end != NFS4_MAX_UINT64)
deny->ld_length = fl->fl_end - fl->fl_start + 1;
deny->ld_type = NFS4_READ_LT;
if (fl->fl_type != F_RDLCK)
deny->ld_type = NFS4_WRITE_LT;
}
static struct nfs4_lockowner *
find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
{
unsigned int strhashval = ownerstr_hashval(owner);
struct nfs4_stateowner *so;
lockdep_assert_held(&clp->cl_lock);
list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
so_strhash) {
if (so->so_is_open_owner)
continue;
if (same_owner_str(so, owner))
return lockowner(nfs4_get_stateowner(so));
}
return NULL;
}
static struct nfs4_lockowner *
find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
{
struct nfs4_lockowner *lo;
spin_lock(&clp->cl_lock);
lo = find_lockowner_str_locked(clp, owner);
spin_unlock(&clp->cl_lock);
return lo;
}
static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
{
unhash_lockowner_locked(lockowner(sop));
}
static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
{
struct nfs4_lockowner *lo = lockowner(sop);
kmem_cache_free(lockowner_slab, lo);
}
static const struct nfs4_stateowner_operations lockowner_ops = {
.so_unhash = nfs4_unhash_lockowner,
.so_free = nfs4_free_lockowner,
};
/*
* Alloc a lock owner structure.
* Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
* occurred.
*
* strhashval = ownerstr_hashval
*/
static struct nfs4_lockowner *
alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
struct nfs4_ol_stateid *open_stp,
struct nfsd4_lock *lock)
{
struct nfs4_lockowner *lo, *ret;
lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
if (!lo)
return NULL;
INIT_LIST_HEAD(&lo->lo_blocked);
INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
lo->lo_owner.so_is_open_owner = 0;
lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
lo->lo_owner.so_ops = &lockowner_ops;
spin_lock(&clp->cl_lock);
ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
if (ret == NULL) {
list_add(&lo->lo_owner.so_strhash,
&clp->cl_ownerstr_hashtbl[strhashval]);
ret = lo;
} else
nfs4_free_stateowner(&lo->lo_owner);
spin_unlock(&clp->cl_lock);
return ret;
}
static void
init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
struct nfs4_file *fp, struct inode *inode,
struct nfs4_ol_stateid *open_stp)
{
struct nfs4_client *clp = lo->lo_owner.so_client;
lockdep_assert_held(&clp->cl_lock);
atomic_inc(&stp->st_stid.sc_count);
stp->st_stid.sc_type = NFS4_LOCK_STID;
stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
get_nfs4_file(fp);
stp->st_stid.sc_file = fp;
stp->st_access_bmap = 0;
stp->st_deny_bmap = open_stp->st_deny_bmap;
stp->st_openstp = open_stp;
mutex_init(&stp->st_mutex);
list_add(&stp->st_locks, &open_stp->st_locks);
list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
spin_lock(&fp->fi_lock);
list_add(&stp->st_perfile, &fp->fi_stateids);
spin_unlock(&fp->fi_lock);
}
static struct nfs4_ol_stateid *
find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
{
struct nfs4_ol_stateid *lst;
struct nfs4_client *clp = lo->lo_owner.so_client;
lockdep_assert_held(&clp->cl_lock);
list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
if (lst->st_stid.sc_file == fp) {
atomic_inc(&lst->st_stid.sc_count);
return lst;
}
}
return NULL;
}
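/*
 * Find the lock stateid for this lockowner and file, or allocate and hash
 * a new one. The allocation drops cl_lock, so we recheck for a raced
 * insertion after retaking it.
 */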
static struct nfs4_ol_stateid *
find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
struct inode *inode, struct nfs4_ol_stateid *ost,
bool *new)
{
struct nfs4_stid *ns = NULL;
struct nfs4_ol_stateid *lst;
struct nfs4_openowner *oo = openowner(ost->st_stateowner);
struct nfs4_client *clp = oo->oo_owner.so_client;
spin_lock(&clp->cl_lock);
lst = find_lock_stateid(lo, fi);
if (lst == NULL) {
spin_unlock(&clp->cl_lock);
ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
if (ns == NULL)
return NULL;
spin_lock(&clp->cl_lock);
lst = find_lock_stateid(lo, fi);
if (likely(!lst)) {
lst = openlockstateid(ns);
init_lock_stateid(lst, lo, fi, inode, ost);
ns = NULL;
*new = true;
}
}
spin_unlock(&clp->cl_lock);
if (ns)
nfs4_put_stid(ns);
return lst;
}
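/*
 * Note the dance in find_or_create_lock_stateid() above: nfs4_alloc_stid()
 * performs a sleeping allocation, so it cannot be called under cl_lock.
 * The lock is dropped for the allocation and retaken, and the lookup is
 * repeated in case a racing thread inserted a stateid in the meantime;
 * the loser's unused allocation is released via nfs4_put_stid().
 */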
static int
check_lock_length(u64 offset, u64 length)
{
return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
(length > ~offset)));
}
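/*
 * The length test above is equivalent to checking offset + length >
 * NFS4_MAX_UINT64 without the overflow: since ~offset ==
 * NFS4_MAX_UINT64 - offset, "length > ~offset" rejects any range whose
 * end would wrap.  A length of NFS4_MAX_UINT64 is exempt because it is
 * the "lock to EOF" sentinel, and a length of 0 is always invalid.
 * E.g. with offset == NFS4_MAX_UINT64 - 15, lengths 1..15 are accepted.
 */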
static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
{
struct nfs4_file *fp = lock_stp->st_stid.sc_file;
lockdep_assert_held(&fp->fi_lock);
if (test_access(access, lock_stp))
return;
__nfs4_file_get_access(fp, access);
set_access(access, lock_stp);
}
static __be32
lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
struct nfs4_ol_stateid *ost,
struct nfsd4_lock *lock,
struct nfs4_ol_stateid **plst, bool *new)
{
__be32 status;
struct nfs4_file *fi = ost->st_stid.sc_file;
struct nfs4_openowner *oo = openowner(ost->st_stateowner);
struct nfs4_client *cl = oo->oo_owner.so_client;
struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
struct nfs4_lockowner *lo;
struct nfs4_ol_stateid *lst;
unsigned int strhashval;
bool hashed;
lo = find_lockowner_str(cl, &lock->lk_new_owner);
if (!lo) {
strhashval = ownerstr_hashval(&lock->lk_new_owner);
lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
if (lo == NULL)
return nfserr_jukebox;
} else {
/* with an existing lockowner, seqids must be the same */
status = nfserr_bad_seqid;
if (!cstate->minorversion &&
lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
goto out;
}
retry:
lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
if (lst == NULL) {
status = nfserr_jukebox;
goto out;
}
mutex_lock(&lst->st_mutex);
/* See if it's still hashed to avoid race with FREE_STATEID */
spin_lock(&cl->cl_lock);
hashed = !list_empty(&lst->st_perfile);
spin_unlock(&cl->cl_lock);
if (!hashed) {
mutex_unlock(&lst->st_mutex);
nfs4_put_stid(&lst->st_stid);
goto retry;
}
status = nfs_ok;
*plst = lst;
out:
nfs4_put_stateowner(&lo->lo_owner);
return status;
}
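/*
 * On success, lookup_or_create_lock_state() returns the lock stateid
 * referenced and with st_mutex held; nfsd4_lock() below drops both.  The
 * retry loop above handles a concurrent FREE_STATEID unhashing the
 * stateid between the lookup and the mutex acquisition.
 */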
/*
* LOCK operation
*/
__be32
nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_lock *lock)
{
struct nfs4_openowner *open_sop = NULL;
struct nfs4_lockowner *lock_sop = NULL;
struct nfs4_ol_stateid *lock_stp = NULL;
struct nfs4_ol_stateid *open_stp = NULL;
struct nfs4_file *fp;
struct file *filp = NULL;
struct nfsd4_blocked_lock *nbl = NULL;
struct file_lock *file_lock = NULL;
struct file_lock *conflock = NULL;
__be32 status = 0;
int lkflg;
int err;
bool new = false;
unsigned char fl_type;
unsigned int fl_flags = FL_POSIX;
struct net *net = SVC_NET(rqstp);
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
(long long) lock->lk_offset,
(long long) lock->lk_length);
if (check_lock_length(lock->lk_offset, lock->lk_length))
return nfserr_inval;
if ((status = fh_verify(rqstp, &cstate->current_fh,
S_IFREG, NFSD_MAY_LOCK))) {
dprintk("NFSD: nfsd4_lock: permission denied!\n");
return status;
}
if (lock->lk_is_new) {
if (nfsd4_has_session(cstate))
/* See rfc 5661 18.10.3: given clientid is ignored: */
memcpy(&lock->lk_new_clientid,
&cstate->session->se_client->cl_clientid,
sizeof(clientid_t));
status = nfserr_stale_clientid;
if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
goto out;
/* validate and update open stateid and open seqid */
status = nfs4_preprocess_confirmed_seqid_op(cstate,
lock->lk_new_open_seqid,
&lock->lk_new_open_stateid,
&open_stp, nn);
if (status)
goto out;
mutex_unlock(&open_stp->st_mutex);
open_sop = openowner(open_stp->st_stateowner);
status = nfserr_bad_stateid;
if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
&lock->lk_new_clientid))
goto out;
status = lookup_or_create_lock_state(cstate, open_stp, lock,
&lock_stp, &new);
} else {
status = nfs4_preprocess_seqid_op(cstate,
lock->lk_old_lock_seqid,
&lock->lk_old_lock_stateid,
NFS4_LOCK_STID, &lock_stp, nn);
}
if (status)
goto out;
lock_sop = lockowner(lock_stp->st_stateowner);
lkflg = setlkflg(lock->lk_type);
status = nfs4_check_openmode(lock_stp, lkflg);
if (status)
goto out;
status = nfserr_grace;
if (locks_in_grace(net) && !lock->lk_reclaim)
goto out;
status = nfserr_no_grace;
if (!locks_in_grace(net) && lock->lk_reclaim)
goto out;
fp = lock_stp->st_stid.sc_file;
switch (lock->lk_type) {
case NFS4_READW_LT:
if (nfsd4_has_session(cstate))
fl_flags |= FL_SLEEP;
/* Fallthrough */
case NFS4_READ_LT:
spin_lock(&fp->fi_lock);
filp = find_readable_file_locked(fp);
if (filp)
get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
spin_unlock(&fp->fi_lock);
fl_type = F_RDLCK;
break;
case NFS4_WRITEW_LT:
if (nfsd4_has_session(cstate))
fl_flags |= FL_SLEEP;
/* Fallthrough */
case NFS4_WRITE_LT:
spin_lock(&fp->fi_lock);
filp = find_writeable_file_locked(fp);
if (filp)
get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
spin_unlock(&fp->fi_lock);
fl_type = F_WRLCK;
break;
default:
status = nfserr_inval;
goto out;
}
if (!filp) {
status = nfserr_openmode;
goto out;
}
nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
if (!nbl) {
dprintk("NFSD: %s: unable to allocate block!\n", __func__);
status = nfserr_jukebox;
goto out;
}
file_lock = &nbl->nbl_lock;
file_lock->fl_type = fl_type;
file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
file_lock->fl_pid = current->tgid;
file_lock->fl_file = filp;
file_lock->fl_flags = fl_flags;
file_lock->fl_lmops = &nfsd_posix_mng_ops;
file_lock->fl_start = lock->lk_offset;
file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
nfs4_transform_lock_offset(file_lock);
conflock = locks_alloc_lock();
if (!conflock) {
dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
status = nfserr_jukebox;
goto out;
}
if (fl_flags & FL_SLEEP) {
nbl->nbl_time = jiffies;
spin_lock(&nn->blocked_locks_lock);
list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
spin_unlock(&nn->blocked_locks_lock);
}
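/*
 * With FL_SLEEP set (v4.1+ READW/WRITEW), vfs_lock_file() may return
 * FILE_LOCK_DEFERRED.  In that case the blocked-lock entry queued above
 * stays on the lists for the lock-manager notify callback (which
 * eventually sends CB_NOTIFY_LOCK to the client), which is why nbl is
 * set to NULL in that switch case rather than being freed at "out".
 */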
err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
switch (err) {
case 0: /* success! */
nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
status = 0;
break;
case FILE_LOCK_DEFERRED:
nbl = NULL;
/* Fallthrough */
case -EAGAIN: /* conflock holds conflicting lock */
status = nfserr_denied;
dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
nfs4_set_lock_denied(conflock, &lock->lk_denied);
break;
case -EDEADLK:
status = nfserr_deadlock;
break;
default:
dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
status = nfserrno(err);
break;
}
out:
if (nbl) {
/* dequeue it if we queued it before */
if (fl_flags & FL_SLEEP) {
spin_lock(&nn->blocked_locks_lock);
list_del_init(&nbl->nbl_list);
list_del_init(&nbl->nbl_lru);
spin_unlock(&nn->blocked_locks_lock);
}
free_blocked_lock(nbl);
}
if (filp)
fput(filp);
if (lock_stp) {
/* Bump seqid manually if the 4.0 replay owner is openowner */
if (cstate->replay_owner &&
cstate->replay_owner != &lock_sop->lo_owner &&
seqid_mutating_err(ntohl(status)))
lock_sop->lo_owner.so_seqid++;
mutex_unlock(&lock_stp->st_mutex);
/*
* If this is a new, never-before-used stateid, and we are
* returning an error, then just go ahead and release it.
*/
if (status && new)
release_lock_stateid(lock_stp);
nfs4_put_stid(&lock_stp->st_stid);
}
if (open_stp)
nfs4_put_stid(&open_stp->st_stid);
nfsd4_bump_seqid(cstate, status);
if (conflock)
locks_free_lock(conflock);
return status;
}
/*
* The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
* so we do a temporary open here just to get an open file to pass to
 * vfs_test_lock. (Arguably, test_lock should be done with an
* inode operation.)
*/
static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
{
struct file *file;
__be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
if (!err) {
err = nfserrno(vfs_test_lock(file, lock));
fput(file);
}
return err;
}
/*
* LOCKT operation
*/
__be32
nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_lockt *lockt)
{
struct file_lock *file_lock = NULL;
struct nfs4_lockowner *lo = NULL;
__be32 status;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
if (locks_in_grace(SVC_NET(rqstp)))
return nfserr_grace;
if (check_lock_length(lockt->lt_offset, lockt->lt_length))
return nfserr_inval;
if (!nfsd4_has_session(cstate)) {
status = lookup_clientid(&lockt->lt_clientid, cstate, nn);
if (status)
goto out;
}
if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
goto out;
file_lock = locks_alloc_lock();
if (!file_lock) {
dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
status = nfserr_jukebox;
goto out;
}
switch (lockt->lt_type) {
case NFS4_READ_LT:
case NFS4_READW_LT:
file_lock->fl_type = F_RDLCK;
break;
case NFS4_WRITE_LT:
case NFS4_WRITEW_LT:
file_lock->fl_type = F_WRLCK;
break;
default:
dprintk("NFSD: nfs4_lockt: bad lock type!\n");
status = nfserr_inval;
goto out;
}
lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
if (lo)
file_lock->fl_owner = (fl_owner_t)lo;
file_lock->fl_pid = current->tgid;
file_lock->fl_flags = FL_POSIX;
file_lock->fl_start = lockt->lt_offset;
file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
nfs4_transform_lock_offset(file_lock);
status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
if (status)
goto out;
if (file_lock->fl_type != F_UNLCK) {
status = nfserr_denied;
nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
}
out:
if (lo)
nfs4_put_stateowner(&lo->lo_owner);
if (file_lock)
locks_free_lock(file_lock);
return status;
}
__be32
nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_locku *locku)
{
struct nfs4_ol_stateid *stp;
struct file *filp = NULL;
struct file_lock *file_lock = NULL;
__be32 status;
int err;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
(long long) locku->lu_offset,
(long long) locku->lu_length);
if (check_lock_length(locku->lu_offset, locku->lu_length))
return nfserr_inval;
status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
&locku->lu_stateid, NFS4_LOCK_STID,
&stp, nn);
if (status)
goto out;
filp = find_any_file(stp->st_stid.sc_file);
if (!filp) {
status = nfserr_lock_range;
goto put_stateid;
}
file_lock = locks_alloc_lock();
if (!file_lock) {
dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
status = nfserr_jukebox;
goto fput;
}
file_lock->fl_type = F_UNLCK;
file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
file_lock->fl_pid = current->tgid;
file_lock->fl_file = filp;
file_lock->fl_flags = FL_POSIX;
file_lock->fl_lmops = &nfsd_posix_mng_ops;
file_lock->fl_start = locku->lu_offset;
file_lock->fl_end = last_byte_offset(locku->lu_offset,
locku->lu_length);
nfs4_transform_lock_offset(file_lock);
err = vfs_lock_file(filp, F_SETLK, file_lock, NULL);
if (err) {
dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
goto out_nfserr;
}
nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
fput:
fput(filp);
put_stateid:
mutex_unlock(&stp->st_mutex);
nfs4_put_stid(&stp->st_stid);
out:
nfsd4_bump_seqid(cstate, status);
if (file_lock)
locks_free_lock(file_lock);
return status;
out_nfserr:
status = nfserrno(err);
goto fput;
}
/*
* returns
* true: locks held by lockowner
* false: no locks held by lockowner
*/
static bool
check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
{
struct file_lock *fl;
int status = false;
struct file *filp = find_any_file(fp);
struct inode *inode;
struct file_lock_context *flctx;
if (!filp) {
/* Any valid lock stateid should have some sort of access */
WARN_ON_ONCE(1);
return status;
}
inode = file_inode(filp);
flctx = inode->i_flctx;
if (flctx && !list_empty_careful(&flctx->flc_posix)) {
spin_lock(&flctx->flc_lock);
list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
if (fl->fl_owner == (fl_owner_t)lowner) {
status = true;
break;
}
}
spin_unlock(&flctx->flc_lock);
}
fput(filp);
return status;
}
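/*
 * This works because nfsd sets fl_owner to the nfs4_lockowner pointer
 * when it sets locks (see nfsd4_lock() and nfsd4_locku() above), so a
 * simple pointer comparison identifies this owner's locks.
 */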
__be32
nfsd4_release_lockowner(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
struct nfsd4_release_lockowner *rlockowner)
{
clientid_t *clid = &rlockowner->rl_clientid;
struct nfs4_stateowner *sop;
struct nfs4_lockowner *lo = NULL;
struct nfs4_ol_stateid *stp;
struct xdr_netobj *owner = &rlockowner->rl_owner;
unsigned int hashval = ownerstr_hashval(owner);
__be32 status;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
struct nfs4_client *clp;
LIST_HEAD(reaplist);
dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
clid->cl_boot, clid->cl_id);
status = lookup_clientid(clid, cstate, nn);
if (status)
return status;
clp = cstate->clp;
/* Find the matching lock stateowner */
spin_lock(&clp->cl_lock);
list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
so_strhash) {
if (sop->so_is_open_owner || !same_owner_str(sop, owner))
continue;
/* see if there are still any locks associated with it */
lo = lockowner(sop);
list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
if (check_for_locks(stp->st_stid.sc_file, lo)) {
status = nfserr_locks_held;
spin_unlock(&clp->cl_lock);
return status;
}
}
nfs4_get_stateowner(sop);
break;
}
if (!lo) {
spin_unlock(&clp->cl_lock);
return status;
}
unhash_lockowner_locked(lo);
while (!list_empty(&lo->lo_owner.so_stateids)) {
stp = list_first_entry(&lo->lo_owner.so_stateids,
struct nfs4_ol_stateid,
st_perstateowner);
WARN_ON(!unhash_lock_stateid(stp));
put_ol_stateid_locked(stp, &reaplist);
}
spin_unlock(&clp->cl_lock);
free_ol_stateid_reaplist(&reaplist);
nfs4_put_stateowner(&lo->lo_owner);
return status;
}
static inline struct nfs4_client_reclaim *
alloc_reclaim(void)
{
return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
}
bool
nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn)
{
struct nfs4_client_reclaim *crp;
crp = nfsd4_find_reclaim_client(name, nn);
return (crp && crp->cr_clp);
}
/*
* failure => all reset bets are off, nfserr_no_grace...
*/
struct nfs4_client_reclaim *
nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn)
{
unsigned int strhashval;
struct nfs4_client_reclaim *crp;
dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
crp = alloc_reclaim();
if (crp) {
strhashval = clientstr_hashval(name);
INIT_LIST_HEAD(&crp->cr_strhash);
list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
memcpy(crp->cr_recdir, name, HEXDIR_LEN);
crp->cr_clp = NULL;
nn->reclaim_str_hashtbl_size++;
}
return crp;
}
void
nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
{
list_del(&crp->cr_strhash);
kfree(crp);
nn->reclaim_str_hashtbl_size--;
}
void
nfs4_release_reclaim(struct nfsd_net *nn)
{
struct nfs4_client_reclaim *crp = NULL;
int i;
for (i = 0; i < CLIENT_HASH_SIZE; i++) {
while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
crp = list_entry(nn->reclaim_str_hashtbl[i].next,
struct nfs4_client_reclaim, cr_strhash);
nfs4_remove_reclaim_record(crp, nn);
}
}
WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
}
/*
 * Called from OPEN, CLAIM_PREVIOUS with a new clientid.
 */
struct nfs4_client_reclaim *
nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn)
{
unsigned int strhashval;
struct nfs4_client_reclaim *crp = NULL;
dprintk("NFSD: nfs4_find_reclaim_client for recdir %s\n", recdir);
strhashval = clientstr_hashval(recdir);
list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
if (same_name(crp->cr_recdir, recdir)) {
return crp;
}
}
return NULL;
}
/*
* Called from OPEN. Look for clientid in reclaim list.
*/
__be32
nfs4_check_open_reclaim(clientid_t *clid,
struct nfsd4_compound_state *cstate,
struct nfsd_net *nn)
{
__be32 status;
/* find clientid in conf_id_hashtbl */
status = lookup_clientid(clid, cstate, nn);
if (status)
return nfserr_reclaim_bad;
if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags))
return nfserr_no_grace;
if (nfsd4_client_record_check(cstate->clp))
return nfserr_reclaim_bad;
return nfs_ok;
}
#ifdef CONFIG_NFSD_FAULT_INJECTION
static inline void
put_client(struct nfs4_client *clp)
{
atomic_dec(&clp->cl_refcount);
}
static struct nfs4_client *
nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
{
struct nfs4_client *clp;
struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
nfsd_net_id);
if (!nfsd_netns_ready(nn))
return NULL;
list_for_each_entry(clp, &nn->client_lru, cl_lru) {
if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
return clp;
}
return NULL;
}
u64
nfsd_inject_print_clients(void)
{
struct nfs4_client *clp;
u64 count = 0;
struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
nfsd_net_id);
char buf[INET6_ADDRSTRLEN];
if (!nfsd_netns_ready(nn))
return 0;
spin_lock(&nn->client_lock);
list_for_each_entry(clp, &nn->client_lru, cl_lru) {
rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
pr_info("NFS Client: %s\n", buf);
++count;
}
spin_unlock(&nn->client_lock);
return count;
}
u64
nfsd_inject_forget_client(struct sockaddr_storage *addr, size_t addr_size)
{
u64 count = 0;
struct nfs4_client *clp;
struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
nfsd_net_id);
if (!nfsd_netns_ready(nn))
return count;
spin_lock(&nn->client_lock);
clp = nfsd_find_client(addr, addr_size);
if (clp) {
if (mark_client_expired_locked(clp) == nfs_ok)
++count;
else
clp = NULL;
}
spin_unlock(&nn->client_lock);
if (clp)
expire_client(clp);
return count;
}
u64
nfsd_inject_forget_clients(u64 max)
{
u64 count = 0;
struct nfs4_client *clp, *next;
struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
nfsd_net_id);
LIST_HEAD(reaplist);
if (!nfsd_netns_ready(nn))
return count;
spin_lock(&nn->client_lock);
list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
if (mark_client_expired_locked(clp) == nfs_ok) {
list_add(&clp->cl_lru, &reaplist);
if (max != 0 && ++count >= max)
break;
}
}
spin_unlock(&nn->client_lock);
list_for_each_entry_safe(clp, next, &reaplist, cl_lru)
expire_client(clp);
return count;
}
static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
const char *type)
{
char buf[INET6_ADDRSTRLEN];
rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
}
static void
nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst,
struct list_head *collect)
{
struct nfs4_client *clp = lst->st_stid.sc_client;
struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
nfsd_net_id);
if (!collect)
return;
lockdep_assert_held(&nn->client_lock);
atomic_inc(&clp->cl_refcount);
list_add(&lst->st_locks, collect);
}
static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
struct list_head *collect,
bool (*func)(struct nfs4_ol_stateid *))
{
struct nfs4_openowner *oop;
struct nfs4_ol_stateid *stp, *st_next;
struct nfs4_ol_stateid *lst, *lst_next;
u64 count = 0;
spin_lock(&clp->cl_lock);
list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
list_for_each_entry_safe(stp, st_next,
&oop->oo_owner.so_stateids, st_perstateowner) {
list_for_each_entry_safe(lst, lst_next,
&stp->st_locks, st_locks) {
if (func) {
if (func(lst))
nfsd_inject_add_lock_to_list(lst,
collect);
}
++count;
/*
* Despite the fact that these functions deal
* with 64-bit integers for "count", we must
* ensure that it doesn't blow up the
* clp->cl_refcount. Throw a warning if we
* start to approach INT_MAX here.
*/
WARN_ON_ONCE(count == (INT_MAX / 2));
if (count == max)
goto out;
}
}
}
out:
spin_unlock(&clp->cl_lock);
return count;
}
static u64
nfsd_collect_client_locks(struct nfs4_client *clp, struct list_head *collect,
u64 max)
{
return nfsd_foreach_client_lock(clp, max, collect, unhash_lock_stateid);
}
static u64
nfsd_print_client_locks(struct nfs4_client *clp)
{
u64 count = nfsd_foreach_client_lock(clp, 0, NULL, NULL);
nfsd_print_count(clp, count, "locked files");
return count;
}
u64
nfsd_inject_print_locks(void)
{
struct nfs4_client *clp;
u64 count = 0;
struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
nfsd_net_id);
if (!nfsd_netns_ready(nn))
return 0;
spin_lock(&nn->client_lock);
list_for_each_entry(clp, &nn->client_lru, cl_lru)
count += nfsd_print_client_locks(clp);
spin_unlock(&nn->client_lock);
return count;
}
static void
nfsd_reap_locks(struct list_head *reaplist)
{
struct nfs4_client *clp;
struct nfs4_ol_stateid *stp, *next;
list_for_each_entry_safe(stp, next, reaplist, st_locks) {
list_del_init(&stp->st_locks);
clp = stp->st_stid.sc_client;
nfs4_put_stid(&stp->st_stid);
put_client(clp);
}
}
u64
nfsd_inject_forget_client_locks(struct sockaddr_storage *addr, size_t addr_size)
{
unsigned int count = 0;
struct nfs4_client *clp;
struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
nfsd_net_id);
LIST_HEAD(reaplist);
if (!nfsd_netns_ready(nn))
return count;
spin_lock(&nn->client_lock);
clp = nfsd_find_client(addr, addr_size);
if (clp)
count = nfsd_collect_client_locks(clp, &reaplist, 0);
spin_unlock(&nn->client_lock);
nfsd_reap_locks(&reaplist);
return count;
}
u64
nfsd_inject_forget_locks(u64 max)
{
u64 count = 0;
struct nfs4_client *clp;
struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
nfsd_net_id);
LIST_HEAD(reaplist);
if (!nfsd_netns_ready(nn))
return count;
spin_lock(&nn->client_lock);
list_for_each_entry(clp, &nn->client_lru, cl_lru) {
count += nfsd_collect_client_locks(clp, &reaplist, max - count);
if (max != 0 && count >= max)
break;
}
spin_unlock(&nn->client_lock);
nfsd_reap_locks(&reaplist);
return count;
}
static u64
nfsd_foreach_client_openowner(struct nfs4_client *clp, u64 max,
struct list_head *collect,
void (*func)(struct nfs4_openowner *))
{
struct nfs4_openowner *oop, *next;
struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
nfsd_net_id);
u64 count = 0;
lockdep_assert_held(&nn->client_lock);
spin_lock(&clp->cl_lock);
list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
if (func) {
func(oop);
if (collect) {
atomic_inc(&clp->cl_refcount);
list_add(&oop->oo_perclient, collect);
}
}
++count;
/*
* Despite the fact that these functions deal with
* 64-bit integers for "count", we must ensure that
* it doesn't blow up the clp->cl_refcount. Throw a
* warning if we start to approach INT_MAX here.
*/
WARN_ON_ONCE(count == (INT_MAX / 2));
if (count == max)
break;
}
spin_unlock(&clp->cl_lock);
return count;
}
static u64
nfsd_print_client_openowners(struct nfs4_client *clp)
{
u64 count = nfsd_foreach_client_openowner(clp, 0, NULL, NULL);
nfsd_print_count(clp, count, "openowners");
return count;
}
static u64
nfsd_collect_client_openowners(struct nfs4_client *clp,
struct list_head *collect, u64 max)
{
return nfsd_foreach_client_openowner(clp, max, collect,
unhash_openowner_locked);
}
u64
nfsd_inject_print_openowners(void)
{
struct nfs4_client *clp;
u64 count = 0;
struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
nfsd_net_id);
if (!nfsd_netns_ready(nn))
return 0;
spin_lock(&nn->client_lock);
list_for_each_entry(clp, &nn->client_lru, cl_lru)
count += nfsd_print_client_openowners(clp);
spin_unlock(&nn->client_lock);
return count;
}
static void
nfsd_reap_openowners(struct list_head *reaplist)
{
struct nfs4_client *clp;
struct nfs4_openowner *oop, *next;
list_for_each_entry_safe(oop, next, reaplist, oo_perclient) {
list_del_init(&oop->oo_perclient);
clp = oop->oo_owner.so_client;
release_openowner(oop);
put_client(clp);
}
}
u64
nfsd_inject_forget_client_openowners(struct sockaddr_storage *addr,
size_t addr_size)
{
unsigned int count = 0;
struct nfs4_client *clp;
struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
nfsd_net_id);
LIST_HEAD(reaplist);
if (!nfsd_netns_ready(nn))
return count;
spin_lock(&nn->client_lock);
clp = nfsd_find_client(addr, addr_size);
if (clp)
count = nfsd_collect_client_openowners(clp, &reaplist, 0);
spin_unlock(&nn->client_lock);
nfsd_reap_openowners(&reaplist);
return count;
}
u64
nfsd_inject_forget_openowners(u64 max)
{
u64 count = 0;
struct nfs4_client *clp;
struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
nfsd_net_id);
LIST_HEAD(reaplist);
if (!nfsd_netns_ready(nn))
return count;
spin_lock(&nn->client_lock);
list_for_each_entry(clp, &nn->client_lru, cl_lru) {
count += nfsd_collect_client_openowners(clp, &reaplist,
max - count);
if (max != 0 && count >= max)
break;
}
spin_unlock(&nn->client_lock);
nfsd_reap_openowners(&reaplist);
return count;
}
static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
struct list_head *victims)
{
struct nfs4_delegation *dp, *next;
struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
nfsd_net_id);
u64 count = 0;
lockdep_assert_held(&nn->client_lock);
spin_lock(&state_lock);
list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
if (victims) {
/*
* It's not safe to mess with delegations that have a
* non-zero dl_time. They might have already been broken
* and could be processed by the laundromat outside of
* the state_lock. Just leave them be.
*/
if (dp->dl_time != 0)
continue;
atomic_inc(&clp->cl_refcount);
WARN_ON(!unhash_delegation_locked(dp));
list_add(&dp->dl_recall_lru, victims);
}
++count;
/*
* Despite the fact that these functions deal with
* 64-bit integers for "count", we must ensure that
* it doesn't blow up the clp->cl_refcount. Throw a
* warning if we start to approach INT_MAX here.
*/
WARN_ON_ONCE(count == (INT_MAX / 2));
if (count == max)
break;
}
spin_unlock(&state_lock);
return count;
}
static u64
nfsd_print_client_delegations(struct nfs4_client *clp)
{
u64 count = nfsd_find_all_delegations(clp, 0, NULL);
nfsd_print_count(clp, count, "delegations");
return count;
}
u64
nfsd_inject_print_delegations(void)
{
struct nfs4_client *clp;
u64 count = 0;
struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
nfsd_net_id);
if (!nfsd_netns_ready(nn))
return 0;
spin_lock(&nn->client_lock);
list_for_each_entry(clp, &nn->client_lru, cl_lru)
count += nfsd_print_client_delegations(clp);
spin_unlock(&nn->client_lock);
return count;
}
static void
nfsd_forget_delegations(struct list_head *reaplist)
{
struct nfs4_client *clp;
struct nfs4_delegation *dp, *next;
list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
list_del_init(&dp->dl_recall_lru);
clp = dp->dl_stid.sc_client;
revoke_delegation(dp);
put_client(clp);
}
}
u64
nfsd_inject_forget_client_delegations(struct sockaddr_storage *addr,
size_t addr_size)
{
u64 count = 0;
struct nfs4_client *clp;
struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
nfsd_net_id);
LIST_HEAD(reaplist);
if (!nfsd_netns_ready(nn))
return count;
spin_lock(&nn->client_lock);
clp = nfsd_find_client(addr, addr_size);
if (clp)
count = nfsd_find_all_delegations(clp, 0, &reaplist);
spin_unlock(&nn->client_lock);
nfsd_forget_delegations(&reaplist);
return count;
}
u64
nfsd_inject_forget_delegations(u64 max)
{
u64 count = 0;
struct nfs4_client *clp;
struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
nfsd_net_id);
LIST_HEAD(reaplist);
if (!nfsd_netns_ready(nn))
return count;
spin_lock(&nn->client_lock);
list_for_each_entry(clp, &nn->client_lru, cl_lru) {
count += nfsd_find_all_delegations(clp, max - count, &reaplist);
if (max != 0 && count >= max)
break;
}
spin_unlock(&nn->client_lock);
nfsd_forget_delegations(&reaplist);
return count;
}
static void
nfsd_recall_delegations(struct list_head *reaplist)
{
struct nfs4_client *clp;
struct nfs4_delegation *dp, *next;
list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
list_del_init(&dp->dl_recall_lru);
clp = dp->dl_stid.sc_client;
/*
* We skipped all entries that had a zero dl_time before,
* so we can now reset the dl_time back to 0. If a delegation
* break comes in now, then it won't make any difference since
* we're recalling it either way.
*/
spin_lock(&state_lock);
dp->dl_time = 0;
spin_unlock(&state_lock);
nfsd_break_one_deleg(dp);
put_client(clp);
}
}
u64
nfsd_inject_recall_client_delegations(struct sockaddr_storage *addr,
size_t addr_size)
{
u64 count = 0;
struct nfs4_client *clp;
struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
nfsd_net_id);
LIST_HEAD(reaplist);
if (!nfsd_netns_ready(nn))
return count;
spin_lock(&nn->client_lock);
clp = nfsd_find_client(addr, addr_size);
if (clp)
count = nfsd_find_all_delegations(clp, 0, &reaplist);
spin_unlock(&nn->client_lock);
nfsd_recall_delegations(&reaplist);
return count;
}
u64
nfsd_inject_recall_delegations(u64 max)
{
u64 count = 0;
struct nfs4_client *clp, *next;
struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
nfsd_net_id);
LIST_HEAD(reaplist);
if (!nfsd_netns_ready(nn))
return count;
spin_lock(&nn->client_lock);
list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
count += nfsd_find_all_delegations(clp, max - count, &reaplist);
if (max != 0 && count >= max)
break;
}
spin_unlock(&nn->client_lock);
nfsd_recall_delegations(&reaplist);
return count;
}
#endif /* CONFIG_NFSD_FAULT_INJECTION */
/*
* Since the lifetime of a delegation isn't limited to that of an open, a
* client may quite reasonably hang on to a delegation as long as it has
* the inode cached. This becomes an obvious problem the first time a
* client's inode cache approaches the size of the server's total memory.
*
* For now we avoid this problem by imposing a hard limit on the number
* of delegations, which varies according to the server's memory size.
*/
static void
set_max_delegations(void)
{
/*
* Allow at most 4 delegations per megabyte of RAM. Quick
* estimates suggest that in the worst case (where every delegation
* is for a different inode), a delegation could take about 1.5K,
* giving a worst case usage of about 6% of memory.
*/
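/*
 * Derivation of the shift: nr_free_buffer_pages() is in pages, so
 * pages >> (20 - PAGE_SHIFT) gives megabytes, and the "times 4" for
 * four delegations per megabyte folds in as >> (20 - 2 - PAGE_SHIFT).
 * With 4K pages (PAGE_SHIFT == 12) that is pages >> 6; e.g. 1 GiB of
 * buffer pages (2^18 pages) yields 4096 delegations.
 */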
max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
}
static int nfs4_state_create_net(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
int i;
nn->conf_id_hashtbl = kmalloc(sizeof(struct list_head) *
CLIENT_HASH_SIZE, GFP_KERNEL);
if (!nn->conf_id_hashtbl)
goto err;
nn->unconf_id_hashtbl = kmalloc(sizeof(struct list_head) *
CLIENT_HASH_SIZE, GFP_KERNEL);
if (!nn->unconf_id_hashtbl)
goto err_unconf_id;
nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) *
SESSION_HASH_SIZE, GFP_KERNEL);
if (!nn->sessionid_hashtbl)
goto err_sessionid;
for (i = 0; i < CLIENT_HASH_SIZE; i++) {
INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
}
for (i = 0; i < SESSION_HASH_SIZE; i++)
INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
nn->conf_name_tree = RB_ROOT;
nn->unconf_name_tree = RB_ROOT;
INIT_LIST_HEAD(&nn->client_lru);
INIT_LIST_HEAD(&nn->close_lru);
INIT_LIST_HEAD(&nn->del_recall_lru);
spin_lock_init(&nn->client_lock);
spin_lock_init(&nn->blocked_locks_lock);
INIT_LIST_HEAD(&nn->blocked_locks_lru);
INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
get_net(net);
return 0;
err_sessionid:
kfree(nn->unconf_id_hashtbl);
err_unconf_id:
kfree(nn->conf_id_hashtbl);
err:
return -ENOMEM;
}
static void
nfs4_state_destroy_net(struct net *net)
{
int i;
struct nfs4_client *clp = NULL;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
for (i = 0; i < CLIENT_HASH_SIZE; i++) {
while (!list_empty(&nn->conf_id_hashtbl[i])) {
clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
destroy_client(clp);
}
}
for (i = 0; i < CLIENT_HASH_SIZE; i++) {
while (!list_empty(&nn->unconf_id_hashtbl[i])) {
clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
destroy_client(clp);
}
}
kfree(nn->sessionid_hashtbl);
kfree(nn->unconf_id_hashtbl);
kfree(nn->conf_id_hashtbl);
put_net(net);
}
int
nfs4_state_start_net(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
int ret;
ret = nfs4_state_create_net(net);
if (ret)
return ret;
nn->boot_time = get_seconds();
nn->grace_ended = false;
nn->nfsd4_manager.block_opens = true;
locks_start_grace(net, &nn->nfsd4_manager);
nfsd4_client_tracking_init(net);
printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
nn->nfsd4_grace, net);
queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
return 0;
}
/* initialization to perform when the nfsd service is started: */
int
nfs4_state_start(void)
{
int ret;
ret = set_callback_cred();
if (ret)
return ret;
laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
if (laundry_wq == NULL) {
ret = -ENOMEM;
goto out_cleanup_cred;
}
ret = nfsd4_create_callback_queue();
if (ret)
goto out_free_laundry;
set_max_delegations();
return 0;
out_free_laundry:
destroy_workqueue(laundry_wq);
out_cleanup_cred:
cleanup_callback_cred();
return ret;
}
void
nfs4_state_shutdown_net(struct net *net)
{
struct nfs4_delegation *dp = NULL;
struct list_head *pos, *next, reaplist;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct nfsd4_blocked_lock *nbl;
cancel_delayed_work_sync(&nn->laundromat_work);
locks_end_grace(&nn->nfsd4_manager);
INIT_LIST_HEAD(&reaplist);
spin_lock(&state_lock);
list_for_each_safe(pos, next, &nn->del_recall_lru) {
dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
WARN_ON(!unhash_delegation_locked(dp));
list_add(&dp->dl_recall_lru, &reaplist);
}
spin_unlock(&state_lock);
list_for_each_safe(pos, next, &reaplist) {
dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
list_del_init(&dp->dl_recall_lru);
put_clnt_odstate(dp->dl_clnt_odstate);
nfs4_put_deleg_lease(dp->dl_stid.sc_file);
nfs4_put_stid(&dp->dl_stid);
}
BUG_ON(!list_empty(&reaplist));
spin_lock(&nn->blocked_locks_lock);
while (!list_empty(&nn->blocked_locks_lru)) {
nbl = list_first_entry(&nn->blocked_locks_lru,
struct nfsd4_blocked_lock, nbl_lru);
list_move(&nbl->nbl_lru, &reaplist);
list_del_init(&nbl->nbl_list);
}
spin_unlock(&nn->blocked_locks_lock);
while (!list_empty(&reaplist)) {
nbl = list_first_entry(&reaplist,
struct nfsd4_blocked_lock, nbl_lru);
list_del_init(&nbl->nbl_lru);
posix_unblock_lock(&nbl->nbl_lock);
free_blocked_lock(nbl);
}
nfsd4_client_tracking_exit(net);
nfs4_state_destroy_net(net);
}
void
nfs4_state_shutdown(void)
{
destroy_workqueue(laundry_wq);
nfsd4_destroy_callback_queue();
cleanup_callback_cred();
}
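/*
 * Helpers for NFSv4.1's "current stateid": put_stateid() records an
 * operation's result stateid in the compound state, and get_stateid()
 * substitutes that saved value when the client passes the special
 * "current stateid" token as input to a later op in the same compound.
 */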
static void
get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
}
static void
put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
if (cstate->minorversion) {
memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
}
}
void
clear_current_stateid(struct nfsd4_compound_state *cstate)
{
CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
}
/*
* functions to set current state id
*/
void
nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
{
put_stateid(cstate, &odp->od_stateid);
}
void
nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
{
put_stateid(cstate, &open->op_stateid);
}
void
nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
{
put_stateid(cstate, &close->cl_stateid);
}
void
nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock)
{
put_stateid(cstate, &lock->lk_resp_stateid);
}
/*
* functions to consume current state id
*/
void
nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
{
get_stateid(cstate, &odp->od_stateid);
}
void
nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, struct nfsd4_delegreturn *drp)
{
get_stateid(cstate, &drp->dr_stateid);
}
void
nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *fsp)
{
get_stateid(cstate, &fsp->fr_stateid);
}
void
nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr)
{
get_stateid(cstate, &setattr->sa_stateid);
}
void
nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
{
get_stateid(cstate, &close->cl_stateid);
}
void
nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku)
{
get_stateid(cstate, &locku->lu_stateid);
}
void
nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, struct nfsd4_read *read)
{
get_stateid(cstate, &read->rd_stateid);
}
void
nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, struct nfsd4_write *write)
{
get_stateid(cstate, &write->wr_stateid);
}
| ./CrossVul/dataset_final_sorted/CWE-404/c/good_3351_5 |
crossvul-cpp_data_bad_3351_4 | /*
* Server-side procedures for NFSv4.
*
* Copyright (c) 2002 The Regents of the University of Michigan.
* All rights reserved.
*
* Kendrick Smith <kmsmith@umich.edu>
* Andy Adamson <andros@umich.edu>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/file.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include "idmap.h"
#include "cache.h"
#include "xdr4.h"
#include "vfs.h"
#include "current_stateid.h"
#include "netns.h"
#include "acl.h"
#include "pnfs.h"
#include "trace.h"
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
#include <linux/security.h>
static inline void
nfsd4_security_inode_setsecctx(struct svc_fh *resfh, struct xdr_netobj *label, u32 *bmval)
{
struct inode *inode = d_inode(resfh->fh_dentry);
int status;
inode_lock(inode);
status = security_inode_setsecctx(resfh->fh_dentry,
label->data, label->len);
inode_unlock(inode);
if (status)
/*
* XXX: We should really fail the whole open, but we may
* already have created a new file, so it may be too
* late. For now this seems the least of evils:
*/
bmval[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
return;
}
#else
static inline void
nfsd4_security_inode_setsecctx(struct svc_fh *resfh, struct xdr_netobj *label, u32 *bmval)
{ }
#endif
#define NFSDDBG_FACILITY NFSDDBG_PROC
static u32 nfsd_attrmask[] = {
NFSD_WRITEABLE_ATTRS_WORD0,
NFSD_WRITEABLE_ATTRS_WORD1,
NFSD_WRITEABLE_ATTRS_WORD2
};
static u32 nfsd41_ex_attrmask[] = {
NFSD_SUPPATTR_EXCLCREAT_WORD0,
NFSD_SUPPATTR_EXCLCREAT_WORD1,
NFSD_SUPPATTR_EXCLCREAT_WORD2
};
static __be32
check_attr_support(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
u32 *bmval, u32 *writable)
{
struct dentry *dentry = cstate->current_fh.fh_dentry;
struct svc_export *exp = cstate->current_fh.fh_export;
if (!nfsd_attrs_supported(cstate->minorversion, bmval))
return nfserr_attrnotsupp;
if ((bmval[0] & FATTR4_WORD0_ACL) && !IS_POSIXACL(d_inode(dentry)))
return nfserr_attrnotsupp;
if ((bmval[2] & FATTR4_WORD2_SECURITY_LABEL) &&
!(exp->ex_flags & NFSEXP_SECURITY_LABEL))
return nfserr_attrnotsupp;
if (writable && !bmval_is_subset(bmval, writable))
return nfserr_inval;
if (writable && (bmval[2] & FATTR4_WORD2_MODE_UMASK) &&
(bmval[1] & FATTR4_WORD1_MODE))
return nfserr_inval;
return nfs_ok;
}
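/*
 * Attribute bitmaps are three 32-bit words (FATTR4_WORD0..WORD2).  The
 * checks above reject attributes this server never supports, ACLs and
 * security labels on exports that lack them, and - for callers passing
 * a "writable" mask - read-only attributes as well as the contradictory
 * MODE plus MODE_UMASK combination.
 */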
static __be32
nfsd4_check_open_attributes(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
{
__be32 status = nfs_ok;
if (open->op_create == NFS4_OPEN_CREATE) {
if (open->op_createmode == NFS4_CREATE_UNCHECKED
|| open->op_createmode == NFS4_CREATE_GUARDED)
status = check_attr_support(rqstp, cstate,
open->op_bmval, nfsd_attrmask);
else if (open->op_createmode == NFS4_CREATE_EXCLUSIVE4_1)
status = check_attr_support(rqstp, cstate,
open->op_bmval, nfsd41_ex_attrmask);
}
return status;
}
static int
is_create_with_attrs(struct nfsd4_open *open)
{
return open->op_create == NFS4_OPEN_CREATE
&& (open->op_createmode == NFS4_CREATE_UNCHECKED
|| open->op_createmode == NFS4_CREATE_GUARDED
|| open->op_createmode == NFS4_CREATE_EXCLUSIVE4_1);
}
/*
* if error occurs when setting the acl, just clear the acl bit
* in the returned attr bitmap.
*/
static void
do_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
struct nfs4_acl *acl, u32 *bmval)
{
__be32 status;
status = nfsd4_set_nfs4_acl(rqstp, fhp, acl);
if (status)
/*
* We should probably fail the whole open at this point,
 * but we've already created the file, so it's too late;
 * this seems the least of evils:
*/
bmval[0] &= ~FATTR4_WORD0_ACL;
}
static inline void
fh_dup2(struct svc_fh *dst, struct svc_fh *src)
{
fh_put(dst);
dget(src->fh_dentry);
if (src->fh_export)
exp_get(src->fh_export);
*dst = *src;
}
static __be32
do_open_permission(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open, int accmode)
{
__be32 status;
if (open->op_truncate &&
!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
return nfserr_inval;
accmode |= NFSD_MAY_READ_IF_EXEC;
if (open->op_share_access & NFS4_SHARE_ACCESS_READ)
accmode |= NFSD_MAY_READ;
if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
accmode |= (NFSD_MAY_WRITE | NFSD_MAY_TRUNC);
if (open->op_share_deny & NFS4_SHARE_DENY_READ)
accmode |= NFSD_MAY_WRITE;
status = fh_verify(rqstp, current_fh, S_IFREG, accmode);
return status;
}
static __be32 nfsd_check_obj_isreg(struct svc_fh *fh)
{
umode_t mode = d_inode(fh->fh_dentry)->i_mode;
if (S_ISREG(mode))
return nfs_ok;
if (S_ISDIR(mode))
return nfserr_isdir;
/*
* Using err_symlink as our catch-all case may look odd; but
* there's no other obvious error for this case in 4.0, and we
* happen to know that it will cause the linux v4 client to do
* the right thing on attempts to open something other than a
* regular file.
*/
return nfserr_symlink;
}
static void nfsd4_set_open_owner_reply_cache(struct nfsd4_compound_state *cstate, struct nfsd4_open *open, struct svc_fh *resfh)
{
if (nfsd4_has_session(cstate))
return;
fh_copy_shallow(&open->op_openowner->oo_owner.so_replay.rp_openfh,
&resfh->fh_handle);
}
static __be32
do_open_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_open *open, struct svc_fh **resfh)
{
struct svc_fh *current_fh = &cstate->current_fh;
int accmode;
__be32 status;
*resfh = kmalloc(sizeof(struct svc_fh), GFP_KERNEL);
if (!*resfh)
return nfserr_jukebox;
fh_init(*resfh, NFS4_FHSIZE);
open->op_truncate = 0;
if (open->op_create) {
/* FIXME: check session persistence and pnfs flags.
* The nfsv4.1 spec requires the following semantics:
*
* Persistent | pNFS | Server REQUIRED | Client Allowed
* Reply Cache | server | |
* -------------+--------+-----------------+--------------------
* no | no | EXCLUSIVE4_1 | EXCLUSIVE4_1
* | | | (SHOULD)
* | | and EXCLUSIVE4 | or EXCLUSIVE4
* | | | (SHOULD NOT)
* no | yes | EXCLUSIVE4_1 | EXCLUSIVE4_1
* yes | no | GUARDED4 | GUARDED4
* yes | yes | GUARDED4 | GUARDED4
*/
/*
 * Note: create modes (UNCHECKED, GUARDED...) are the same
* in NFSv4 as in v3 except EXCLUSIVE4_1.
*/
status = do_nfsd_create(rqstp, current_fh, open->op_fname.data,
open->op_fname.len, &open->op_iattr,
*resfh, open->op_createmode,
(u32 *)open->op_verf.data,
&open->op_truncate, &open->op_created);
if (!status && open->op_label.len)
nfsd4_security_inode_setsecctx(*resfh, &open->op_label, open->op_bmval);
/*
* Following rfc 3530 14.2.16, and rfc 5661 18.16.4
* use the returned bitmask to indicate which attributes
* we used to store the verifier:
*/
if (nfsd_create_is_exclusive(open->op_createmode) && status == 0)
open->op_bmval[1] |= (FATTR4_WORD1_TIME_ACCESS |
FATTR4_WORD1_TIME_MODIFY);
} else
/*
* Note this may exit with the parent still locked.
* We will hold the lock until nfsd4_open's final
* lookup, to prevent renames or unlinks until we've had
 * a chance to acquire a delegation if appropriate.
*/
status = nfsd_lookup(rqstp, current_fh,
open->op_fname.data, open->op_fname.len, *resfh);
if (status)
goto out;
status = nfsd_check_obj_isreg(*resfh);
if (status)
goto out;
if (is_create_with_attrs(open) && open->op_acl != NULL)
do_set_nfs4_acl(rqstp, *resfh, open->op_acl, open->op_bmval);
nfsd4_set_open_owner_reply_cache(cstate, open, *resfh);
accmode = NFSD_MAY_NOP;
if (open->op_created ||
open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR)
accmode |= NFSD_MAY_OWNER_OVERRIDE;
status = do_open_permission(rqstp, *resfh, open, accmode);
set_change_info(&open->op_cinfo, current_fh);
out:
return status;
}
static __be32
do_open_fhandle(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
{
struct svc_fh *current_fh = &cstate->current_fh;
__be32 status;
int accmode = 0;
/* We don't know the target directory, and therefore can not
* set the change info
*/
memset(&open->op_cinfo, 0, sizeof(struct nfsd4_change_info));
nfsd4_set_open_owner_reply_cache(cstate, open, current_fh);
open->op_truncate = (open->op_iattr.ia_valid & ATTR_SIZE) &&
(open->op_iattr.ia_size == 0);
/*
* In the delegation case, the client is telling us about an
* open that it *already* performed locally, some time ago. We
* should let it succeed now if possible.
*
* In the case of a CLAIM_FH open, on the other hand, the client
* may be counting on us to enforce permissions (the Linux 4.1
* client uses this for normal opens, for example).
*/
if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH)
accmode = NFSD_MAY_OWNER_OVERRIDE;
status = do_open_permission(rqstp, current_fh, open, accmode);
return status;
}
static void
copy_clientid(clientid_t *clid, struct nfsd4_session *session)
{
struct nfsd4_sessionid *sid =
(struct nfsd4_sessionid *)session->se_sessionid.data;
clid->cl_boot = sid->clientid.cl_boot;
clid->cl_id = sid->clientid.cl_id;
}
static __be32
nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_open *open)
{
__be32 status;
struct svc_fh *resfh = NULL;
struct net *net = SVC_NET(rqstp);
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
dprintk("NFSD: nfsd4_open filename %.*s op_openowner %p\n",
(int)open->op_fname.len, open->op_fname.data,
open->op_openowner);
/* This check is required by the spec. */
if (open->op_create && open->op_claim_type != NFS4_OPEN_CLAIM_NULL)
return nfserr_inval;
open->op_created = 0;
/*
* RFC5661 18.51.3
 * Before RECLAIM_COMPLETE is done, the server should deny new locks.
*/
if (nfsd4_has_session(cstate) &&
!test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
&cstate->session->se_client->cl_flags) &&
open->op_claim_type != NFS4_OPEN_CLAIM_PREVIOUS)
return nfserr_grace;
if (nfsd4_has_session(cstate))
copy_clientid(&open->op_clientid, cstate->session);
/* check seqid for replay. set nfs4_owner */
status = nfsd4_process_open1(cstate, open, nn);
if (status == nfserr_replay_me) {
struct nfs4_replay *rp = &open->op_openowner->oo_owner.so_replay;
fh_put(&cstate->current_fh);
fh_copy_shallow(&cstate->current_fh.fh_handle,
&rp->rp_openfh);
status = fh_verify(rqstp, &cstate->current_fh, 0, NFSD_MAY_NOP);
if (status)
dprintk("nfsd4_open: replay failed"
" restoring previous filehandle\n");
else
status = nfserr_replay_me;
}
if (status)
goto out;
if (open->op_xdr_error) {
status = open->op_xdr_error;
goto out;
}
status = nfsd4_check_open_attributes(rqstp, cstate, open);
if (status)
goto out;
/* Openowner is now set, so sequence id will get bumped. Now we need
* these checks before we do any creates: */
status = nfserr_grace;
if (opens_in_grace(net) && open->op_claim_type != NFS4_OPEN_CLAIM_PREVIOUS)
goto out;
status = nfserr_no_grace;
if (!opens_in_grace(net) && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
goto out;
switch (open->op_claim_type) {
case NFS4_OPEN_CLAIM_DELEGATE_CUR:
case NFS4_OPEN_CLAIM_NULL:
status = do_open_lookup(rqstp, cstate, open, &resfh);
if (status)
goto out;
break;
case NFS4_OPEN_CLAIM_PREVIOUS:
status = nfs4_check_open_reclaim(&open->op_clientid,
cstate, nn);
if (status)
goto out;
open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
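/* fall through to the by-filehandle open path */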
case NFS4_OPEN_CLAIM_FH:
case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
status = do_open_fhandle(rqstp, cstate, open);
if (status)
goto out;
resfh = &cstate->current_fh;
break;
case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
case NFS4_OPEN_CLAIM_DELEGATE_PREV:
dprintk("NFSD: unsupported OPEN claim type %d\n",
open->op_claim_type);
status = nfserr_notsupp;
goto out;
default:
dprintk("NFSD: Invalid OPEN claim type %d\n",
open->op_claim_type);
status = nfserr_inval;
goto out;
}
/*
* nfsd4_process_open2() does the actual opening of the file. If
* successful, it (1) truncates the file if open->op_truncate was
* set, (2) sets open->op_stateid, (3) sets open->op_delegation.
*/
status = nfsd4_process_open2(rqstp, resfh, open);
WARN(status && open->op_created,
"nfsd4_process_open2 failed to open newly-created file! status=%u\n",
be32_to_cpu(status));
out:
if (resfh && resfh != &cstate->current_fh) {
fh_dup2(&cstate->current_fh, resfh);
fh_put(resfh);
kfree(resfh);
}
nfsd4_cleanup_open_state(cstate, open);
nfsd4_bump_seqid(cstate, status);
return status;
}
/*
* OPEN is the only seqid-mutating operation whose decoding can fail
* with a seqid-mutating error (specifically, decoding of user names in
* the attributes). Therefore we have to do some processing to look up
* the stateowner so that we can bump the seqid.
*/
static __be32 nfsd4_open_omfg(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_op *op)
{
struct nfsd4_open *open = (struct nfsd4_open *)&op->u;
if (!seqid_mutating_err(ntohl(op->status)))
return op->status;
if (nfsd4_has_session(cstate))
return op->status;
open->op_xdr_error = op->status;
return nfsd4_open(rqstp, cstate, open);
}
/*
* filehandle-manipulating ops.
*/
static __be32
nfsd4_getfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct svc_fh **getfh)
{
if (!cstate->current_fh.fh_dentry)
return nfserr_nofilehandle;
*getfh = &cstate->current_fh;
return nfs_ok;
}
static __be32
nfsd4_putfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_putfh *putfh)
{
fh_put(&cstate->current_fh);
cstate->current_fh.fh_handle.fh_size = putfh->pf_fhlen;
memcpy(&cstate->current_fh.fh_handle.fh_base, putfh->pf_fhval,
putfh->pf_fhlen);
return fh_verify(rqstp, &cstate->current_fh, 0, NFSD_MAY_BYPASS_GSS);
}
static __be32
nfsd4_putrootfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
void *arg)
{
__be32 status;
fh_put(&cstate->current_fh);
status = exp_pseudoroot(rqstp, &cstate->current_fh);
return status;
}
static __be32
nfsd4_restorefh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
void *arg)
{
if (!cstate->save_fh.fh_dentry)
return nfserr_restorefh;
fh_dup2(&cstate->current_fh, &cstate->save_fh);
if (HAS_STATE_ID(cstate, SAVED_STATE_ID_FLAG)) {
memcpy(&cstate->current_stateid, &cstate->save_stateid, sizeof(stateid_t));
SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
}
return nfs_ok;
}
static __be32
nfsd4_savefh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
void *arg)
{
if (!cstate->current_fh.fh_dentry)
return nfserr_nofilehandle;
fh_dup2(&cstate->save_fh, &cstate->current_fh);
if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG)) {
memcpy(&cstate->save_stateid, &cstate->current_stateid, sizeof(stateid_t));
SET_STATE_ID(cstate, SAVED_STATE_ID_FLAG);
}
return nfs_ok;
}
/*
* misc nfsv4 ops
*/
static __be32
nfsd4_access(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_access *access)
{
if (access->ac_req_access & ~NFS3_ACCESS_FULL)
return nfserr_inval;
access->ac_resp_access = access->ac_req_access;
return nfsd_access(rqstp, &cstate->current_fh, &access->ac_resp_access,
&access->ac_supported);
}
static void gen_boot_verifier(nfs4_verifier *verifier, struct net *net)
{
__be32 verf[2];
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
/*
* This is opaque to client, so no need to byte-swap. Use
* __force to keep sparse happy
*/
verf[0] = (__force __be32)nn->nfssvc_boot.tv_sec;
verf[1] = (__force __be32)nn->nfssvc_boot.tv_usec;
memcpy(verifier->data, verf, sizeof(verifier->data));
}
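/*
 * The boot verifier changes whenever the server restarts, so clients
 * comparing the verifier returned by WRITE/COMMIT can detect a reboot
 * and know that uncommitted (UNSTABLE) writes may have been lost and
 * must be resent.
 */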
static __be32
nfsd4_commit(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_commit *commit)
{
gen_boot_verifier(&commit->co_verf, SVC_NET(rqstp));
return nfsd_commit(rqstp, &cstate->current_fh, commit->co_offset,
commit->co_count);
}
static __be32
nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_create *create)
{
struct svc_fh resfh;
__be32 status;
dev_t rdev;
fh_init(&resfh, NFS4_FHSIZE);
status = fh_verify(rqstp, &cstate->current_fh, S_IFDIR, NFSD_MAY_NOP);
if (status)
return status;
status = check_attr_support(rqstp, cstate, create->cr_bmval,
nfsd_attrmask);
if (status)
return status;
switch (create->cr_type) {
case NF4LNK:
status = nfsd_symlink(rqstp, &cstate->current_fh,
create->cr_name, create->cr_namelen,
create->cr_data, &resfh);
break;
case NF4BLK:
rdev = MKDEV(create->cr_specdata1, create->cr_specdata2);
if (MAJOR(rdev) != create->cr_specdata1 ||
MINOR(rdev) != create->cr_specdata2)
return nfserr_inval;
status = nfsd_create(rqstp, &cstate->current_fh,
create->cr_name, create->cr_namelen,
&create->cr_iattr, S_IFBLK, rdev, &resfh);
break;
case NF4CHR:
rdev = MKDEV(create->cr_specdata1, create->cr_specdata2);
if (MAJOR(rdev) != create->cr_specdata1 ||
MINOR(rdev) != create->cr_specdata2)
return nfserr_inval;
status = nfsd_create(rqstp, &cstate->current_fh,
create->cr_name, create->cr_namelen,
&create->cr_iattr, S_IFCHR, rdev, &resfh);
break;
case NF4SOCK:
status = nfsd_create(rqstp, &cstate->current_fh,
create->cr_name, create->cr_namelen,
&create->cr_iattr, S_IFSOCK, 0, &resfh);
break;
case NF4FIFO:
status = nfsd_create(rqstp, &cstate->current_fh,
create->cr_name, create->cr_namelen,
&create->cr_iattr, S_IFIFO, 0, &resfh);
break;
case NF4DIR:
create->cr_iattr.ia_valid &= ~ATTR_SIZE;
status = nfsd_create(rqstp, &cstate->current_fh,
create->cr_name, create->cr_namelen,
&create->cr_iattr, S_IFDIR, 0, &resfh);
break;
default:
status = nfserr_badtype;
}
if (status)
goto out;
if (create->cr_label.len)
nfsd4_security_inode_setsecctx(&resfh, &create->cr_label, create->cr_bmval);
if (create->cr_acl != NULL)
do_set_nfs4_acl(rqstp, &resfh, create->cr_acl,
create->cr_bmval);
fh_unlock(&cstate->current_fh);
set_change_info(&create->cr_cinfo, &cstate->current_fh);
fh_dup2(&cstate->current_fh, &resfh);
out:
fh_put(&resfh);
return status;
}
static __be32
nfsd4_getattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_getattr *getattr)
{
__be32 status;
status = fh_verify(rqstp, &cstate->current_fh, 0, NFSD_MAY_NOP);
if (status)
return status;
if (getattr->ga_bmval[1] & NFSD_WRITEONLY_ATTRS_WORD1)
return nfserr_inval;
getattr->ga_bmval[0] &= nfsd_suppattrs[cstate->minorversion][0];
getattr->ga_bmval[1] &= nfsd_suppattrs[cstate->minorversion][1];
getattr->ga_bmval[2] &= nfsd_suppattrs[cstate->minorversion][2];
getattr->ga_fhp = &cstate->current_fh;
return nfs_ok;
}
static __be32
nfsd4_link(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_link *link)
{
__be32 status = nfserr_nofilehandle;
if (!cstate->save_fh.fh_dentry)
return status;
status = nfsd_link(rqstp, &cstate->current_fh,
link->li_name, link->li_namelen, &cstate->save_fh);
if (!status)
set_change_info(&link->li_cinfo, &cstate->current_fh);
return status;
}
static __be32 nfsd4_do_lookupp(struct svc_rqst *rqstp, struct svc_fh *fh)
{
struct svc_fh tmp_fh;
__be32 ret;
fh_init(&tmp_fh, NFS4_FHSIZE);
ret = exp_pseudoroot(rqstp, &tmp_fh);
if (ret)
return ret;
if (tmp_fh.fh_dentry == fh->fh_dentry) {
fh_put(&tmp_fh);
return nfserr_noent;
}
fh_put(&tmp_fh);
return nfsd_lookup(rqstp, fh, "..", 2, fh);
}
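/*
 * nfsd4_do_lookupp() refuses to walk above the pseudo-filesystem root:
 * if the current filehandle already is the pseudoroot, there is no
 * parent to expose, hence nfserr_noent instead of a ".." lookup.
 */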
static __be32
nfsd4_lookupp(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
void *arg)
{
return nfsd4_do_lookupp(rqstp, &cstate->current_fh);
}
static __be32
nfsd4_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_lookup *lookup)
{
return nfsd_lookup(rqstp, &cstate->current_fh,
lookup->lo_name, lookup->lo_len,
&cstate->current_fh);
}
static __be32
nfsd4_read(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_read *read)
{
__be32 status;
read->rd_filp = NULL;
if (read->rd_offset >= OFFSET_MAX)
return nfserr_inval;
/*
* If we do a zero copy read, then a client will see read data
* that reflects the state of the file *after* performing the
* following compound.
*
* To ensure proper ordering, we therefore turn off zero copy if
* the client wants us to do more in this compound:
*/
if (!nfsd4_last_compound_op(rqstp))
clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
/* check stateid */
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
&read->rd_stateid, RD_STATE,
&read->rd_filp, &read->rd_tmp_file);
if (status) {
dprintk("NFSD: nfsd4_read: couldn't process stateid!\n");
goto out;
}
status = nfs_ok;
out:
read->rd_rqstp = rqstp;
read->rd_fhp = &cstate->current_fh;
return status;
}
static __be32
nfsd4_readdir(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_readdir *readdir)
{
u64 cookie = readdir->rd_cookie;
static const nfs4_verifier zeroverf;
/* no need to check permission - this will be done in nfsd_readdir() */
if (readdir->rd_bmval[1] & NFSD_WRITEONLY_ATTRS_WORD1)
return nfserr_inval;
readdir->rd_bmval[0] &= nfsd_suppattrs[cstate->minorversion][0];
readdir->rd_bmval[1] &= nfsd_suppattrs[cstate->minorversion][1];
readdir->rd_bmval[2] &= nfsd_suppattrs[cstate->minorversion][2];
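	/*
	 * Cookie values 1 and 2 are reserved by the NFSv4 spec for the "."
	 * and ".." entries; cookie 0 must carry an all-zero verifier.
	 */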
if ((cookie == 1) || (cookie == 2) ||
(cookie == 0 && memcmp(readdir->rd_verf.data, zeroverf.data, NFS4_VERIFIER_SIZE)))
return nfserr_bad_cookie;
readdir->rd_rqstp = rqstp;
readdir->rd_fhp = &cstate->current_fh;
return nfs_ok;
}
static __be32
nfsd4_readlink(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_readlink *readlink)
{
readlink->rl_rqstp = rqstp;
readlink->rl_fhp = &cstate->current_fh;
return nfs_ok;
}
static __be32
nfsd4_remove(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_remove *remove)
{
__be32 status;
if (opens_in_grace(SVC_NET(rqstp)))
return nfserr_grace;
status = nfsd_unlink(rqstp, &cstate->current_fh, 0,
remove->rm_name, remove->rm_namelen);
if (!status) {
fh_unlock(&cstate->current_fh);
set_change_info(&remove->rm_cinfo, &cstate->current_fh);
}
return status;
}
static __be32
nfsd4_rename(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_rename *rename)
{
__be32 status = nfserr_nofilehandle;
if (!cstate->save_fh.fh_dentry)
return status;
if (opens_in_grace(SVC_NET(rqstp)) &&
!(cstate->save_fh.fh_export->ex_flags & NFSEXP_NOSUBTREECHECK))
return nfserr_grace;
status = nfsd_rename(rqstp, &cstate->save_fh, rename->rn_sname,
rename->rn_snamelen, &cstate->current_fh,
rename->rn_tname, rename->rn_tnamelen);
if (status)
return status;
set_change_info(&rename->rn_sinfo, &cstate->current_fh);
set_change_info(&rename->rn_tinfo, &cstate->save_fh);
return nfs_ok;
}
static __be32
nfsd4_secinfo(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_secinfo *secinfo)
{
struct svc_export *exp;
struct dentry *dentry;
__be32 err;
err = fh_verify(rqstp, &cstate->current_fh, S_IFDIR, NFSD_MAY_EXEC);
if (err)
return err;
err = nfsd_lookup_dentry(rqstp, &cstate->current_fh,
secinfo->si_name, secinfo->si_namelen,
&exp, &dentry);
if (err)
return err;
fh_unlock(&cstate->current_fh);
if (d_really_is_negative(dentry)) {
exp_put(exp);
err = nfserr_noent;
} else
secinfo->si_exp = exp;
dput(dentry);
if (cstate->minorversion)
/* See rfc 5661 section 2.6.3.1.1.8 */
fh_put(&cstate->current_fh);
return err;
}
static __be32
nfsd4_secinfo_no_name(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_secinfo_no_name *sin)
{
__be32 err;
switch (sin->sin_style) {
case NFS4_SECINFO_STYLE4_CURRENT_FH:
break;
case NFS4_SECINFO_STYLE4_PARENT:
err = nfsd4_do_lookupp(rqstp, &cstate->current_fh);
if (err)
return err;
break;
default:
return nfserr_inval;
}
sin->sin_exp = exp_get(cstate->current_fh.fh_export);
fh_put(&cstate->current_fh);
return nfs_ok;
}
static __be32
nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_setattr *setattr)
{
__be32 status = nfs_ok;
int err;
if (setattr->sa_iattr.ia_valid & ATTR_SIZE) {
status = nfs4_preprocess_stateid_op(rqstp, cstate,
&cstate->current_fh, &setattr->sa_stateid,
WR_STATE, NULL, NULL);
if (status) {
dprintk("NFSD: nfsd4_setattr: couldn't process stateid!\n");
return status;
}
}
err = fh_want_write(&cstate->current_fh);
if (err)
return nfserrno(err);
	status = check_attr_support(rqstp, cstate, setattr->sa_bmval,
				    nfsd_attrmask);
if (status)
goto out;
if (setattr->sa_acl != NULL)
status = nfsd4_set_nfs4_acl(rqstp, &cstate->current_fh,
setattr->sa_acl);
if (status)
goto out;
if (setattr->sa_label.len)
status = nfsd4_set_nfs4_label(rqstp, &cstate->current_fh,
&setattr->sa_label);
if (status)
goto out;
status = nfsd_setattr(rqstp, &cstate->current_fh, &setattr->sa_iattr,
0, (time_t)0);
out:
fh_drop_write(&cstate->current_fh);
return status;
}
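/*
 * Build the kvec array for a WRITE: the first element covers whatever
 * fits in the head buffer, the remaining elements map the pages of the
 * page list.  Returns the number of elements filled in.
 */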
static int fill_in_write_vector(struct kvec *vec, struct nfsd4_write *write)
{
int i = 1;
int buflen = write->wr_buflen;
vec[0].iov_base = write->wr_head.iov_base;
vec[0].iov_len = min_t(int, buflen, write->wr_head.iov_len);
buflen -= vec[0].iov_len;
while (buflen) {
vec[i].iov_base = page_address(write->wr_pagelist[i - 1]);
vec[i].iov_len = min_t(int, PAGE_SIZE, buflen);
buflen -= vec[i].iov_len;
i++;
}
return i;
}
static __be32
nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_write *write)
{
stateid_t *stateid = &write->wr_stateid;
struct file *filp = NULL;
__be32 status = nfs_ok;
unsigned long cnt;
int nvecs;
if (write->wr_offset >= OFFSET_MAX)
return nfserr_inval;
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
stateid, WR_STATE, &filp, NULL);
if (status) {
dprintk("NFSD: nfsd4_write: couldn't process stateid!\n");
return status;
}
cnt = write->wr_buflen;
write->wr_how_written = write->wr_stable_how;
gen_boot_verifier(&write->wr_verifier, SVC_NET(rqstp));
nvecs = fill_in_write_vector(rqstp->rq_vec, write);
WARN_ON_ONCE(nvecs > ARRAY_SIZE(rqstp->rq_vec));
status = nfsd_vfs_write(rqstp, &cstate->current_fh, filp,
write->wr_offset, rqstp->rq_vec, nvecs, &cnt,
write->wr_how_written);
fput(filp);
write->wr_bytes_written = cnt;
return status;
}
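/*
 * Resolve the source and destination stateids to open files and verify
 * that both refer to regular files; on success the caller owns a
 * reference to each file.
 */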
static __be32
nfsd4_verify_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
stateid_t *src_stateid, struct file **src,
stateid_t *dst_stateid, struct file **dst)
{
__be32 status;
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->save_fh,
src_stateid, RD_STATE, src, NULL);
if (status) {
dprintk("NFSD: %s: couldn't process src stateid!\n", __func__);
goto out;
}
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
dst_stateid, WR_STATE, dst, NULL);
if (status) {
dprintk("NFSD: %s: couldn't process dst stateid!\n", __func__);
goto out_put_src;
}
/* fix up for NFS-specific error code */
if (!S_ISREG(file_inode(*src)->i_mode) ||
!S_ISREG(file_inode(*dst)->i_mode)) {
status = nfserr_wrong_type;
goto out_put_dst;
}
out:
return status;
out_put_dst:
fput(*dst);
out_put_src:
fput(*src);
goto out;
}
static __be32
nfsd4_clone(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_clone *clone)
{
struct file *src, *dst;
__be32 status;
status = nfsd4_verify_copy(rqstp, cstate, &clone->cl_src_stateid, &src,
&clone->cl_dst_stateid, &dst);
if (status)
goto out;
status = nfsd4_clone_file_range(src, clone->cl_src_pos,
dst, clone->cl_dst_pos, clone->cl_count);
fput(dst);
fput(src);
out:
return status;
}
static __be32
nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_copy *copy)
{
struct file *src, *dst;
__be32 status;
ssize_t bytes;
	status = nfsd4_verify_copy(rqstp, cstate, &copy->cp_src_stateid, &src,
				   &copy->cp_dst_stateid, &dst);
if (status)
goto out;
bytes = nfsd_copy_file_range(src, copy->cp_src_pos,
dst, copy->cp_dst_pos, copy->cp_count);
if (bytes < 0)
status = nfserrno(bytes);
else {
copy->cp_res.wr_bytes_written = bytes;
copy->cp_res.wr_stable_how = NFS_UNSTABLE;
copy->cp_consecutive = 1;
copy->cp_synchronous = 1;
		gen_boot_verifier(&copy->cp_res.wr_verifier, SVC_NET(rqstp));
status = nfs_ok;
}
fput(src);
fput(dst);
out:
return status;
}
static __be32
nfsd4_fallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_fallocate *fallocate, int flags)
{
__be32 status = nfserr_notsupp;
struct file *file;
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
&fallocate->falloc_stateid,
WR_STATE, &file, NULL);
if (status != nfs_ok) {
dprintk("NFSD: nfsd4_fallocate: couldn't process stateid!\n");
return status;
}
status = nfsd4_vfs_fallocate(rqstp, &cstate->current_fh, file,
fallocate->falloc_offset,
fallocate->falloc_length,
flags);
fput(file);
return status;
}
static __be32
nfsd4_allocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_fallocate *fallocate)
{
return nfsd4_fallocate(rqstp, cstate, fallocate, 0);
}
static __be32
nfsd4_deallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_fallocate *fallocate)
{
return nfsd4_fallocate(rqstp, cstate, fallocate,
FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE);
}
static __be32
nfsd4_seek(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_seek *seek)
{
int whence;
__be32 status;
struct file *file;
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
&seek->seek_stateid,
RD_STATE, &file, NULL);
if (status) {
dprintk("NFSD: nfsd4_seek: couldn't process stateid!\n");
return status;
}
switch (seek->seek_whence) {
case NFS4_CONTENT_DATA:
whence = SEEK_DATA;
break;
case NFS4_CONTENT_HOLE:
whence = SEEK_HOLE;
break;
default:
status = nfserr_union_notsupp;
goto out;
}
/*
* Note: This call does change file->f_pos, but nothing in NFSD
 * should ever use file->f_pos.
*/
seek->seek_pos = vfs_llseek(file, seek->seek_offset, whence);
if (seek->seek_pos < 0)
status = nfserrno(seek->seek_pos);
else if (seek->seek_pos >= i_size_read(file_inode(file)))
seek->seek_eof = true;
out:
fput(file);
return status;
}
/* This routine never returns NFS_OK! If there are no other errors, it
* will return NFSERR_SAME or NFSERR_NOT_SAME depending on whether the
* attributes matched. VERIFY is implemented by mapping NFSERR_SAME
* to NFS_OK after the call; NVERIFY by mapping NFSERR_NOT_SAME to NFS_OK.
*/
static __be32
_nfsd4_verify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_verify *verify)
{
__be32 *buf, *p;
int count;
__be32 status;
status = fh_verify(rqstp, &cstate->current_fh, 0, NFSD_MAY_NOP);
if (status)
return status;
status = check_attr_support(rqstp, cstate, verify->ve_bmval, NULL);
if (status)
return status;
if ((verify->ve_bmval[0] & FATTR4_WORD0_RDATTR_ERROR)
|| (verify->ve_bmval[1] & NFSD_WRITEONLY_ATTRS_WORD1))
return nfserr_inval;
if (verify->ve_attrlen & 3)
return nfserr_inval;
/* count in words:
* bitmap_len(1) + bitmap(2) + attr_len(1) = 4
*/
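	/* e.g., a ve_attrlen of 8 bytes gives count = 4 + 2 = 6 words (24 bytes) */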
count = 4 + (verify->ve_attrlen >> 2);
buf = kmalloc(count << 2, GFP_KERNEL);
if (!buf)
return nfserr_jukebox;
p = buf;
status = nfsd4_encode_fattr_to_buf(&p, count, &cstate->current_fh,
cstate->current_fh.fh_export,
cstate->current_fh.fh_dentry,
verify->ve_bmval,
rqstp, 0);
/*
* If nfsd4_encode_fattr() ran out of space, assume that's because
* the attributes are longer (hence different) than those given:
*/
if (status == nfserr_resource)
status = nfserr_not_same;
if (status)
goto out_kfree;
/* skip bitmap */
p = buf + 1 + ntohl(buf[0]);
status = nfserr_not_same;
if (ntohl(*p++) != verify->ve_attrlen)
goto out_kfree;
if (!memcmp(p, verify->ve_attrval, verify->ve_attrlen))
status = nfserr_same;
out_kfree:
kfree(buf);
return status;
}
static __be32
nfsd4_nverify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_verify *verify)
{
__be32 status;
status = _nfsd4_verify(rqstp, cstate, verify);
return status == nfserr_not_same ? nfs_ok : status;
}
static __be32
nfsd4_verify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_verify *verify)
{
__be32 status;
status = _nfsd4_verify(rqstp, cstate, verify);
return status == nfserr_same ? nfs_ok : status;
}
#ifdef CONFIG_NFSD_PNFS
static const struct nfsd4_layout_ops *
nfsd4_layout_verify(struct svc_export *exp, unsigned int layout_type)
{
if (!exp->ex_layout_types) {
dprintk("%s: export does not support pNFS\n", __func__);
return NULL;
}
if (!(exp->ex_layout_types & (1 << layout_type))) {
dprintk("%s: layout type %d not supported\n",
__func__, layout_type);
return NULL;
}
return nfsd4_layout_ops[layout_type];
}
static __be32
nfsd4_getdeviceinfo(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
struct nfsd4_getdeviceinfo *gdp)
{
const struct nfsd4_layout_ops *ops;
struct nfsd4_deviceid_map *map;
struct svc_export *exp;
__be32 nfserr;
dprintk("%s: layout_type %u dev_id [0x%llx:0x%x] maxcnt %u\n",
__func__,
gdp->gd_layout_type,
gdp->gd_devid.fsid_idx, gdp->gd_devid.generation,
gdp->gd_maxcount);
map = nfsd4_find_devid_map(gdp->gd_devid.fsid_idx);
if (!map) {
dprintk("%s: couldn't find device ID to export mapping!\n",
__func__);
return nfserr_noent;
}
exp = rqst_exp_find(rqstp, map->fsid_type, map->fsid);
if (IS_ERR(exp)) {
dprintk("%s: could not find device id\n", __func__);
return nfserr_noent;
}
nfserr = nfserr_layoutunavailable;
ops = nfsd4_layout_verify(exp, gdp->gd_layout_type);
if (!ops)
goto out;
nfserr = nfs_ok;
if (gdp->gd_maxcount != 0) {
nfserr = ops->proc_getdeviceinfo(exp->ex_path.mnt->mnt_sb,
rqstp, cstate->session->se_client, gdp);
}
gdp->gd_notify_types &= ops->notify_types;
out:
exp_put(exp);
return nfserr;
}
static __be32
nfsd4_layoutget(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
struct nfsd4_layoutget *lgp)
{
struct svc_fh *current_fh = &cstate->current_fh;
const struct nfsd4_layout_ops *ops;
struct nfs4_layout_stateid *ls;
__be32 nfserr;
int accmode;
switch (lgp->lg_seg.iomode) {
case IOMODE_READ:
accmode = NFSD_MAY_READ;
break;
case IOMODE_RW:
accmode = NFSD_MAY_READ | NFSD_MAY_WRITE;
break;
default:
dprintk("%s: invalid iomode %d\n",
__func__, lgp->lg_seg.iomode);
nfserr = nfserr_badiomode;
goto out;
}
nfserr = fh_verify(rqstp, current_fh, 0, accmode);
if (nfserr)
goto out;
nfserr = nfserr_layoutunavailable;
ops = nfsd4_layout_verify(current_fh->fh_export, lgp->lg_layout_type);
if (!ops)
goto out;
/*
* Verify minlength and range as per RFC5661:
* o If loga_length is less than loga_minlength,
* the metadata server MUST return NFS4ERR_INVAL.
* o If the sum of loga_offset and loga_minlength exceeds
* NFS4_UINT64_MAX, and loga_minlength is not
* NFS4_UINT64_MAX, the error NFS4ERR_INVAL MUST result.
* o If the sum of loga_offset and loga_length exceeds
* NFS4_UINT64_MAX, and loga_length is not NFS4_UINT64_MAX,
* the error NFS4ERR_INVAL MUST result.
*/
nfserr = nfserr_inval;
if (lgp->lg_seg.length < lgp->lg_minlength ||
(lgp->lg_minlength != NFS4_MAX_UINT64 &&
lgp->lg_minlength > NFS4_MAX_UINT64 - lgp->lg_seg.offset) ||
(lgp->lg_seg.length != NFS4_MAX_UINT64 &&
lgp->lg_seg.length > NFS4_MAX_UINT64 - lgp->lg_seg.offset))
goto out;
if (lgp->lg_seg.length == 0)
goto out;
nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lgp->lg_sid,
true, lgp->lg_layout_type, &ls);
if (nfserr) {
trace_layout_get_lookup_fail(&lgp->lg_sid);
goto out;
}
nfserr = nfserr_recallconflict;
if (atomic_read(&ls->ls_stid.sc_file->fi_lo_recalls))
goto out_put_stid;
nfserr = ops->proc_layoutget(d_inode(current_fh->fh_dentry),
current_fh, lgp);
if (nfserr)
goto out_put_stid;
nfserr = nfsd4_insert_layout(lgp, ls);
out_put_stid:
mutex_unlock(&ls->ls_mutex);
nfs4_put_stid(&ls->ls_stid);
out:
return nfserr;
}
static __be32
nfsd4_layoutcommit(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
struct nfsd4_layoutcommit *lcp)
{
const struct nfsd4_layout_seg *seg = &lcp->lc_seg;
struct svc_fh *current_fh = &cstate->current_fh;
const struct nfsd4_layout_ops *ops;
loff_t new_size = lcp->lc_last_wr + 1;
struct inode *inode;
struct nfs4_layout_stateid *ls;
__be32 nfserr;
nfserr = fh_verify(rqstp, current_fh, 0, NFSD_MAY_WRITE);
if (nfserr)
goto out;
nfserr = nfserr_layoutunavailable;
ops = nfsd4_layout_verify(current_fh->fh_export, lcp->lc_layout_type);
if (!ops)
goto out;
inode = d_inode(current_fh->fh_dentry);
nfserr = nfserr_inval;
if (new_size <= seg->offset) {
dprintk("pnfsd: last write before layout segment\n");
goto out;
}
if (new_size > seg->offset + seg->length) {
dprintk("pnfsd: last write beyond layout segment\n");
goto out;
}
if (!lcp->lc_newoffset && new_size > i_size_read(inode)) {
dprintk("pnfsd: layoutcommit beyond EOF\n");
goto out;
}
nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lcp->lc_sid,
false, lcp->lc_layout_type,
&ls);
if (nfserr) {
trace_layout_commit_lookup_fail(&lcp->lc_sid);
/* fixup error code as per RFC5661 */
if (nfserr == nfserr_bad_stateid)
nfserr = nfserr_badlayout;
goto out;
}
/* LAYOUTCOMMIT does not require any serialization */
mutex_unlock(&ls->ls_mutex);
if (new_size > i_size_read(inode)) {
lcp->lc_size_chg = 1;
lcp->lc_newsize = new_size;
} else {
lcp->lc_size_chg = 0;
}
nfserr = ops->proc_layoutcommit(inode, lcp);
nfs4_put_stid(&ls->ls_stid);
out:
return nfserr;
}
static __be32
nfsd4_layoutreturn(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
struct nfsd4_layoutreturn *lrp)
{
struct svc_fh *current_fh = &cstate->current_fh;
__be32 nfserr;
nfserr = fh_verify(rqstp, current_fh, 0, NFSD_MAY_NOP);
if (nfserr)
goto out;
nfserr = nfserr_layoutunavailable;
if (!nfsd4_layout_verify(current_fh->fh_export, lrp->lr_layout_type))
goto out;
switch (lrp->lr_seg.iomode) {
case IOMODE_READ:
case IOMODE_RW:
case IOMODE_ANY:
break;
default:
dprintk("%s: invalid iomode %d\n", __func__,
lrp->lr_seg.iomode);
nfserr = nfserr_inval;
goto out;
}
switch (lrp->lr_return_type) {
case RETURN_FILE:
nfserr = nfsd4_return_file_layouts(rqstp, cstate, lrp);
break;
case RETURN_FSID:
case RETURN_ALL:
nfserr = nfsd4_return_client_layouts(rqstp, cstate, lrp);
break;
default:
dprintk("%s: invalid return_type %d\n", __func__,
lrp->lr_return_type);
nfserr = nfserr_inval;
break;
}
out:
return nfserr;
}
#endif /* CONFIG_NFSD_PNFS */
/*
* NULL call.
*/
static __be32
nfsd4_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
{
return nfs_ok;
}
static inline void nfsd4_increment_op_stats(u32 opnum)
{
if (opnum >= FIRST_NFS4_OP && opnum <= LAST_NFS4_OP)
nfsdstats.nfs4_opcount[opnum]++;
}
typedef __be32(*nfsd4op_func)(struct svc_rqst *, struct nfsd4_compound_state *,
void *);
typedef u32(*nfsd4op_rsize)(struct svc_rqst *, struct nfsd4_op *op);
typedef void(*stateid_setter)(struct nfsd4_compound_state *, void *);
typedef void(*stateid_getter)(struct nfsd4_compound_state *, void *);
enum nfsd4_op_flags {
ALLOWED_WITHOUT_FH = 1 << 0, /* No current filehandle required */
ALLOWED_ON_ABSENT_FS = 1 << 1, /* ops processed on absent fs */
	ALLOWED_AS_FIRST_OP = 1 << 2, /* ops required first in compound */
/* For rfc 5661 section 2.6.3.1.1: */
OP_HANDLES_WRONGSEC = 1 << 3,
OP_IS_PUTFH_LIKE = 1 << 4,
/*
* These are the ops whose result size we estimate before
* encoding, to avoid performing an op then not being able to
* respond or cache a response. This includes writes and setattrs
* as well as the operations usually called "nonidempotent":
*/
OP_MODIFIES_SOMETHING = 1 << 5,
/*
* Cache compounds containing these ops in the xid-based drc:
* We use the DRC for compounds containing non-idempotent
* operations, *except* those that are 4.1-specific (since
* sessions provide their own EOS), and except for stateful
* operations other than setclientid and setclientid_confirm
* (since sequence numbers provide EOS for open, lock, etc in
* the v4.0 case).
*/
OP_CACHEME = 1 << 6,
/*
* These are ops which clear current state id.
*/
OP_CLEAR_STATEID = 1 << 7,
};
struct nfsd4_operation {
nfsd4op_func op_func;
u32 op_flags;
char *op_name;
/* Try to get response size before operation */
nfsd4op_rsize op_rsize_bop;
stateid_getter op_get_currentstateid;
stateid_setter op_set_currentstateid;
};
static struct nfsd4_operation nfsd4_ops[];
static const char *nfsd4_op_name(unsigned opnum);
/*
* Enforce NFSv4.1 COMPOUND ordering rules:
*
* Also note, enforced elsewhere:
* - SEQUENCE other than as first op results in
* NFS4ERR_SEQUENCE_POS. (Enforced in nfsd4_sequence().)
* - BIND_CONN_TO_SESSION must be the only op in its compound.
* (Enforced in nfsd4_bind_conn_to_session().)
* - DESTROY_SESSION must be the final operation in a compound, if
* sessionid's in SEQUENCE and DESTROY_SESSION are the same.
* (Enforced in nfsd4_destroy_session().)
*/
static __be32 nfs41_check_op_ordering(struct nfsd4_compoundargs *args)
{
struct nfsd4_op *op = &args->ops[0];
/* These ordering requirements don't apply to NFSv4.0: */
if (args->minorversion == 0)
return nfs_ok;
/* This is weird, but OK, not our problem: */
if (args->opcnt == 0)
return nfs_ok;
if (op->status == nfserr_op_illegal)
return nfs_ok;
if (!(nfsd4_ops[op->opnum].op_flags & ALLOWED_AS_FIRST_OP))
return nfserr_op_not_in_session;
if (op->opnum == OP_SEQUENCE)
return nfs_ok;
if (args->opcnt != 1)
return nfserr_not_only_op;
return nfs_ok;
}
static inline struct nfsd4_operation *OPDESC(struct nfsd4_op *op)
{
return &nfsd4_ops[op->opnum];
}
bool nfsd4_cache_this_op(struct nfsd4_op *op)
{
if (op->opnum == OP_ILLEGAL)
return false;
return OPDESC(op)->op_flags & OP_CACHEME;
}
static bool need_wrongsec_check(struct svc_rqst *rqstp)
{
struct nfsd4_compoundres *resp = rqstp->rq_resp;
struct nfsd4_compoundargs *argp = rqstp->rq_argp;
struct nfsd4_op *this = &argp->ops[resp->opcnt - 1];
struct nfsd4_op *next = &argp->ops[resp->opcnt];
struct nfsd4_operation *thisd;
struct nfsd4_operation *nextd;
thisd = OPDESC(this);
/*
	 * Most ops check wrongsec on their own; only the putfh-like ops
* have special rules.
*/
if (!(thisd->op_flags & OP_IS_PUTFH_LIKE))
return false;
/*
* rfc 5661 2.6.3.1.1.6: don't bother erroring out a
* put-filehandle operation if we're not going to use the
* result:
*/
if (argp->opcnt == resp->opcnt)
return false;
if (next->opnum == OP_ILLEGAL)
return false;
nextd = OPDESC(next);
/*
* Rest of 2.6.3.1.1: certain operations will return WRONGSEC
* errors themselves as necessary; others should check for them
* now:
*/
return !(nextd->op_flags & OP_HANDLES_WRONGSEC);
}
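/*
 * Set up the response xdr_stream over the RPC reply buffer, leaving room
 * at the end of the head page for whatever the RPC auth code may append.
 */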
static void svcxdr_init_encode(struct svc_rqst *rqstp,
struct nfsd4_compoundres *resp)
{
struct xdr_stream *xdr = &resp->xdr;
struct xdr_buf *buf = &rqstp->rq_res;
struct kvec *head = buf->head;
xdr->buf = buf;
xdr->iov = head;
xdr->p = head->iov_base + head->iov_len;
xdr->end = head->iov_base + PAGE_SIZE - rqstp->rq_auth_slack;
/* Tail and page_len should be zero at this point: */
buf->len = buf->head[0].iov_len;
xdr->scratch.iov_len = 0;
xdr->page_ptr = buf->pages - 1;
buf->buflen = PAGE_SIZE * (1 + rqstp->rq_page_end - buf->pages)
- rqstp->rq_auth_slack;
}
/*
* COMPOUND call.
*/
static __be32
nfsd4_proc_compound(struct svc_rqst *rqstp,
struct nfsd4_compoundargs *args,
struct nfsd4_compoundres *resp)
{
struct nfsd4_op *op;
struct nfsd4_operation *opdesc;
struct nfsd4_compound_state *cstate = &resp->cstate;
struct svc_fh *current_fh = &cstate->current_fh;
struct svc_fh *save_fh = &cstate->save_fh;
__be32 status;
svcxdr_init_encode(rqstp, resp);
resp->tagp = resp->xdr.p;
/* reserve space for: taglen, tag, and opcnt */
xdr_reserve_space(&resp->xdr, 8 + args->taglen);
resp->taglen = args->taglen;
resp->tag = args->tag;
resp->rqstp = rqstp;
cstate->minorversion = args->minorversion;
fh_init(current_fh, NFS4_FHSIZE);
fh_init(save_fh, NFS4_FHSIZE);
/*
* Don't use the deferral mechanism for NFSv4; compounds make it
* too hard to avoid non-idempotency problems.
*/
clear_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
/*
* According to RFC3010, this takes precedence over all other errors.
*/
status = nfserr_minor_vers_mismatch;
if (nfsd_minorversion(args->minorversion, NFSD_TEST) <= 0)
goto out;
status = nfs41_check_op_ordering(args);
if (status) {
op = &args->ops[0];
op->status = status;
goto encode_op;
}
while (!status && resp->opcnt < args->opcnt) {
op = &args->ops[resp->opcnt++];
dprintk("nfsv4 compound op #%d/%d: %d (%s)\n",
resp->opcnt, args->opcnt, op->opnum,
nfsd4_op_name(op->opnum));
/*
* The XDR decode routines may have pre-set op->status;
* for example, if there is a miscellaneous XDR error
* it will be set to nfserr_bad_xdr.
*/
if (op->status) {
if (op->opnum == OP_OPEN)
op->status = nfsd4_open_omfg(rqstp, cstate, op);
goto encode_op;
}
opdesc = OPDESC(op);
if (!current_fh->fh_dentry) {
if (!(opdesc->op_flags & ALLOWED_WITHOUT_FH)) {
op->status = nfserr_nofilehandle;
goto encode_op;
}
} else if (current_fh->fh_export->ex_fslocs.migrated &&
!(opdesc->op_flags & ALLOWED_ON_ABSENT_FS)) {
op->status = nfserr_moved;
goto encode_op;
}
fh_clear_wcc(current_fh);
/* If op is non-idempotent */
if (opdesc->op_flags & OP_MODIFIES_SOMETHING) {
/*
* Don't execute this op if we couldn't encode a
			 * successful reply:
*/
u32 plen = opdesc->op_rsize_bop(rqstp, op);
/*
* Plus if there's another operation, make sure
* we'll have space to at least encode an error:
*/
if (resp->opcnt < args->opcnt)
plen += COMPOUND_ERR_SLACK_SPACE;
op->status = nfsd4_check_resp_size(resp, plen);
}
if (op->status)
goto encode_op;
if (opdesc->op_get_currentstateid)
opdesc->op_get_currentstateid(cstate, &op->u);
op->status = opdesc->op_func(rqstp, cstate, &op->u);
if (!op->status) {
if (opdesc->op_set_currentstateid)
opdesc->op_set_currentstateid(cstate, &op->u);
if (opdesc->op_flags & OP_CLEAR_STATEID)
clear_current_stateid(cstate);
if (need_wrongsec_check(rqstp))
op->status = check_nfsd_access(current_fh->fh_export, rqstp);
}
encode_op:
/* Only from SEQUENCE */
if (cstate->status == nfserr_replay_cache) {
dprintk("%s NFS4.1 replay from cache\n", __func__);
status = op->status;
goto out;
}
if (op->status == nfserr_replay_me) {
op->replay = &cstate->replay_owner->so_replay;
nfsd4_encode_replay(&resp->xdr, op);
status = op->status = op->replay->rp_status;
} else {
nfsd4_encode_operation(resp, op);
status = op->status;
}
dprintk("nfsv4 compound op %p opcnt %d #%d: %d: status %d\n",
args->ops, args->opcnt, resp->opcnt, op->opnum,
be32_to_cpu(status));
nfsd4_cstate_clear_replay(cstate);
nfsd4_increment_op_stats(op->opnum);
}
cstate->status = status;
fh_put(current_fh);
fh_put(save_fh);
BUG_ON(cstate->replay_owner);
out:
/* Reset deferral mechanism for RPC deferrals */
set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
dprintk("nfsv4 compound returned %d\n", ntohl(status));
return status;
}
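/*
 * Worst-case reply sizes, in XDR words, used by the op_rsize_bop
 * estimators below.
 */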
#define op_encode_hdr_size (2)
#define op_encode_stateid_maxsz (XDR_QUADLEN(NFS4_STATEID_SIZE))
#define op_encode_verifier_maxsz (XDR_QUADLEN(NFS4_VERIFIER_SIZE))
#define op_encode_change_info_maxsz (5)
#define nfs4_fattr_bitmap_maxsz (4)
/* We'll fall back on returning no lockowner if we run out of space: */
#define op_encode_lockowner_maxsz (0)
#define op_encode_lock_denied_maxsz (8 + op_encode_lockowner_maxsz)
#define nfs4_owner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ))
#define op_encode_ace_maxsz (3 + nfs4_owner_maxsz)
#define op_encode_delegation_maxsz (1 + op_encode_stateid_maxsz + 1 + \
op_encode_ace_maxsz)
#define op_encode_channel_attrs_maxsz (6 + 1 + 1)
static inline u32 nfsd4_only_status_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size) * sizeof(__be32);
}
static inline u32 nfsd4_status_stateid_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_stateid_maxsz)* sizeof(__be32);
}
static inline u32 nfsd4_access_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
/* ac_supported, ac_resp_access */
return (op_encode_hdr_size + 2)* sizeof(__be32);
}
static inline u32 nfsd4_commit_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_verifier_maxsz) * sizeof(__be32);
}
static inline u32 nfsd4_create_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_change_info_maxsz
+ nfs4_fattr_bitmap_maxsz) * sizeof(__be32);
}
/*
 * Note that since this is an idempotent operation, we won't insist on failing
* the op prematurely if the estimate is too large. We may turn off splice
* reads unnecessarily.
*/
static inline u32 nfsd4_getattr_rsize(struct svc_rqst *rqstp,
struct nfsd4_op *op)
{
u32 *bmap = op->u.getattr.ga_bmval;
u32 bmap0 = bmap[0], bmap1 = bmap[1], bmap2 = bmap[2];
u32 ret = 0;
if (bmap0 & FATTR4_WORD0_ACL)
return svc_max_payload(rqstp);
if (bmap0 & FATTR4_WORD0_FS_LOCATIONS)
return svc_max_payload(rqstp);
if (bmap1 & FATTR4_WORD1_OWNER) {
ret += IDMAP_NAMESZ + 4;
bmap1 &= ~FATTR4_WORD1_OWNER;
}
if (bmap1 & FATTR4_WORD1_OWNER_GROUP) {
ret += IDMAP_NAMESZ + 4;
bmap1 &= ~FATTR4_WORD1_OWNER_GROUP;
}
if (bmap0 & FATTR4_WORD0_FILEHANDLE) {
ret += NFS4_FHSIZE + 4;
bmap0 &= ~FATTR4_WORD0_FILEHANDLE;
}
if (bmap2 & FATTR4_WORD2_SECURITY_LABEL) {
ret += NFS4_MAXLABELLEN + 12;
bmap2 &= ~FATTR4_WORD2_SECURITY_LABEL;
}
/*
* Largest of remaining attributes are 16 bytes (e.g.,
* supported_attributes)
*/
ret += 16 * (hweight32(bmap0) + hweight32(bmap1) + hweight32(bmap2));
/* bitmask, length */
ret += 20;
return ret;
}
static inline u32 nfsd4_getfh_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + 1) * sizeof(__be32) + NFS4_FHSIZE;
}
static inline u32 nfsd4_link_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_change_info_maxsz)
* sizeof(__be32);
}
static inline u32 nfsd4_lock_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_lock_denied_maxsz)
* sizeof(__be32);
}
static inline u32 nfsd4_open_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_stateid_maxsz
+ op_encode_change_info_maxsz + 1
+ nfs4_fattr_bitmap_maxsz
+ op_encode_delegation_maxsz) * sizeof(__be32);
}
static inline u32 nfsd4_read_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
u32 maxcount = 0, rlen = 0;
maxcount = svc_max_payload(rqstp);
rlen = min(op->u.read.rd_length, maxcount);
return (op_encode_hdr_size + 2 + XDR_QUADLEN(rlen)) * sizeof(__be32);
}
static inline u32 nfsd4_readdir_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
u32 maxcount = 0, rlen = 0;
maxcount = svc_max_payload(rqstp);
rlen = min(op->u.readdir.rd_maxcount, maxcount);
return (op_encode_hdr_size + op_encode_verifier_maxsz +
XDR_QUADLEN(rlen)) * sizeof(__be32);
}
static inline u32 nfsd4_readlink_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + 1) * sizeof(__be32) + PAGE_SIZE;
}
static inline u32 nfsd4_remove_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_change_info_maxsz)
* sizeof(__be32);
}
static inline u32 nfsd4_rename_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_change_info_maxsz
+ op_encode_change_info_maxsz) * sizeof(__be32);
}
static inline u32 nfsd4_sequence_rsize(struct svc_rqst *rqstp,
struct nfsd4_op *op)
{
return (op_encode_hdr_size
+ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 5) * sizeof(__be32);
}
static inline u32 nfsd4_test_stateid_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + 1 + op->u.test_stateid.ts_num_ids)
* sizeof(__be32);
}
static inline u32 nfsd4_setattr_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + nfs4_fattr_bitmap_maxsz) * sizeof(__be32);
}
static inline u32 nfsd4_secinfo_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + RPC_AUTH_MAXFLAVOR *
(4 + XDR_QUADLEN(GSS_OID_MAX_LEN))) * sizeof(__be32);
}
static inline u32 nfsd4_setclientid_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + 2 + XDR_QUADLEN(NFS4_VERIFIER_SIZE)) *
sizeof(__be32);
}
static inline u32 nfsd4_write_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + 2 + op_encode_verifier_maxsz) * sizeof(__be32);
}
static inline u32 nfsd4_exchange_id_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + 2 + 1 + /* eir_clientid, eir_sequenceid */\
1 + 1 + /* eir_flags, spr_how */\
4 + /* spo_must_enforce & _allow with bitmap */\
2 + /*eir_server_owner.so_minor_id */\
/* eir_server_owner.so_major_id<> */\
XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 +\
/* eir_server_scope<> */\
XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 +\
1 + /* eir_server_impl_id array length */\
0 /* ignored eir_server_impl_id contents */) * sizeof(__be32);
}
static inline u32 nfsd4_bind_conn_to_session_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + \
XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + /* bctsr_sessid */\
2 /* bctsr_dir, use_conn_in_rdma_mode */) * sizeof(__be32);
}
static inline u32 nfsd4_create_session_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + \
XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + /* sessionid */\
2 + /* csr_sequence, csr_flags */\
op_encode_channel_attrs_maxsz + \
op_encode_channel_attrs_maxsz) * sizeof(__be32);
}
static inline u32 nfsd4_copy_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size +
1 /* wr_callback */ +
op_encode_stateid_maxsz /* wr_callback */ +
2 /* wr_count */ +
1 /* wr_committed */ +
op_encode_verifier_maxsz +
1 /* cr_consecutive */ +
1 /* cr_synchronous */) * sizeof(__be32);
}
#ifdef CONFIG_NFSD_PNFS
static inline u32 nfsd4_getdeviceinfo_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
u32 maxcount = 0, rlen = 0;
maxcount = svc_max_payload(rqstp);
rlen = min(op->u.getdeviceinfo.gd_maxcount, maxcount);
return (op_encode_hdr_size +
1 /* gd_layout_type*/ +
XDR_QUADLEN(rlen) +
2 /* gd_notify_types */) * sizeof(__be32);
}
/*
 * At this stage we don't really know which layout driver will handle the request,
* so we need to define an arbitrary upper bound here.
*/
#define MAX_LAYOUT_SIZE 128
static inline u32 nfsd4_layoutget_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size +
1 /* logr_return_on_close */ +
op_encode_stateid_maxsz +
1 /* nr of layouts */ +
MAX_LAYOUT_SIZE) * sizeof(__be32);
}
static inline u32 nfsd4_layoutcommit_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size +
1 /* locr_newsize */ +
2 /* ns_size */) * sizeof(__be32);
}
static inline u32 nfsd4_layoutreturn_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size +
1 /* lrs_stateid */ +
op_encode_stateid_maxsz) * sizeof(__be32);
}
#endif /* CONFIG_NFSD_PNFS */
static inline u32 nfsd4_seek_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + 3) * sizeof(__be32);
}
static struct nfsd4_operation nfsd4_ops[] = {
[OP_ACCESS] = {
.op_func = (nfsd4op_func)nfsd4_access,
.op_name = "OP_ACCESS",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_access_rsize,
},
[OP_CLOSE] = {
.op_func = (nfsd4op_func)nfsd4_close,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_CLOSE",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize,
.op_get_currentstateid = (stateid_getter)nfsd4_get_closestateid,
.op_set_currentstateid = (stateid_setter)nfsd4_set_closestateid,
},
[OP_COMMIT] = {
.op_func = (nfsd4op_func)nfsd4_commit,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_COMMIT",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_commit_rsize,
},
[OP_CREATE] = {
.op_func = (nfsd4op_func)nfsd4_create,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME | OP_CLEAR_STATEID,
.op_name = "OP_CREATE",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_create_rsize,
},
[OP_DELEGRETURN] = {
.op_func = (nfsd4op_func)nfsd4_delegreturn,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_DELEGRETURN",
.op_rsize_bop = nfsd4_only_status_rsize,
.op_get_currentstateid = (stateid_getter)nfsd4_get_delegreturnstateid,
},
[OP_GETATTR] = {
.op_func = (nfsd4op_func)nfsd4_getattr,
.op_flags = ALLOWED_ON_ABSENT_FS,
.op_rsize_bop = nfsd4_getattr_rsize,
.op_name = "OP_GETATTR",
},
[OP_GETFH] = {
.op_func = (nfsd4op_func)nfsd4_getfh,
.op_name = "OP_GETFH",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_getfh_rsize,
},
[OP_LINK] = {
.op_func = (nfsd4op_func)nfsd4_link,
.op_flags = ALLOWED_ON_ABSENT_FS | OP_MODIFIES_SOMETHING
| OP_CACHEME,
.op_name = "OP_LINK",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_link_rsize,
},
[OP_LOCK] = {
.op_func = (nfsd4op_func)nfsd4_lock,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_LOCK",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_lock_rsize,
.op_set_currentstateid = (stateid_setter)nfsd4_set_lockstateid,
},
[OP_LOCKT] = {
.op_func = (nfsd4op_func)nfsd4_lockt,
.op_name = "OP_LOCKT",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_lock_rsize,
},
[OP_LOCKU] = {
.op_func = (nfsd4op_func)nfsd4_locku,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_LOCKU",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize,
.op_get_currentstateid = (stateid_getter)nfsd4_get_lockustateid,
},
[OP_LOOKUP] = {
.op_func = (nfsd4op_func)nfsd4_lookup,
.op_flags = OP_HANDLES_WRONGSEC | OP_CLEAR_STATEID,
.op_name = "OP_LOOKUP",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_LOOKUPP] = {
.op_func = (nfsd4op_func)nfsd4_lookupp,
.op_flags = OP_HANDLES_WRONGSEC | OP_CLEAR_STATEID,
.op_name = "OP_LOOKUPP",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_NVERIFY] = {
.op_func = (nfsd4op_func)nfsd4_nverify,
.op_name = "OP_NVERIFY",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_OPEN] = {
.op_func = (nfsd4op_func)nfsd4_open,
.op_flags = OP_HANDLES_WRONGSEC | OP_MODIFIES_SOMETHING,
.op_name = "OP_OPEN",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_open_rsize,
.op_set_currentstateid = (stateid_setter)nfsd4_set_openstateid,
},
[OP_OPEN_CONFIRM] = {
.op_func = (nfsd4op_func)nfsd4_open_confirm,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_OPEN_CONFIRM",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize,
},
[OP_OPEN_DOWNGRADE] = {
.op_func = (nfsd4op_func)nfsd4_open_downgrade,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_OPEN_DOWNGRADE",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize,
.op_get_currentstateid = (stateid_getter)nfsd4_get_opendowngradestateid,
.op_set_currentstateid = (stateid_setter)nfsd4_set_opendowngradestateid,
},
[OP_PUTFH] = {
.op_func = (nfsd4op_func)nfsd4_putfh,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_IS_PUTFH_LIKE | OP_CLEAR_STATEID,
.op_name = "OP_PUTFH",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_PUTPUBFH] = {
.op_func = (nfsd4op_func)nfsd4_putrootfh,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_IS_PUTFH_LIKE | OP_CLEAR_STATEID,
.op_name = "OP_PUTPUBFH",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_PUTROOTFH] = {
.op_func = (nfsd4op_func)nfsd4_putrootfh,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_IS_PUTFH_LIKE | OP_CLEAR_STATEID,
.op_name = "OP_PUTROOTFH",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_READ] = {
.op_func = (nfsd4op_func)nfsd4_read,
.op_name = "OP_READ",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_read_rsize,
.op_get_currentstateid = (stateid_getter)nfsd4_get_readstateid,
},
[OP_READDIR] = {
.op_func = (nfsd4op_func)nfsd4_readdir,
.op_name = "OP_READDIR",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_readdir_rsize,
},
[OP_READLINK] = {
.op_func = (nfsd4op_func)nfsd4_readlink,
.op_name = "OP_READLINK",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_readlink_rsize,
},
[OP_REMOVE] = {
.op_func = (nfsd4op_func)nfsd4_remove,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_REMOVE",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_remove_rsize,
},
[OP_RENAME] = {
.op_func = (nfsd4op_func)nfsd4_rename,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_RENAME",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_rename_rsize,
},
[OP_RENEW] = {
.op_func = (nfsd4op_func)nfsd4_renew,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_MODIFIES_SOMETHING,
.op_name = "OP_RENEW",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_RESTOREFH] = {
.op_func = (nfsd4op_func)nfsd4_restorefh,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_IS_PUTFH_LIKE | OP_MODIFIES_SOMETHING,
.op_name = "OP_RESTOREFH",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_SAVEFH] = {
.op_func = (nfsd4op_func)nfsd4_savefh,
.op_flags = OP_HANDLES_WRONGSEC | OP_MODIFIES_SOMETHING,
.op_name = "OP_SAVEFH",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_SECINFO] = {
.op_func = (nfsd4op_func)nfsd4_secinfo,
.op_flags = OP_HANDLES_WRONGSEC,
.op_name = "OP_SECINFO",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_secinfo_rsize,
},
[OP_SETATTR] = {
.op_func = (nfsd4op_func)nfsd4_setattr,
.op_name = "OP_SETATTR",
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_rsize_bop = (nfsd4op_rsize)nfsd4_setattr_rsize,
.op_get_currentstateid = (stateid_getter)nfsd4_get_setattrstateid,
},
[OP_SETCLIENTID] = {
.op_func = (nfsd4op_func)nfsd4_setclientid,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_SETCLIENTID",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_setclientid_rsize,
},
[OP_SETCLIENTID_CONFIRM] = {
.op_func = (nfsd4op_func)nfsd4_setclientid_confirm,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_SETCLIENTID_CONFIRM",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_VERIFY] = {
.op_func = (nfsd4op_func)nfsd4_verify,
.op_name = "OP_VERIFY",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_WRITE] = {
.op_func = (nfsd4op_func)nfsd4_write,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_WRITE",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_write_rsize,
.op_get_currentstateid = (stateid_getter)nfsd4_get_writestateid,
},
[OP_RELEASE_LOCKOWNER] = {
.op_func = (nfsd4op_func)nfsd4_release_lockowner,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_MODIFIES_SOMETHING,
.op_name = "OP_RELEASE_LOCKOWNER",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
/* NFSv4.1 operations */
[OP_EXCHANGE_ID] = {
.op_func = (nfsd4op_func)nfsd4_exchange_id,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
| OP_MODIFIES_SOMETHING,
.op_name = "OP_EXCHANGE_ID",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_exchange_id_rsize,
},
[OP_BACKCHANNEL_CTL] = {
.op_func = (nfsd4op_func)nfsd4_backchannel_ctl,
.op_flags = ALLOWED_WITHOUT_FH | OP_MODIFIES_SOMETHING,
.op_name = "OP_BACKCHANNEL_CTL",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_BIND_CONN_TO_SESSION] = {
.op_func = (nfsd4op_func)nfsd4_bind_conn_to_session,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
| OP_MODIFIES_SOMETHING,
.op_name = "OP_BIND_CONN_TO_SESSION",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_bind_conn_to_session_rsize,
},
[OP_CREATE_SESSION] = {
.op_func = (nfsd4op_func)nfsd4_create_session,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
| OP_MODIFIES_SOMETHING,
.op_name = "OP_CREATE_SESSION",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_create_session_rsize,
},
[OP_DESTROY_SESSION] = {
.op_func = (nfsd4op_func)nfsd4_destroy_session,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
| OP_MODIFIES_SOMETHING,
.op_name = "OP_DESTROY_SESSION",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_SEQUENCE] = {
.op_func = (nfsd4op_func)nfsd4_sequence,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
.op_name = "OP_SEQUENCE",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_sequence_rsize,
},
[OP_DESTROY_CLIENTID] = {
.op_func = (nfsd4op_func)nfsd4_destroy_clientid,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
| OP_MODIFIES_SOMETHING,
.op_name = "OP_DESTROY_CLIENTID",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_RECLAIM_COMPLETE] = {
.op_func = (nfsd4op_func)nfsd4_reclaim_complete,
.op_flags = ALLOWED_WITHOUT_FH | OP_MODIFIES_SOMETHING,
.op_name = "OP_RECLAIM_COMPLETE",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_SECINFO_NO_NAME] = {
.op_func = (nfsd4op_func)nfsd4_secinfo_no_name,
.op_flags = OP_HANDLES_WRONGSEC,
.op_name = "OP_SECINFO_NO_NAME",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_secinfo_rsize,
},
[OP_TEST_STATEID] = {
.op_func = (nfsd4op_func)nfsd4_test_stateid,
.op_flags = ALLOWED_WITHOUT_FH,
.op_name = "OP_TEST_STATEID",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_test_stateid_rsize,
},
[OP_FREE_STATEID] = {
.op_func = (nfsd4op_func)nfsd4_free_stateid,
.op_flags = ALLOWED_WITHOUT_FH | OP_MODIFIES_SOMETHING,
.op_name = "OP_FREE_STATEID",
.op_get_currentstateid = (stateid_getter)nfsd4_get_freestateid,
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
#ifdef CONFIG_NFSD_PNFS
[OP_GETDEVICEINFO] = {
.op_func = (nfsd4op_func)nfsd4_getdeviceinfo,
.op_flags = ALLOWED_WITHOUT_FH,
.op_name = "OP_GETDEVICEINFO",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_getdeviceinfo_rsize,
},
[OP_LAYOUTGET] = {
.op_func = (nfsd4op_func)nfsd4_layoutget,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_LAYOUTGET",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_layoutget_rsize,
},
[OP_LAYOUTCOMMIT] = {
.op_func = (nfsd4op_func)nfsd4_layoutcommit,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_LAYOUTCOMMIT",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_layoutcommit_rsize,
},
[OP_LAYOUTRETURN] = {
.op_func = (nfsd4op_func)nfsd4_layoutreturn,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_LAYOUTRETURN",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_layoutreturn_rsize,
},
#endif /* CONFIG_NFSD_PNFS */
/* NFSv4.2 operations */
[OP_ALLOCATE] = {
.op_func = (nfsd4op_func)nfsd4_allocate,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_ALLOCATE",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_DEALLOCATE] = {
.op_func = (nfsd4op_func)nfsd4_deallocate,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_DEALLOCATE",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_CLONE] = {
.op_func = (nfsd4op_func)nfsd4_clone,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_CLONE",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_COPY] = {
.op_func = (nfsd4op_func)nfsd4_copy,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_COPY",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_copy_rsize,
},
[OP_SEEK] = {
.op_func = (nfsd4op_func)nfsd4_seek,
.op_name = "OP_SEEK",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_seek_rsize,
},
};
/**
* nfsd4_spo_must_allow - Determine if the compound op contains an
* operation that is allowed to be sent with machine credentials
*
* @rqstp: a pointer to the struct svc_rqst
*
* Checks to see if the compound contains a spo_must_allow op
* and confirms that it was sent with the proper machine creds.
*/
bool nfsd4_spo_must_allow(struct svc_rqst *rqstp)
{
struct nfsd4_compoundres *resp = rqstp->rq_resp;
struct nfsd4_compoundargs *argp = rqstp->rq_argp;
struct nfsd4_op *this = &argp->ops[resp->opcnt - 1];
struct nfsd4_compound_state *cstate = &resp->cstate;
struct nfs4_op_map *allow = &cstate->clp->cl_spo_must_allow;
u32 opiter;
if (!cstate->minorversion)
return false;
	if (cstate->spo_must_allowed)
return true;
opiter = resp->opcnt;
while (opiter < argp->opcnt) {
this = &argp->ops[opiter++];
if (test_bit(this->opnum, allow->u.longs) &&
cstate->clp->cl_mach_cred &&
nfsd4_mach_creds_match(cstate->clp, rqstp)) {
cstate->spo_must_allowed = true;
return true;
}
}
cstate->spo_must_allowed = false;
return false;
}
int nfsd4_max_reply(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
if (op->opnum == OP_ILLEGAL || op->status == nfserr_notsupp)
return op_encode_hdr_size * sizeof(__be32);
BUG_ON(OPDESC(op)->op_rsize_bop == NULL);
return OPDESC(op)->op_rsize_bop(rqstp, op);
}
void warn_on_nonidempotent_op(struct nfsd4_op *op)
{
if (OPDESC(op)->op_flags & OP_MODIFIES_SOMETHING) {
pr_err("unable to encode reply to nonidempotent op %d (%s)\n",
op->opnum, nfsd4_op_name(op->opnum));
WARN_ON_ONCE(1);
}
}
static const char *nfsd4_op_name(unsigned opnum)
{
if (opnum < ARRAY_SIZE(nfsd4_ops))
return nfsd4_ops[opnum].op_name;
return "unknown_operation";
}
#define nfsd4_voidres nfsd4_voidargs
struct nfsd4_voidargs { int dummy; };
static struct svc_procedure nfsd_procedures4[2] = {
[NFSPROC4_NULL] = {
.pc_func = (svc_procfunc) nfsd4_proc_null,
.pc_encode = (kxdrproc_t) nfs4svc_encode_voidres,
.pc_argsize = sizeof(struct nfsd4_voidargs),
.pc_ressize = sizeof(struct nfsd4_voidres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = 1,
},
[NFSPROC4_COMPOUND] = {
.pc_func = (svc_procfunc) nfsd4_proc_compound,
.pc_decode = (kxdrproc_t) nfs4svc_decode_compoundargs,
.pc_encode = (kxdrproc_t) nfs4svc_encode_compoundres,
.pc_argsize = sizeof(struct nfsd4_compoundargs),
.pc_ressize = sizeof(struct nfsd4_compoundres),
.pc_release = nfsd4_release_compoundargs,
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = NFSD_BUFSIZE/4,
},
};
struct svc_version nfsd_version4 = {
.vs_vers = 4,
.vs_nproc = 2,
.vs_proc = nfsd_procedures4,
.vs_dispatch = nfsd_dispatch,
.vs_xdrsize = NFS4_SVC_XDRSIZE,
.vs_rpcb_optnl = true,
.vs_need_cong_ctrl = true,
};
/*
* Local variables:
* c-basic-offset: 8
* End:
*/
| ./CrossVul/dataset_final_sorted/CWE-404/c/bad_3351_4 |
crossvul-cpp_data_bad_3351_1 | /*
* linux/fs/lockd/svclock.c
*
* Handling of server-side locks, mostly of the blocked variety.
* This is the ugliest part of lockd because we tread on very thin ice.
* GRANT and CANCEL calls may get stuck, meet in mid-flight, etc.
* IMNSHO introducing the grant callback into the NLM protocol was one
* of the worst ideas Sun ever had. Except maybe for the idea of doing
* NFS file locking at all.
*
* I'm trying hard to avoid race conditions by protecting most accesses
* to a file's list of blocked locks through a semaphore. The global
* list of blocked locks is not protected in this fashion however.
* Therefore, some functions (such as the RPC callback for the async grant
* call) move blocked locks towards the head of the list *while some other
* process might be traversing it*. This should not be a problem in
* practice, because this will only cause functions traversing the list
* to visit some blocks twice.
*
* Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/lockd/nlm.h>
#include <linux/lockd/lockd.h>
#include <linux/kthread.h>
#define NLMDBG_FACILITY NLMDBG_SVCLOCK
#ifdef CONFIG_LOCKD_V4
#define nlm_deadlock nlm4_deadlock
#else
#define nlm_deadlock nlm_lck_denied
#endif
static void nlmsvc_release_block(struct nlm_block *block);
static void nlmsvc_insert_block(struct nlm_block *block, unsigned long);
static void nlmsvc_remove_block(struct nlm_block *block);
static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
static void nlmsvc_freegrantargs(struct nlm_rqst *call);
static const struct rpc_call_ops nlmsvc_grant_ops;
/*
* The list of blocked locks to retry
*/
static LIST_HEAD(nlm_blocked);
static DEFINE_SPINLOCK(nlm_blocked_lock);
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
{
/*
* We can get away with a static buffer because this is only called
* from lockd, which is single-threaded.
*/
static char buf[2*NLM_MAXCOOKIELEN+1];
unsigned int i, len = sizeof(buf);
char *p = buf;
len--; /* allow for trailing \0 */
if (len < 3)
return "???";
for (i = 0 ; i < cookie->len ; i++) {
if (len < 2) {
strcpy(p-3, "...");
break;
}
sprintf(p, "%02x", cookie->data[i]);
p += 2;
len -= 2;
}
*p = '\0';
return buf;
}
#endif
/*
* Insert a blocked lock into the global list
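 * The list is kept sorted by retry time (b_when); entries that should
 * never be retried (NLM_NEVER) sort to the end.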
*/
static void
nlmsvc_insert_block_locked(struct nlm_block *block, unsigned long when)
{
struct nlm_block *b;
struct list_head *pos;
dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when);
if (list_empty(&block->b_list)) {
kref_get(&block->b_count);
} else {
list_del_init(&block->b_list);
}
pos = &nlm_blocked;
if (when != NLM_NEVER) {
		if ((when += jiffies) == NLM_NEVER)
			when++;
		list_for_each(pos, &nlm_blocked) {
			b = list_entry(pos, struct nlm_block, b_list);
			if (time_after(b->b_when, when) || b->b_when == NLM_NEVER)
break;
}
/* On normal exit from the loop, pos == &nlm_blocked,
* so we will be adding to the end of the list - good
*/
}
list_add_tail(&block->b_list, pos);
block->b_when = when;
}
static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
{
spin_lock(&nlm_blocked_lock);
nlmsvc_insert_block_locked(block, when);
spin_unlock(&nlm_blocked_lock);
}
/*
* Remove a block from the global list
*/
static inline void
nlmsvc_remove_block(struct nlm_block *block)
{
if (!list_empty(&block->b_list)) {
spin_lock(&nlm_blocked_lock);
list_del_init(&block->b_list);
spin_unlock(&nlm_blocked_lock);
nlmsvc_release_block(block);
}
}
/*
* Find a block for a given lock
*/
static struct nlm_block *
nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
{
struct nlm_block *block;
struct file_lock *fl;
dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
file, lock->fl.fl_pid,
(long long)lock->fl.fl_start,
(long long)lock->fl.fl_end, lock->fl.fl_type);
list_for_each_entry(block, &nlm_blocked, b_list) {
fl = &block->b_call->a_args.lock.fl;
dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
block->b_file, fl->fl_pid,
(long long)fl->fl_start,
(long long)fl->fl_end, fl->fl_type,
nlmdbg_cookie2a(&block->b_call->a_args.cookie));
if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
kref_get(&block->b_count);
return block;
}
}
return NULL;
}
static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b)
{
if (a->len != b->len)
return 0;
if (memcmp(a->data, b->data, a->len))
return 0;
return 1;
}
/*
* Find a block with a given NLM cookie.
*/
static inline struct nlm_block *
nlmsvc_find_block(struct nlm_cookie *cookie)
{
struct nlm_block *block;
list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_cookie_match(&block->b_call->a_args.cookie, cookie))
goto found;
}
return NULL;
found:
dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block);
kref_get(&block->b_count);
return block;
}
/*
* Create a block and initialize it.
*
* Note: we explicitly set the cookie of the grant reply to that of
* the blocked lock request. The spec explicitly mentions that the client
* should _not_ rely on the callback containing the same cookie as the
* request, but (as I found out later) that's because some implementations
 * do just this. Never mind the standards committees, they support our
* logging industries.
*
* 10 years later: I hope we can safely ignore these old and broken
* clients by now. Let's fix this so we can uniquely identify an incoming
* GRANTED_RES message by cookie, without having to rely on the client's IP
* address. --okir
*/
static struct nlm_block *
nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
struct nlm_file *file, struct nlm_lock *lock,
struct nlm_cookie *cookie)
{
struct nlm_block *block;
struct nlm_rqst *call = NULL;
call = nlm_alloc_call(host);
if (call == NULL)
return NULL;
/* Allocate memory for block, and initialize arguments */
block = kzalloc(sizeof(*block), GFP_KERNEL);
if (block == NULL)
goto failed;
kref_init(&block->b_count);
INIT_LIST_HEAD(&block->b_list);
INIT_LIST_HEAD(&block->b_flist);
if (!nlmsvc_setgrantargs(call, lock))
goto failed_free;
/* Set notifier function for VFS, and init args */
call->a_args.lock.fl.fl_flags |= FL_SLEEP;
call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations;
nlmclnt_next_cookie(&call->a_args.cookie);
dprintk("lockd: created block %p...\n", block);
/* Create and initialize the block */
block->b_daemon = rqstp->rq_server;
block->b_host = host;
block->b_file = file;
file->f_count++;
/* Add to file's list of blocks */
list_add(&block->b_flist, &file->f_blocks);
/* Set up RPC arguments for callback */
block->b_call = call;
call->a_flags = RPC_TASK_ASYNC;
call->a_block = block;
return block;
failed_free:
kfree(block);
failed:
nlmsvc_release_call(call);
return NULL;
}
/*
* Delete a block.
* It is the caller's responsibility to check whether the file
* can be closed hereafter.
*/
static int nlmsvc_unlink_block(struct nlm_block *block)
{
int status;
dprintk("lockd: unlinking block %p...\n", block);
/* Remove block from list */
status = posix_unblock_lock(&block->b_call->a_args.lock.fl);
nlmsvc_remove_block(block);
return status;
}
static void nlmsvc_free_block(struct kref *kref)
{
struct nlm_block *block = container_of(kref, struct nlm_block, b_count);
struct nlm_file *file = block->b_file;
dprintk("lockd: freeing block %p...\n", block);
/* Remove block from file's list of blocks */
list_del_init(&block->b_flist);
mutex_unlock(&file->f_mutex);
nlmsvc_freegrantargs(block->b_call);
nlmsvc_release_call(block->b_call);
nlm_release_file(block->b_file);
kfree(block);
}
static void nlmsvc_release_block(struct nlm_block *block)
{
if (block != NULL)
kref_put_mutex(&block->b_count, nlmsvc_free_block, &block->b_file->f_mutex);
}
/*
* Loop over all blocks and delete blocks held by
* a matching host.
*/
void nlmsvc_traverse_blocks(struct nlm_host *host,
struct nlm_file *file,
nlm_host_match_fn_t match)
{
struct nlm_block *block, *next;
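	/*
	 * Dropping f_mutex to unlink a block invalidates the iteration,
	 * so restart the scan from the top after each removal.
	 */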
restart:
mutex_lock(&file->f_mutex);
list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
if (!match(block->b_host, host))
continue;
/* Do not destroy blocks that are not on
* the global retry list - why? */
if (list_empty(&block->b_list))
continue;
kref_get(&block->b_count);
mutex_unlock(&file->f_mutex);
nlmsvc_unlink_block(block);
nlmsvc_release_block(block);
goto restart;
}
mutex_unlock(&file->f_mutex);
}
/*
* Initialize arguments for GRANTED call. The nlm_rqst structure
* has been cleared already.
*/
static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
{
locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
call->a_args.lock.caller = utsname()->nodename;
call->a_args.lock.oh.len = lock->oh.len;
/* set default data area */
call->a_args.lock.oh.data = call->a_owner;
call->a_args.lock.svid = lock->fl.fl_pid;
if (lock->oh.len > NLMCLNT_OHSIZE) {
void *data = kmalloc(lock->oh.len, GFP_KERNEL);
if (!data)
return 0;
call->a_args.lock.oh.data = (u8 *) data;
}
memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
return 1;
}
static void nlmsvc_freegrantargs(struct nlm_rqst *call)
{
if (call->a_args.lock.oh.data != call->a_owner)
kfree(call->a_args.lock.oh.data);
locks_release_private(&call->a_args.lock.fl);
}
/*
* Deferred lock request handling for non-blocking lock
*/
static __be32
nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block)
{
__be32 status = nlm_lck_denied_nolocks;
block->b_flags |= B_QUEUED;
nlmsvc_insert_block(block, NLM_TIMEOUT);
block->b_cache_req = &rqstp->rq_chandle;
if (rqstp->rq_chandle.defer) {
block->b_deferred_req =
rqstp->rq_chandle.defer(block->b_cache_req);
if (block->b_deferred_req != NULL)
status = nlm_drop_reply;
}
dprintk("lockd: nlmsvc_defer_lock_rqst block %p flags %d status %d\n",
block, block->b_flags, ntohl(status));
return status;
}
/*
* Attempt to establish a lock, and if it can't be granted, block it
* if required.
*/
__be32
nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
struct nlm_host *host, struct nlm_lock *lock, int wait,
struct nlm_cookie *cookie, int reclaim)
{
struct nlm_block *block = NULL;
int error;
__be32 ret;
dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
file_inode(file->f_file)->i_sb->s_id,
file_inode(file->f_file)->i_ino,
lock->fl.fl_type, lock->fl.fl_pid,
(long long)lock->fl.fl_start,
(long long)lock->fl.fl_end,
wait);
/* Lock file against concurrent access */
mutex_lock(&file->f_mutex);
/* Get existing block (in case client is busy-waiting)
* or create new block
*/
block = nlmsvc_lookup_block(file, lock);
if (block == NULL) {
block = nlmsvc_create_block(rqstp, host, file, lock, cookie);
ret = nlm_lck_denied_nolocks;
if (block == NULL)
goto out;
lock = &block->b_call->a_args.lock;
} else
lock->fl.fl_flags &= ~FL_SLEEP;
if (block->b_flags & B_QUEUED) {
dprintk("lockd: nlmsvc_lock deferred block %p flags %d\n",
block, block->b_flags);
if (block->b_granted) {
nlmsvc_unlink_block(block);
ret = nlm_granted;
goto out;
}
if (block->b_flags & B_TIMED_OUT) {
nlmsvc_unlink_block(block);
ret = nlm_lck_denied;
goto out;
}
ret = nlm_drop_reply;
goto out;
}
if (locks_in_grace(SVC_NET(rqstp)) && !reclaim) {
ret = nlm_lck_denied_grace_period;
goto out;
}
if (reclaim && !locks_in_grace(SVC_NET(rqstp))) {
ret = nlm_lck_denied_grace_period;
goto out;
}
if (!wait)
lock->fl.fl_flags &= ~FL_SLEEP;
error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
lock->fl.fl_flags &= ~FL_SLEEP;
dprintk("lockd: vfs_lock_file returned %d\n", error);
switch (error) {
case 0:
ret = nlm_granted;
goto out;
case -EAGAIN:
/*
* If this is a blocking request for an
* already pending lock request then we need
* to put it back on lockd's block list
*/
if (wait)
break;
ret = nlm_lck_denied;
goto out;
case FILE_LOCK_DEFERRED:
if (wait)
break;
/* Filesystem lock operation is in progress;
add it to the queue waiting for the callback */
ret = nlmsvc_defer_lock_rqst(rqstp, block);
goto out;
case -EDEADLK:
ret = nlm_deadlock;
goto out;
default: /* includes ENOLCK */
ret = nlm_lck_denied_nolocks;
goto out;
}
ret = nlm_lck_blocked;
/* Append to list of blocked */
nlmsvc_insert_block(block, NLM_NEVER);
out:
mutex_unlock(&file->f_mutex);
nlmsvc_release_block(block);
dprintk("lockd: nlmsvc_lock returned %u\n", ret);
return ret;
}
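/*
 * Illustrative sketch (an assumption, not part of this file): the path
 * above is typically reached when a process on an NFS client takes a
 * POSIX lock on a file from an NFS mount. F_SETLKW corresponds to a
 * blocking NLM LOCK request (wait != 0 above), F_SETLK to a
 * non-blocking one:
 *
 *	#include <fcntl.h>	// userspace code, on the NFS client
 *
 *	struct flock fl = {
 *		.l_type = F_WRLCK,	// exclusive lock
 *		.l_whence = SEEK_SET,
 *		.l_start = 0,
 *		.l_len = 0,		// whole file
 *	};
 *	fcntl(fd, F_SETLKW, &fl);	// may block until granted
 */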
/*
* Test for presence of a conflicting lock.
*/
__be32
nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
struct nlm_host *host, struct nlm_lock *lock,
struct nlm_lock *conflock, struct nlm_cookie *cookie)
{
int error;
__be32 ret;
dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
file_inode(file->f_file)->i_sb->s_id,
file_inode(file->f_file)->i_ino,
lock->fl.fl_type,
(long long)lock->fl.fl_start,
(long long)lock->fl.fl_end);
if (locks_in_grace(SVC_NET(rqstp))) {
ret = nlm_lck_denied_grace_period;
goto out;
}
error = vfs_test_lock(file->f_file, &lock->fl);
if (error) {
/* We can't currently deal with deferred test requests */
if (error == FILE_LOCK_DEFERRED)
WARN_ON_ONCE(1);
ret = nlm_lck_denied_nolocks;
goto out;
}
if (lock->fl.fl_type == F_UNLCK) {
ret = nlm_granted;
goto out;
}
dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
lock->fl.fl_type, (long long)lock->fl.fl_start,
(long long)lock->fl.fl_end);
conflock->caller = "somehost"; /* FIXME */
conflock->len = strlen(conflock->caller);
conflock->oh.len = 0; /* don't return OH info */
conflock->svid = lock->fl.fl_pid;
conflock->fl.fl_type = lock->fl.fl_type;
conflock->fl.fl_start = lock->fl.fl_start;
conflock->fl.fl_end = lock->fl.fl_end;
locks_release_private(&lock->fl);
ret = nlm_lck_denied;
out:
return ret;
}
/*
* Remove a lock.
* This implies a CANCEL call: We send a GRANT_MSG, the client replies
* with a GRANT_RES call which gets lost, and calls UNLOCK immediately
* afterwards. In this case the block will still be there, and hence
* must be removed.
*/
__be32
nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
{
int error;
dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
file_inode(file->f_file)->i_sb->s_id,
file_inode(file->f_file)->i_ino,
lock->fl.fl_pid,
(long long)lock->fl.fl_start,
(long long)lock->fl.fl_end);
/* First, cancel any lock that might be there */
nlmsvc_cancel_blocked(net, file, lock);
lock->fl.fl_type = F_UNLCK;
error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
return (error < 0) ? nlm_lck_denied_nolocks : nlm_granted;
}
/*
* Cancel a previously blocked request.
*
* A cancel request always overrides any grant that may currently
* be in progress.
* The calling procedure must check whether the file can be closed.
*/
__be32
nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
{
struct nlm_block *block;
int status = 0;
dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
file_inode(file->f_file)->i_sb->s_id,
file_inode(file->f_file)->i_ino,
lock->fl.fl_pid,
(long long)lock->fl.fl_start,
(long long)lock->fl.fl_end);
if (locks_in_grace(net))
return nlm_lck_denied_grace_period;
mutex_lock(&file->f_mutex);
block = nlmsvc_lookup_block(file, lock);
mutex_unlock(&file->f_mutex);
if (block != NULL) {
vfs_cancel_lock(block->b_file->f_file,
&block->b_call->a_args.lock.fl);
status = nlmsvc_unlink_block(block);
nlmsvc_release_block(block);
}
return status ? nlm_lck_denied : nlm_granted;
}
/*
* This is a callback from the filesystem for VFS file lock requests.
* It will be used if lm_grant is defined and the filesystem cannot
* respond to the request immediately.
* For SETLK or SETLKW requests it will get the local posix lock.
* In all cases it will move the block to the head of the nlm_blocked queue,
* where nlmsvc_retry_blocked() can send back a reply for SETLKW or revisit
* the deferred rpc for GETLK and SETLK.
*/
static void
nlmsvc_update_deferred_block(struct nlm_block *block, int result)
{
block->b_flags |= B_GOT_CALLBACK;
if (result == 0)
block->b_granted = 1;
else
block->b_flags |= B_TIMED_OUT;
}
static int nlmsvc_grant_deferred(struct file_lock *fl, int result)
{
struct nlm_block *block;
int rc = -ENOENT;
spin_lock(&nlm_blocked_lock);
list_for_each_entry(block, &nlm_blocked, b_list) {
if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
dprintk("lockd: nlmsvc_notify_blocked block %p flags %d\n",
block, block->b_flags);
if (block->b_flags & B_QUEUED) {
if (block->b_flags & B_TIMED_OUT) {
rc = -ENOLCK;
break;
}
nlmsvc_update_deferred_block(block, result);
} else if (result == 0)
block->b_granted = 1;
nlmsvc_insert_block_locked(block, 0);
svc_wake_up(block->b_daemon);
rc = 0;
break;
}
}
spin_unlock(&nlm_blocked_lock);
if (rc == -ENOENT)
printk(KERN_WARNING "lockd: grant for unknown block\n");
return rc;
}
/*
* Unblock a blocked lock request. This is a callback invoked from the
* VFS layer when a lock on which we blocked is removed.
*
* This function doesn't grant the blocked lock instantly, but rather moves
* the block to the head of nlm_blocked where it can be picked up by lockd.
*/
static void
nlmsvc_notify_blocked(struct file_lock *fl)
{
struct nlm_block *block;
dprintk("lockd: VFS unblock notification for block %p\n", fl);
spin_lock(&nlm_blocked_lock);
list_for_each_entry(block, &nlm_blocked, b_list) {
if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
nlmsvc_insert_block_locked(block, 0);
spin_unlock(&nlm_blocked_lock);
svc_wake_up(block->b_daemon);
return;
}
}
spin_unlock(&nlm_blocked_lock);
printk(KERN_WARNING "lockd: notification for unknown block!\n");
}
static int nlmsvc_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
return fl1->fl_owner == fl2->fl_owner && fl1->fl_pid == fl2->fl_pid;
}
/*
* Since NLM uses two "keys" for tracking locks, we need to hash them down
* to one for the blocked_hash. Here, we're just xor'ing the host address
* with the pid in order to create a key value for picking a hash bucket.
*/
static unsigned long
nlmsvc_owner_key(struct file_lock *fl)
{
return (unsigned long)fl->fl_owner ^ (unsigned long)fl->fl_pid;
}
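/*
 * Worked example with made-up values: for fl_owner ==
 * (fl_owner_t)0xffff880012345600 and fl_pid == 1234 (0x4d2), the key is
 * 0xffff880012345600 ^ 0x4d2 == 0xffff8800123452d2. The generic lock
 * code then reduces this key to a hash bucket index; collisions are
 * harmless, since entries that share a bucket are still distinguished
 * via .lm_compare_owner (nlmsvc_same_owner() above).
 */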
const struct lock_manager_operations nlmsvc_lock_operations = {
.lm_compare_owner = nlmsvc_same_owner,
.lm_owner_key = nlmsvc_owner_key,
.lm_notify = nlmsvc_notify_blocked,
.lm_grant = nlmsvc_grant_deferred,
};
/*
* Try to claim a lock that was previously blocked.
*
* Note that we use both the RPC_GRANTED_MSG call _and_ an async
* RPC thread when notifying the client. This seems like overkill...
* Here's why:
* - we don't want to use a synchronous RPC thread, otherwise
* we might find ourselves hanging on a dead portmapper.
* - Some lockd implementations (e.g. HP) don't react to
* RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls.
*/
static void
nlmsvc_grant_blocked(struct nlm_block *block)
{
struct nlm_file *file = block->b_file;
struct nlm_lock *lock = &block->b_call->a_args.lock;
int error;
loff_t fl_start, fl_end;
dprintk("lockd: grant blocked lock %p\n", block);
kref_get(&block->b_count);
/* Unlink block request from list */
nlmsvc_unlink_block(block);
/* If b_granted is true this means we've been here before.
* Just retry the grant callback, possibly refreshing the RPC
* binding */
if (block->b_granted) {
nlm_rebind_host(block->b_host);
goto callback;
}
/* Try the lock operation again */
/* vfs_lock_file() can mangle fl_start and fl_end, but we need
* them unchanged for the GRANT_MSG
*/
lock->fl.fl_flags |= FL_SLEEP;
fl_start = lock->fl.fl_start;
fl_end = lock->fl.fl_end;
error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
lock->fl.fl_flags &= ~FL_SLEEP;
lock->fl.fl_start = fl_start;
lock->fl.fl_end = fl_end;
switch (error) {
case 0:
break;
case FILE_LOCK_DEFERRED:
dprintk("lockd: lock still blocked error %d\n", error);
nlmsvc_insert_block(block, NLM_NEVER);
nlmsvc_release_block(block);
return;
default:
printk(KERN_WARNING "lockd: unexpected error %d in %s!\n",
-error, __func__);
nlmsvc_insert_block(block, 10 * HZ);
nlmsvc_release_block(block);
return;
}
callback:
/* Lock was granted by VFS. */
dprintk("lockd: GRANTing blocked lock.\n");
block->b_granted = 1;
/* keep block on the list, but don't reattempt until the RPC
* completes or the submission fails
*/
nlmsvc_insert_block(block, NLM_NEVER);
/* Call the client -- use a soft RPC task since nlmsvc_retry_blocked
* will queue up a new one if this one times out
*/
error = nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG,
&nlmsvc_grant_ops);
/* RPC submission failed, wait a bit and retry */
if (error < 0)
nlmsvc_insert_block(block, 10 * HZ);
}
/*
* This is the callback from the RPC layer when the NLM_GRANTED_MSG
* RPC call has succeeded or timed out.
* Like all RPC callbacks, it is invoked by the rpciod process, so it
* better not sleep. Therefore, we put the blocked lock on the nlm_blocked
* chain once more in order to have it removed by lockd itself (which can
* then sleep on the file semaphore without disrupting e.g. the nfs client).
*/
static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
{
struct nlm_rqst *call = data;
struct nlm_block *block = call->a_block;
unsigned long timeout;
dprintk("lockd: GRANT_MSG RPC callback\n");
spin_lock(&nlm_blocked_lock);
/* if the block is not on a list at this point then it has
* been invalidated. Don't try to requeue it.
*
* FIXME: it's possible that the block is removed from the list
* after this check but before the nlmsvc_insert_block. In that
* case it will be added back. Perhaps we need better locking
* for nlm_blocked?
*/
if (list_empty(&block->b_list))
goto out;
/* Technically, we should down the file semaphore here. Since we
* move the block towards the head of the queue only, no harm
* can be done, though. */
if (task->tk_status < 0) {
/* RPC error: Re-insert for retransmission */
timeout = 10 * HZ;
} else {
/* Call was successful, now wait for client callback */
timeout = 60 * HZ;
}
nlmsvc_insert_block_locked(block, timeout);
svc_wake_up(block->b_daemon);
out:
spin_unlock(&nlm_blocked_lock);
}
/*
* FIXME: nlmsvc_release_block() grabs a mutex. This is not allowed for an
* .rpc_release rpc_call_op
*/
static void nlmsvc_grant_release(void *data)
{
struct nlm_rqst *call = data;
nlmsvc_release_block(call->a_block);
}
static const struct rpc_call_ops nlmsvc_grant_ops = {
.rpc_call_done = nlmsvc_grant_callback,
.rpc_release = nlmsvc_grant_release,
};
/*
* We received a GRANT_RES callback. Try to find the corresponding
* block.
*/
void
nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status)
{
struct nlm_block *block;
dprintk("grant_reply: looking for cookie %x, s=%d \n",
*(unsigned int *)(cookie->data), status);
if (!(block = nlmsvc_find_block(cookie)))
return;
if (block) {
if (status == nlm_lck_denied_grace_period) {
/* Try again in a couple of seconds */
nlmsvc_insert_block(block, 10 * HZ);
} else {
/* Lock is now held by client, or has been rejected.
* In both cases, the block should be removed. */
nlmsvc_unlink_block(block);
}
}
nlmsvc_release_block(block);
}
/* Helper function to handle retry of a deferred block.
* If it is a blocking lock, call grant_blocked.
* For a non-blocking lock or test lock, revisit the request.
*/
static void
retry_deferred_block(struct nlm_block *block)
{
if (!(block->b_flags & B_GOT_CALLBACK))
block->b_flags |= B_TIMED_OUT;
nlmsvc_insert_block(block, NLM_TIMEOUT);
dprintk("revisit block %p flags %d\n", block, block->b_flags);
if (block->b_deferred_req) {
block->b_deferred_req->revisit(block->b_deferred_req, 0);
block->b_deferred_req = NULL;
}
}
/*
* Retry all blocked locks that have been notified. This is where lockd
* picks up locks that can be granted, or grant notifications that must
* be retransmitted.
*/
unsigned long
nlmsvc_retry_blocked(void)
{
unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
struct nlm_block *block;
spin_lock(&nlm_blocked_lock);
while (!list_empty(&nlm_blocked) && !kthread_should_stop()) {
block = list_entry(nlm_blocked.next, struct nlm_block, b_list);
if (block->b_when == NLM_NEVER)
break;
if (time_after(block->b_when, jiffies)) {
timeout = block->b_when - jiffies;
break;
}
spin_unlock(&nlm_blocked_lock);
dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
block, block->b_when);
if (block->b_flags & B_QUEUED) {
dprintk("nlmsvc_retry_blocked delete block (%p, granted=%d, flags=%d)\n",
block, block->b_granted, block->b_flags);
retry_deferred_block(block);
} else
nlmsvc_grant_blocked(block);
spin_lock(&nlm_blocked_lock);
}
spin_unlock(&nlm_blocked_lock);
return timeout;
}
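/*
 * Usage note (hedged; the caller lives in fs/lockd/svc.c, not in this
 * file): the main lockd kthread is expected to do roughly
 *
 *	timeout = nlmsvc_retry_blocked();
 *	err = svc_recv(rqstp, timeout);
 *
 * so the value returned above bounds how long lockd sleeps before its
 * next retry pass.
 */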
| ./CrossVul/dataset_final_sorted/CWE-404/c/bad_3351_1 |
crossvul-cpp_data_bad_3267_0 | /* Userspace key control operations
*
* Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/key.h>
#include <linux/keyctl.h>
#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/cred.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include "internal.h"
#define KEY_MAX_DESC_SIZE 4096
static int key_get_type_from_user(char *type,
const char __user *_type,
unsigned len)
{
int ret;
ret = strncpy_from_user(type, _type, len);
if (ret < 0)
return ret;
if (ret == 0 || ret >= len)
return -EINVAL;
if (type[0] == '.')
return -EPERM;
type[len - 1] = '\0';
return 0;
}
/*
* Extract the description of a new key from userspace and either add it as a
* new key to the specified keyring or update a matching key in that keyring.
*
* If the description is NULL or an empty string, the key type is asked to
* generate one from the payload.
*
* The keyring must be writable so that we can attach the key to it.
*
* If successful, the new key's serial number is returned, otherwise an error
* code is returned.
*/
SYSCALL_DEFINE5(add_key, const char __user *, _type,
const char __user *, _description,
const void __user *, _payload,
size_t, plen,
key_serial_t, ringid)
{
key_ref_t keyring_ref, key_ref;
char type[32], *description;
void *payload;
long ret;
ret = -EINVAL;
if (plen > 1024 * 1024 - 1)
goto error;
/* draw all the data into kernel space */
ret = key_get_type_from_user(type, _type, sizeof(type));
if (ret < 0)
goto error;
description = NULL;
if (_description) {
description = strndup_user(_description, KEY_MAX_DESC_SIZE);
if (IS_ERR(description)) {
ret = PTR_ERR(description);
goto error;
}
if (!*description) {
kfree(description);
description = NULL;
} else if ((description[0] == '.') &&
(strncmp(type, "keyring", 7) == 0)) {
ret = -EPERM;
goto error2;
}
}
/* pull the payload in if one was supplied */
payload = NULL;
if (_payload) {
ret = -ENOMEM;
payload = kmalloc(plen, GFP_KERNEL | __GFP_NOWARN);
if (!payload) {
if (plen <= PAGE_SIZE)
goto error2;
payload = vmalloc(plen);
if (!payload)
goto error2;
}
ret = -EFAULT;
if (copy_from_user(payload, _payload, plen) != 0)
goto error3;
}
/* find the target keyring (which must be writable) */
keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
if (IS_ERR(keyring_ref)) {
ret = PTR_ERR(keyring_ref);
goto error3;
}
/* create or update the requested key and add it to the target
* keyring */
key_ref = key_create_or_update(keyring_ref, type, description,
payload, plen, KEY_PERM_UNDEF,
KEY_ALLOC_IN_QUOTA);
if (!IS_ERR(key_ref)) {
ret = key_ref_to_ptr(key_ref)->serial;
key_ref_put(key_ref);
}
else {
ret = PTR_ERR(key_ref);
}
key_ref_put(keyring_ref);
error3:
kvfree(payload);
error2:
kfree(description);
error:
return ret;
}
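/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 * the syscall above is normally reached through the libkeyutils
 * wrapper. The "user" key type and the session-keyring destination are
 * just example choices:
 *
 *	#include <keyutils.h>	// userspace header
 *
 *	key_serial_t id = add_key("user", "example:token",
 *				  "secret-bytes", 12,
 *				  KEY_SPEC_SESSION_KEYRING);
 *	if (id == -1)
 *		perror("add_key");
 */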
/*
* Search the process keyrings and keyring trees linked from those for a
* matching key. Keyrings must have appropriate Search permission to be
* searched.
*
* If a key is found, it will be attached to the destination keyring if there's
* one specified and the serial number of the key will be returned.
*
* If no key is found, /sbin/request-key will be invoked if _callout_info is
* non-NULL in an attempt to create a key. The _callout_info string will be
* passed to /sbin/request-key to aid with completing the request. If the
* _callout_info string is "" then it will be changed to "-".
*/
SYSCALL_DEFINE4(request_key, const char __user *, _type,
const char __user *, _description,
const char __user *, _callout_info,
key_serial_t, destringid)
{
struct key_type *ktype;
struct key *key;
key_ref_t dest_ref;
size_t callout_len;
char type[32], *description, *callout_info;
long ret;
/* pull the type into kernel space */
ret = key_get_type_from_user(type, _type, sizeof(type));
if (ret < 0)
goto error;
/* pull the description into kernel space */
description = strndup_user(_description, KEY_MAX_DESC_SIZE);
if (IS_ERR(description)) {
ret = PTR_ERR(description);
goto error;
}
/* pull the callout info into kernel space */
callout_info = NULL;
callout_len = 0;
if (_callout_info) {
callout_info = strndup_user(_callout_info, PAGE_SIZE);
if (IS_ERR(callout_info)) {
ret = PTR_ERR(callout_info);
goto error2;
}
callout_len = strlen(callout_info);
}
/* get the destination keyring if specified */
dest_ref = NULL;
if (destringid) {
dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE,
KEY_NEED_WRITE);
if (IS_ERR(dest_ref)) {
ret = PTR_ERR(dest_ref);
goto error3;
}
}
/* find the key type */
ktype = key_type_lookup(type);
if (IS_ERR(ktype)) {
ret = PTR_ERR(ktype);
goto error4;
}
/* do the search */
key = request_key_and_link(ktype, description, callout_info,
callout_len, NULL, key_ref_to_ptr(dest_ref),
KEY_ALLOC_IN_QUOTA);
if (IS_ERR(key)) {
ret = PTR_ERR(key);
goto error5;
}
/* wait for the key to finish being constructed */
ret = wait_for_key_construction(key, 1);
if (ret < 0)
goto error6;
ret = key->serial;
error6:
key_put(key);
error5:
key_type_put(ktype);
error4:
key_ref_put(dest_ref);
error3:
kfree(callout_info);
error2:
kfree(description);
error:
return ret;
}
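/*
 * Illustrative userspace sketch (an assumption): request_key() from
 * libkeyutils maps onto the syscall above. If no matching key exists
 * and a callout string is given, /sbin/request-key is invoked to
 * construct one:
 *
 *	#include <keyutils.h>
 *
 *	key_serial_t id = request_key("user", "example:token",
 *				      "callout-data",
 *				      KEY_SPEC_SESSION_KEYRING);
 *	if (id == -1)
 *		perror("request_key");	// ENOKEY if construction failed
 */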
/*
* Get the ID of the specified process keyring.
*
* The requested keyring must have search permission to be found.
*
* If successful, the ID of the requested keyring will be returned.
*/
long keyctl_get_keyring_ID(key_serial_t id, int create)
{
key_ref_t key_ref;
unsigned long lflags;
long ret;
lflags = create ? KEY_LOOKUP_CREATE : 0;
key_ref = lookup_user_key(id, lflags, KEY_NEED_SEARCH);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error;
}
ret = key_ref_to_ptr(key_ref)->serial;
key_ref_put(key_ref);
error:
return ret;
}
/*
* Join a (named) session keyring.
*
* Create and join an anonymous session keyring or join a named session
* keyring, creating it if necessary. A named session keyring must have Search
* permission for it to be joined. Session keyrings without this permit will
* be skipped over. It is not permitted for userspace to create or join
* keyrings whose name begins with a dot.
*
* If successful, the ID of the joined session keyring will be returned.
*/
long keyctl_join_session_keyring(const char __user *_name)
{
char *name;
long ret;
/* fetch the name from userspace */
name = NULL;
if (_name) {
name = strndup_user(_name, KEY_MAX_DESC_SIZE);
if (IS_ERR(name)) {
ret = PTR_ERR(name);
goto error;
}
ret = -EPERM;
if (name[0] == '.')
goto error_name;
}
/* join the session */
ret = join_session_keyring(name);
error_name:
kfree(name);
error:
return ret;
}
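/*
 * Illustrative userspace sketch (an assumption): joining a named
 * session keyring via the libkeyutils wrapper. Passing NULL creates
 * and joins a fresh anonymous session keyring instead:
 *
 *	#include <keyutils.h>
 *
 *	key_serial_t ring = keyctl_join_session_keyring("build-session");
 *	if (ring == -1)
 *		perror("keyctl_join_session_keyring");
 */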
/*
* Update a key's data payload from the given data.
*
* The key must grant the caller Write permission and the key type must support
* updating for this to work. A negative key can be positively instantiated
* with this call.
*
* If successful, 0 will be returned. If the key type does not support
* updating, then -EOPNOTSUPP will be returned.
*/
long keyctl_update_key(key_serial_t id,
const void __user *_payload,
size_t plen)
{
key_ref_t key_ref;
void *payload;
long ret;
ret = -EINVAL;
if (plen > PAGE_SIZE)
goto error;
/* pull the payload in if one was supplied */
payload = NULL;
if (_payload) {
ret = -ENOMEM;
payload = kmalloc(plen, GFP_KERNEL);
if (!payload)
goto error;
ret = -EFAULT;
if (copy_from_user(payload, _payload, plen) != 0)
goto error2;
}
/* find the target key (which must be writable) */
key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error2;
}
/* update the key */
ret = key_update(key_ref, payload, plen);
key_ref_put(key_ref);
error2:
kfree(payload);
error:
return ret;
}
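/*
 * Illustrative userspace sketch (an assumption): updating an existing
 * key's payload; this fails with EOPNOTSUPP if the key type has no
 * update method:
 *
 *	#include <keyutils.h>
 *
 *	if (keyctl_update(id, "new-payload", 11) == -1)
 *		perror("keyctl_update");
 */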
/*
* Revoke a key.
*
* The key must grant the caller Write or Setattr permission for this to
* work. The key type should give up its quota claim when revoked. The key
* and any links to the key will be automatically garbage collected after a
* certain amount of time (/proc/sys/kernel/keys/gc_delay).
*
* Keys with KEY_FLAG_KEEP set should not be revoked.
*
* If successful, 0 is returned.
*/
long keyctl_revoke_key(key_serial_t id)
{
key_ref_t key_ref;
struct key *key;
long ret;
key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
if (ret != -EACCES)
goto error;
key_ref = lookup_user_key(id, 0, KEY_NEED_SETATTR);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error;
}
}
key = key_ref_to_ptr(key_ref);
ret = 0;
if (test_bit(KEY_FLAG_KEEP, &key->flags))
ret = -EPERM;
else
key_revoke(key);
key_ref_put(key_ref);
error:
return ret;
}
/*
* Invalidate a key.
*
* The key must grant the caller Invalidate permission for this to work.
* The key and any links to the key will be automatically garbage collected
* immediately.
*
* Keys with KEY_FLAG_KEEP set should not be invalidated.
*
* If successful, 0 is returned.
*/
long keyctl_invalidate_key(key_serial_t id)
{
key_ref_t key_ref;
struct key *key;
long ret;
kenter("%d", id);
key_ref = lookup_user_key(id, 0, KEY_NEED_SEARCH);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
/* Root is permitted to invalidate certain special keys */
if (capable(CAP_SYS_ADMIN)) {
key_ref = lookup_user_key(id, 0, 0);
if (IS_ERR(key_ref))
goto error;
if (test_bit(KEY_FLAG_ROOT_CAN_INVAL,
&key_ref_to_ptr(key_ref)->flags))
goto invalidate;
goto error_put;
}
goto error;
}
invalidate:
key = key_ref_to_ptr(key_ref);
ret = 0;
if (test_bit(KEY_FLAG_KEEP, &key->flags))
ret = -EPERM;
else
key_invalidate(key);
error_put:
key_ref_put(key_ref);
error:
kleave(" = %ld", ret);
return ret;
}
/*
* Clear the specified keyring, creating an empty process keyring if one of the
* special keyring IDs is used.
*
* The keyring must grant the caller Write permission and not have
* KEY_FLAG_KEEP set for this to work. If successful, 0 will be returned.
*/
long keyctl_keyring_clear(key_serial_t ringid)
{
key_ref_t keyring_ref;
struct key *keyring;
long ret;
keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
if (IS_ERR(keyring_ref)) {
ret = PTR_ERR(keyring_ref);
/* Root is permitted to invalidate certain special keyrings */
if (capable(CAP_SYS_ADMIN)) {
keyring_ref = lookup_user_key(ringid, 0, 0);
if (IS_ERR(keyring_ref))
goto error;
if (test_bit(KEY_FLAG_ROOT_CAN_CLEAR,
&key_ref_to_ptr(keyring_ref)->flags))
goto clear;
goto error_put;
}
goto error;
}
clear:
keyring = key_ref_to_ptr(keyring_ref);
if (test_bit(KEY_FLAG_KEEP, &keyring->flags))
ret = -EPERM;
else
ret = keyring_clear(keyring);
error_put:
key_ref_put(keyring_ref);
error:
return ret;
}
/*
* Create a link from a keyring to a key if there's no matching key in the
* keyring, otherwise replace the link to the matching key with a link to the
* new key.
*
* The key must grant the caller Link permission and the keyring must grant
* the caller Write permission. Furthermore, if an additional link is created,
* the keyring's quota will be extended.
*
* If successful, 0 will be returned.
*/
long keyctl_keyring_link(key_serial_t id, key_serial_t ringid)
{
key_ref_t keyring_ref, key_ref;
long ret;
keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
if (IS_ERR(keyring_ref)) {
ret = PTR_ERR(keyring_ref);
goto error;
}
key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_NEED_LINK);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error2;
}
ret = key_link(key_ref_to_ptr(keyring_ref), key_ref_to_ptr(key_ref));
key_ref_put(key_ref);
error2:
key_ref_put(keyring_ref);
error:
return ret;
}
/*
* Unlink a key from a keyring.
*
* The keyring must grant the caller Write permission for this to work; the key
* itself need not grant the caller anything. If the last link to a key is
* removed then that key will be scheduled for destruction.
*
* Keys or keyrings with KEY_FLAG_KEEP set should not be unlinked.
*
* If successful, 0 will be returned.
*/
long keyctl_keyring_unlink(key_serial_t id, key_serial_t ringid)
{
key_ref_t keyring_ref, key_ref;
struct key *keyring, *key;
long ret;
keyring_ref = lookup_user_key(ringid, 0, KEY_NEED_WRITE);
if (IS_ERR(keyring_ref)) {
ret = PTR_ERR(keyring_ref);
goto error;
}
key_ref = lookup_user_key(id, KEY_LOOKUP_FOR_UNLINK, 0);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error2;
}
keyring = key_ref_to_ptr(keyring_ref);
key = key_ref_to_ptr(key_ref);
if (test_bit(KEY_FLAG_KEEP, &keyring->flags) &&
test_bit(KEY_FLAG_KEEP, &key->flags))
ret = -EPERM;
else
ret = key_unlink(keyring, key);
key_ref_put(key_ref);
error2:
key_ref_put(keyring_ref);
error:
return ret;
}
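/*
 * Illustrative userspace sketch (an assumption): linking a key into a
 * second keyring and unlinking it again via the libkeyutils wrappers:
 *
 *	#include <keyutils.h>
 *
 *	if (keyctl_link(id, KEY_SPEC_USER_KEYRING) == -1)
 *		perror("keyctl_link");
 *	if (keyctl_unlink(id, KEY_SPEC_USER_KEYRING) == -1)
 *		perror("keyctl_unlink");
 */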
/*
* Return a description of a key to userspace.
*
* The key must grant the caller View permission for this to work.
*
* If there's a buffer, we place up to buflen bytes of data into it formatted
* in the following way:
*
* type;uid;gid;perm;description<NUL>
*
* If successful, we return the amount of description available, irrespective
* of how much we may have copied into the buffer.
*/
long keyctl_describe_key(key_serial_t keyid,
char __user *buffer,
size_t buflen)
{
struct key *key, *instkey;
key_ref_t key_ref;
char *infobuf;
long ret;
int desclen, infolen;
key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW);
if (IS_ERR(key_ref)) {
/* viewing a key under construction is permitted if we have the
* authorisation token handy */
if (PTR_ERR(key_ref) == -EACCES) {
instkey = key_get_instantiation_authkey(keyid);
if (!IS_ERR(instkey)) {
key_put(instkey);
key_ref = lookup_user_key(keyid,
KEY_LOOKUP_PARTIAL,
0);
if (!IS_ERR(key_ref))
goto okay;
}
}
ret = PTR_ERR(key_ref);
goto error;
}
okay:
key = key_ref_to_ptr(key_ref);
desclen = strlen(key->description);
/* calculate how much information we're going to return */
ret = -ENOMEM;
infobuf = kasprintf(GFP_KERNEL,
"%s;%d;%d;%08x;",
key->type->name,
from_kuid_munged(current_user_ns(), key->uid),
from_kgid_munged(current_user_ns(), key->gid),
key->perm);
if (!infobuf)
goto error2;
infolen = strlen(infobuf);
ret = infolen + desclen + 1;
/* consider returning the data */
if (buffer && buflen >= ret) {
if (copy_to_user(buffer, infobuf, infolen) != 0 ||
copy_to_user(buffer + infolen, key->description,
desclen + 1) != 0)
ret = -EFAULT;
}
kfree(infobuf);
error2:
key_ref_put(key_ref);
error:
return ret;
}
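/*
 * Illustrative userspace sketch (an assumption): because the return
 * value is the full description length regardless of buflen, callers
 * usually size the buffer in two passes:
 *
 *	#include <keyutils.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	long len = keyctl_describe(id, NULL, 0);	// probe length
 *	char *buf = malloc(len);
 *	if (buf && keyctl_describe(id, buf, len) == len)
 *		printf("%s\n", buf);	// e.g. "user;1000;1000;3f010000;example:token"
 */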
/*
* Search the specified keyring and any keyrings it links to for a matching
* key. Only keyrings that grant the caller Search permission will be searched
* (this includes the starting keyring). Only keys with Search permission can
* be found.
*
* If successful, the found key will be linked to the destination keyring if
* supplied and the key has Link permission, and the found key ID will be
* returned.
*/
long keyctl_keyring_search(key_serial_t ringid,
const char __user *_type,
const char __user *_description,
key_serial_t destringid)
{
struct key_type *ktype;
key_ref_t keyring_ref, key_ref, dest_ref;
char type[32], *description;
long ret;
/* pull the type and description into kernel space */
ret = key_get_type_from_user(type, _type, sizeof(type));
if (ret < 0)
goto error;
description = strndup_user(_description, KEY_MAX_DESC_SIZE);
if (IS_ERR(description)) {
ret = PTR_ERR(description);
goto error;
}
/* get the keyring at which to begin the search */
keyring_ref = lookup_user_key(ringid, 0, KEY_NEED_SEARCH);
if (IS_ERR(keyring_ref)) {
ret = PTR_ERR(keyring_ref);
goto error2;
}
/* get the destination keyring if specified */
dest_ref = NULL;
if (destringid) {
dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE,
KEY_NEED_WRITE);
if (IS_ERR(dest_ref)) {
ret = PTR_ERR(dest_ref);
goto error3;
}
}
/* find the key type */
ktype = key_type_lookup(type);
if (IS_ERR(ktype)) {
ret = PTR_ERR(ktype);
goto error4;
}
/* do the search */
key_ref = keyring_search(keyring_ref, ktype, description);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
/* treat lack or presence of a negative key the same */
if (ret == -EAGAIN)
ret = -ENOKEY;
goto error5;
}
/* link the resulting key to the destination keyring if we can */
if (dest_ref) {
ret = key_permission(key_ref, KEY_NEED_LINK);
if (ret < 0)
goto error6;
ret = key_link(key_ref_to_ptr(dest_ref), key_ref_to_ptr(key_ref));
if (ret < 0)
goto error6;
}
ret = key_ref_to_ptr(key_ref)->serial;
error6:
key_ref_put(key_ref);
error5:
key_type_put(ktype);
error4:
key_ref_put(dest_ref);
error3:
key_ref_put(keyring_ref);
error2:
kfree(description);
error:
return ret;
}
/*
* Read a key's payload.
*
* The key must either grant the caller Read permission, or it must grant the
* caller Search permission when searched for from the process keyrings.
*
* If successful, we place up to buflen bytes of data into the buffer, if one
* is provided, and return the amount of data that is available in the key,
* irrespective of how much we copied into the buffer.
*/
long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
{
struct key *key;
key_ref_t key_ref;
long ret;
/* find the key first */
key_ref = lookup_user_key(keyid, 0, 0);
if (IS_ERR(key_ref)) {
ret = -ENOKEY;
goto error;
}
key = key_ref_to_ptr(key_ref);
/* see if we can read it directly */
ret = key_permission(key_ref, KEY_NEED_READ);
if (ret == 0)
goto can_read_key;
if (ret != -EACCES)
goto error;
/* we can't; see if it's searchable from this process's keyrings
* - we automatically take account of the fact that it may be
* dangling off an instantiation key
*/
if (!is_key_possessed(key_ref)) {
ret = -EACCES;
goto error2;
}
/* the key is probably readable - now try to read it */
can_read_key:
ret = -EOPNOTSUPP;
if (key->type->read) {
/* Read the data with the semaphore held (since we might sleep)
* to protect against the key being updated or revoked.
*/
down_read(&key->sem);
ret = key_validate(key);
if (ret == 0)
ret = key->type->read(key, buffer, buflen);
up_read(&key->sem);
}
error2:
key_put(key);
error:
return ret;
}
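/*
 * Illustrative userspace sketch (an assumption): keyctl_read() has the
 * same "returns the amount available" convention, so the same
 * probe-then-read pattern applies:
 *
 *	#include <keyutils.h>
 *	#include <stdlib.h>
 *
 *	long len = keyctl_read(id, NULL, 0);	// payload size
 *	void *buf = malloc(len);
 *	if (buf && keyctl_read(id, buf, len) >= 0)
 *		use_payload(buf, len);	// use_payload() is hypothetical
 */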
/*
* Change the ownership of a key
*
* The key must grant the caller Setattr permission for this to work, though
* the key need not be fully instantiated yet. For the UID to be changed, or
* for the GID to be changed to a group the caller is not a member of, the
* caller must have sysadmin capability. If either uid or gid is -1 then that
* attribute is not changed.
*
* If the UID is to be changed, the new user must have sufficient quota to
* accept the key. The quota deduction will be removed from the old user
* and applied to the new user should the attribute be changed.
*
* If successful, 0 will be returned.
*/
long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group)
{
struct key_user *newowner, *zapowner = NULL;
struct key *key;
key_ref_t key_ref;
long ret;
kuid_t uid;
kgid_t gid;
uid = make_kuid(current_user_ns(), user);
gid = make_kgid(current_user_ns(), group);
ret = -EINVAL;
if ((user != (uid_t) -1) && !uid_valid(uid))
goto error;
if ((group != (gid_t) -1) && !gid_valid(gid))
goto error;
ret = 0;
if (user == (uid_t) -1 && group == (gid_t) -1)
goto error;
key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
KEY_NEED_SETATTR);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error;
}
key = key_ref_to_ptr(key_ref);
/* make the changes with the locks held to prevent chown/chown races */
ret = -EACCES;
down_write(&key->sem);
if (!capable(CAP_SYS_ADMIN)) {
/* only the sysadmin can chown a key to some other UID */
if (user != (uid_t) -1 && !uid_eq(key->uid, uid))
goto error_put;
/* only the sysadmin can set the key's GID to a group other
* than one of those that the current process subscribes to */
if (group != (gid_t) -1 && !gid_eq(gid, key->gid) && !in_group_p(gid))
goto error_put;
}
/* change the UID */
if (user != (uid_t) -1 && !uid_eq(uid, key->uid)) {
ret = -ENOMEM;
newowner = key_user_lookup(uid);
if (!newowner)
goto error_put;
/* transfer the quota burden to the new user */
if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxkeys : key_quota_maxkeys;
unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxbytes : key_quota_maxbytes;
spin_lock(&newowner->lock);
if (newowner->qnkeys + 1 >= maxkeys ||
newowner->qnbytes + key->quotalen >= maxbytes ||
newowner->qnbytes + key->quotalen <
newowner->qnbytes)
goto quota_overrun;
newowner->qnkeys++;
newowner->qnbytes += key->quotalen;
spin_unlock(&newowner->lock);
spin_lock(&key->user->lock);
key->user->qnkeys--;
key->user->qnbytes -= key->quotalen;
spin_unlock(&key->user->lock);
}
atomic_dec(&key->user->nkeys);
atomic_inc(&newowner->nkeys);
if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
atomic_dec(&key->user->nikeys);
atomic_inc(&newowner->nikeys);
}
zapowner = key->user;
key->user = newowner;
key->uid = uid;
}
/* change the GID */
if (group != (gid_t) -1)
key->gid = gid;
ret = 0;
error_put:
up_write(&key->sem);
key_put(key);
if (zapowner)
key_user_put(zapowner);
error:
return ret;
quota_overrun:
spin_unlock(&newowner->lock);
zapowner = newowner;
ret = -EDQUOT;
goto error_put;
}
/*
* Change the permission mask on a key.
*
* The key must grant the caller Setattr permission for this to work, though
* the key need not be fully instantiated yet. If the caller does not have
* sysadmin capability, it may only change the permission on keys that it owns.
*/
long keyctl_setperm_key(key_serial_t id, key_perm_t perm)
{
struct key *key;
key_ref_t key_ref;
long ret;
ret = -EINVAL;
if (perm & ~(KEY_POS_ALL | KEY_USR_ALL | KEY_GRP_ALL | KEY_OTH_ALL))
goto error;
key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
KEY_NEED_SETATTR);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error;
}
key = key_ref_to_ptr(key_ref);
/* make the changes with the locks held to prevent chown/chmod races */
ret = -EACCES;
down_write(&key->sem);
/* if we're not the sysadmin, we can only change a key that we own */
if (capable(CAP_SYS_ADMIN) || uid_eq(key->uid, current_fsuid())) {
key->perm = perm;
ret = 0;
}
up_write(&key->sem);
key_put(key);
error:
return ret;
}
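/*
 * Illustrative userspace sketch (an assumption): a typical permission
 * mask built from the keyutils.h constants, granting the possessor
 * everything and the owner view/read only:
 *
 *	#include <keyutils.h>
 *
 *	if (keyctl_setperm(id, KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ) == -1)
 *		perror("keyctl_setperm");
 */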
/*
* Get the destination keyring for instantiation and check that the caller has
* Write permission on it.
*/
static long get_instantiation_keyring(key_serial_t ringid,
struct request_key_auth *rka,
struct key **_dest_keyring)
{
key_ref_t dkref;
*_dest_keyring = NULL;
/* just return a NULL pointer if we weren't asked to make a link */
if (ringid == 0)
return 0;
/* if a specific keyring is nominated by ID, then use that */
if (ringid > 0) {
dkref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
if (IS_ERR(dkref))
return PTR_ERR(dkref);
*_dest_keyring = key_ref_to_ptr(dkref);
return 0;
}
if (ringid == KEY_SPEC_REQKEY_AUTH_KEY)
return -EINVAL;
/* otherwise specify the destination keyring recorded in the
* authorisation key (any KEY_SPEC_*_KEYRING) */
if (ringid >= KEY_SPEC_REQUESTOR_KEYRING) {
*_dest_keyring = key_get(rka->dest_keyring);
return 0;
}
return -ENOKEY;
}
/*
* Change the request_key authorisation key on the current process.
*/
static int keyctl_change_reqkey_auth(struct key *key)
{
struct cred *new;
new = prepare_creds();
if (!new)
return -ENOMEM;
key_put(new->request_key_auth);
new->request_key_auth = key_get(key);
return commit_creds(new);
}
/*
* Instantiate a key with the specified payload and link the key into the
* destination keyring if one is given.
*
* The caller must have the appropriate instantiation permit set for this to
* work (see keyctl_assume_authority). No other permissions are required.
*
* If successful, 0 will be returned.
*/
long keyctl_instantiate_key_common(key_serial_t id,
struct iov_iter *from,
key_serial_t ringid)
{
const struct cred *cred = current_cred();
struct request_key_auth *rka;
struct key *instkey, *dest_keyring;
size_t plen = from ? iov_iter_count(from) : 0;
void *payload;
long ret;
kenter("%d,,%zu,%d", id, plen, ringid);
if (!plen)
from = NULL;
ret = -EINVAL;
if (plen > 1024 * 1024 - 1)
goto error;
/* the appropriate instantiation authorisation key must have been
* assumed before calling this */
ret = -EPERM;
instkey = cred->request_key_auth;
if (!instkey)
goto error;
rka = instkey->payload.data[0];
if (rka->target_key->serial != id)
goto error;
/* pull the payload in if one was supplied */
payload = NULL;
if (from) {
ret = -ENOMEM;
payload = kmalloc(plen, GFP_KERNEL);
if (!payload) {
if (plen <= PAGE_SIZE)
goto error;
payload = vmalloc(plen);
if (!payload)
goto error;
}
ret = -EFAULT;
if (!copy_from_iter_full(payload, plen, from))
goto error2;
}
/* find the destination keyring amongst those belonging to the
* requesting task */
ret = get_instantiation_keyring(ringid, rka, &dest_keyring);
if (ret < 0)
goto error2;
/* instantiate the key and link it into a keyring */
ret = key_instantiate_and_link(rka->target_key, payload, plen,
dest_keyring, instkey);
key_put(dest_keyring);
/* discard the assumed authority if it's just been disabled by
* instantiation of the key */
if (ret == 0)
keyctl_change_reqkey_auth(NULL);
error2:
kvfree(payload);
error:
return ret;
}
/*
* Instantiate a key with the specified payload and link the key into the
* destination keyring if one is given.
*
* The caller must have the appropriate instantiation permit set for this to
* work (see keyctl_assume_authority). No other permissions are required.
*
* If successful, 0 will be returned.
*/
long keyctl_instantiate_key(key_serial_t id,
const void __user *_payload,
size_t plen,
key_serial_t ringid)
{
if (_payload && plen) {
struct iovec iov;
struct iov_iter from;
int ret;
ret = import_single_range(WRITE, (void __user *)_payload, plen,
&iov, &from);
if (unlikely(ret))
return ret;
return keyctl_instantiate_key_common(id, &from, ringid);
}
return keyctl_instantiate_key_common(id, NULL, ringid);
}
/*
* Instantiate a key with the specified multipart payload and link the key into
* the destination keyring if one is given.
*
* The caller must have the appropriate instantiation permit set for this to
* work (see keyctl_assume_authority). No other permissions are required.
*
* If successful, 0 will be returned.
*/
long keyctl_instantiate_key_iov(key_serial_t id,
const struct iovec __user *_payload_iov,
unsigned ioc,
key_serial_t ringid)
{
struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
struct iov_iter from;
long ret;
if (!_payload_iov)
ioc = 0;
ret = import_iovec(WRITE, _payload_iov, ioc,
ARRAY_SIZE(iovstack), &iov, &from);
if (ret < 0)
return ret;
ret = keyctl_instantiate_key_common(id, &from, ringid);
kfree(iov);
return ret;
}
/*
* Negatively instantiate the key with the given timeout (in seconds) and link
* the key into the destination keyring if one is given.
*
* The caller must have the appropriate instantiation permit set for this to
* work (see keyctl_assume_authority). No other permissions are required.
*
* The key and any links to the key will be automatically garbage collected
* after the timeout expires.
*
* Negative keys are used to rate limit repeated request_key() calls by causing
* them to return -ENOKEY until the negative key expires.
*
* If successful, 0 will be returned.
*/
long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid)
{
return keyctl_reject_key(id, timeout, ENOKEY, ringid);
}
/*
* Negatively instantiate the key with the given timeout (in seconds) and error
* code and link the key into the destination keyring if one is given.
*
* The caller must have the appropriate instantiation permit set for this to
* work (see keyctl_assume_authority). No other permissions are required.
*
* The key and any links to the key will be automatically garbage collected
* after the timeout expires.
*
* Negative keys are used to rate limit repeated request_key() calls by causing
* them to return the specified error code until the negative key expires.
*
* If successful, 0 will be returned.
*/
long keyctl_reject_key(key_serial_t id, unsigned timeout, unsigned error,
key_serial_t ringid)
{
const struct cred *cred = current_cred();
struct request_key_auth *rka;
struct key *instkey, *dest_keyring;
long ret;
kenter("%d,%u,%u,%d", id, timeout, error, ringid);
/* must be a valid error code and mustn't be a kernel special */
if (error <= 0 ||
error >= MAX_ERRNO ||
error == ERESTARTSYS ||
error == ERESTARTNOINTR ||
error == ERESTARTNOHAND ||
error == ERESTART_RESTARTBLOCK)
return -EINVAL;
/* the appropriate instantiation authorisation key must have been
* assumed before calling this */
ret = -EPERM;
instkey = cred->request_key_auth;
if (!instkey)
goto error;
rka = instkey->payload.data[0];
if (rka->target_key->serial != id)
goto error;
/* find the destination keyring if present (which must also be
* writable) */
ret = get_instantiation_keyring(ringid, rka, &dest_keyring);
if (ret < 0)
goto error;
/* instantiate the key and link it into a keyring */
ret = key_reject_and_link(rka->target_key, timeout, error,
dest_keyring, instkey);
key_put(dest_keyring);
/* discard the assumed authority if it's just been disabled by
* instantiation of the key */
if (ret == 0)
keyctl_change_reqkey_auth(NULL);
error:
return ret;
}
/*
* Read or set the default keyring in which request_key() will cache keys and
* return the old setting.
*
* If a process keyring is specified then this will be created if it doesn't
* yet exist. The old setting will be returned if successful.
*/
long keyctl_set_reqkey_keyring(int reqkey_defl)
{
struct cred *new;
int ret, old_setting;
old_setting = current_cred_xxx(jit_keyring);
if (reqkey_defl == KEY_REQKEY_DEFL_NO_CHANGE)
return old_setting;
new = prepare_creds();
if (!new)
return -ENOMEM;
switch (reqkey_defl) {
case KEY_REQKEY_DEFL_THREAD_KEYRING:
ret = install_thread_keyring_to_cred(new);
if (ret < 0)
goto error;
goto set;
case KEY_REQKEY_DEFL_PROCESS_KEYRING:
ret = install_process_keyring_to_cred(new);
if (ret < 0) {
if (ret != -EEXIST)
goto error;
ret = 0;
}
goto set;
case KEY_REQKEY_DEFL_DEFAULT:
case KEY_REQKEY_DEFL_SESSION_KEYRING:
case KEY_REQKEY_DEFL_USER_KEYRING:
case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
case KEY_REQKEY_DEFL_REQUESTOR_KEYRING:
goto set;
case KEY_REQKEY_DEFL_NO_CHANGE:
case KEY_REQKEY_DEFL_GROUP_KEYRING:
default:
ret = -EINVAL;
goto error;
}
set:
new->jit_keyring = reqkey_defl;
commit_creds(new);
return old_setting;
error:
abort_creds(new);
return ret;
}
/*
* Set or clear the timeout on a key.
*
* Either the key must grant the caller Setattr permission or else the caller
* must hold an instantiation authorisation token for the key.
*
* The timeout is either 0 to clear the timeout, or a number of seconds from
* the current time. The key and any links to the key will be automatically
* garbage collected after the timeout expires.
*
* Keys with KEY_FLAG_KEEP set should not be timed out.
*
* If successful, 0 is returned.
*/
long keyctl_set_timeout(key_serial_t id, unsigned timeout)
{
struct key *key, *instkey;
key_ref_t key_ref;
long ret;
key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
KEY_NEED_SETATTR);
if (IS_ERR(key_ref)) {
/* setting the timeout on a key under construction is permitted
* if we have the authorisation token handy */
if (PTR_ERR(key_ref) == -EACCES) {
instkey = key_get_instantiation_authkey(id);
if (!IS_ERR(instkey)) {
key_put(instkey);
key_ref = lookup_user_key(id,
KEY_LOOKUP_PARTIAL,
0);
if (!IS_ERR(key_ref))
goto okay;
}
}
ret = PTR_ERR(key_ref);
goto error;
}
okay:
key = key_ref_to_ptr(key_ref);
ret = 0;
if (test_bit(KEY_FLAG_KEEP, &key->flags))
ret = -EPERM;
else
key_set_timeout(key, timeout);
key_put(key);
error:
return ret;
}
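/*
 * Illustrative userspace sketch (an assumption): giving a key a
 * 60-second lifetime; a timeout of 0 clears any expiry:
 *
 *	#include <keyutils.h>
 *
 *	if (keyctl_set_timeout(id, 60) == -1)
 *		perror("keyctl_set_timeout");
 */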
/*
* Assume (or clear) the authority to instantiate the specified key.
*
* This sets the authoritative token currently in force for key instantiation.
* This must be done before a key can be instantiated. It has the effect of
* making all the keys of the process that originally called request_key()
* available to request_key() calls made by the caller of this function.
*
* The caller must have the instantiation key in their process keyrings with a
* Search permission grant available to the caller.
*
* If the ID given is 0, then the setting will be cleared and 0 returned.
*
* If the ID given has a matching authorisation key, then that key will be
* set and its ID will be returned. The authorisation key can be read to get
* the callout information passed to request_key().
*/
long keyctl_assume_authority(key_serial_t id)
{
struct key *authkey;
long ret;
/* special key IDs aren't permitted */
ret = -EINVAL;
if (id < 0)
goto error;
/* we divest ourselves of authority if given an ID of 0 */
if (id == 0) {
ret = keyctl_change_reqkey_auth(NULL);
goto error;
}
/* attempt to assume the authority temporarily granted to us whilst we
* instantiate the specified key
* - the authorisation key must be in the current task's keyrings
* somewhere
*/
authkey = key_get_instantiation_authkey(id);
if (IS_ERR(authkey)) {
ret = PTR_ERR(authkey);
goto error;
}
ret = keyctl_change_reqkey_auth(authkey);
if (ret < 0)
goto error;
key_put(authkey);
ret = authkey->serial;
error:
return ret;
}
/*
* Get a key's LSM security label.
*
* The key must grant the caller View permission for this to work.
*
* If there's a buffer, then up to buflen bytes of data will be placed into it.
*
* If successful, the amount of information available will be returned,
* irrespective of how much was copied (including the terminal NUL).
*/
long keyctl_get_security(key_serial_t keyid,
char __user *buffer,
size_t buflen)
{
struct key *key, *instkey;
key_ref_t key_ref;
char *context;
long ret;
key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW);
if (IS_ERR(key_ref)) {
if (PTR_ERR(key_ref) != -EACCES)
return PTR_ERR(key_ref);
/* viewing a key under construction is also permitted if we
* have the authorisation token handy */
instkey = key_get_instantiation_authkey(keyid);
if (IS_ERR(instkey))
return PTR_ERR(instkey);
key_put(instkey);
key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, 0);
if (IS_ERR(key_ref))
return PTR_ERR(key_ref);
}
key = key_ref_to_ptr(key_ref);
ret = security_key_getsecurity(key, &context);
if (ret == 0) {
/* if no information was returned, give userspace an empty
* string */
ret = 1;
if (buffer && buflen > 0 &&
copy_to_user(buffer, "", 1) != 0)
ret = -EFAULT;
} else if (ret > 0) {
/* return as much data as there's room for */
if (buffer && buflen > 0) {
if (buflen > ret)
buflen = ret;
if (copy_to_user(buffer, context, buflen) != 0)
ret = -EFAULT;
}
kfree(context);
}
key_ref_put(key_ref);
return ret;
}
/*
* Attempt to install the calling process's session keyring on the process's
* parent process.
*
* The keyring must exist and must grant the caller LINK permission, and the
* parent process must be single-threaded and must have the same effective
* ownership as this process and mustn't be SUID/SGID.
*
* The keyring will be emplaced on the parent when it next resumes userspace.
*
* If successful, 0 will be returned.
*/
long keyctl_session_to_parent(void)
{
struct task_struct *me, *parent;
const struct cred *mycred, *pcred;
struct callback_head *newwork, *oldwork;
key_ref_t keyring_r;
struct cred *cred;
int ret;
keyring_r = lookup_user_key(KEY_SPEC_SESSION_KEYRING, 0, KEY_NEED_LINK);
if (IS_ERR(keyring_r))
return PTR_ERR(keyring_r);
ret = -ENOMEM;
/* our parent is going to need a new cred struct, a new tgcred struct
* and new security data, so we allocate them here to prevent ENOMEM in
* our parent */
cred = cred_alloc_blank();
if (!cred)
goto error_keyring;
newwork = &cred->rcu;
cred->session_keyring = key_ref_to_ptr(keyring_r);
keyring_r = NULL;
init_task_work(newwork, key_change_session_keyring);
me = current;
rcu_read_lock();
write_lock_irq(&tasklist_lock);
ret = -EPERM;
oldwork = NULL;
parent = me->real_parent;
/* the parent mustn't be init and mustn't be a kernel thread */
if (parent->pid <= 1 || !parent->mm)
goto unlock;
/* the parent must be single threaded */
if (!thread_group_empty(parent))
goto unlock;
/* the parent and the child must have different session keyrings or
* there's no point */
mycred = current_cred();
pcred = __task_cred(parent);
if (mycred == pcred ||
mycred->session_keyring == pcred->session_keyring) {
ret = 0;
goto unlock;
}
/* the parent must have the same effective ownership and mustn't be
* SUID/SGID */
if (!uid_eq(pcred->uid, mycred->euid) ||
!uid_eq(pcred->euid, mycred->euid) ||
!uid_eq(pcred->suid, mycred->euid) ||
!gid_eq(pcred->gid, mycred->egid) ||
!gid_eq(pcred->egid, mycred->egid) ||
!gid_eq(pcred->sgid, mycred->egid))
goto unlock;
/* the keyrings must have the same UID */
if ((pcred->session_keyring &&
!uid_eq(pcred->session_keyring->uid, mycred->euid)) ||
!uid_eq(mycred->session_keyring->uid, mycred->euid))
goto unlock;
/* cancel an already pending keyring replacement */
oldwork = task_work_cancel(parent, key_change_session_keyring);
/* the replacement session keyring is applied just prior to userspace
* restarting */
ret = task_work_add(parent, newwork, true);
if (!ret)
newwork = NULL;
unlock:
write_unlock_irq(&tasklist_lock);
rcu_read_unlock();
if (oldwork)
put_cred(container_of(oldwork, struct cred, rcu));
if (newwork)
put_cred(cred);
return ret;
error_keyring:
key_ref_put(keyring_r);
return ret;
}
/*
* The key control system call
*/
SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
unsigned long, arg4, unsigned long, arg5)
{
switch (option) {
case KEYCTL_GET_KEYRING_ID:
return keyctl_get_keyring_ID((key_serial_t) arg2,
(int) arg3);
case KEYCTL_JOIN_SESSION_KEYRING:
return keyctl_join_session_keyring((const char __user *) arg2);
case KEYCTL_UPDATE:
return keyctl_update_key((key_serial_t) arg2,
(const void __user *) arg3,
(size_t) arg4);
case KEYCTL_REVOKE:
return keyctl_revoke_key((key_serial_t) arg2);
case KEYCTL_DESCRIBE:
return keyctl_describe_key((key_serial_t) arg2,
(char __user *) arg3,
(unsigned) arg4);
case KEYCTL_CLEAR:
return keyctl_keyring_clear((key_serial_t) arg2);
case KEYCTL_LINK:
return keyctl_keyring_link((key_serial_t) arg2,
(key_serial_t) arg3);
case KEYCTL_UNLINK:
return keyctl_keyring_unlink((key_serial_t) arg2,
(key_serial_t) arg3);
case KEYCTL_SEARCH:
return keyctl_keyring_search((key_serial_t) arg2,
(const char __user *) arg3,
(const char __user *) arg4,
(key_serial_t) arg5);
case KEYCTL_READ:
return keyctl_read_key((key_serial_t) arg2,
(char __user *) arg3,
(size_t) arg4);
case KEYCTL_CHOWN:
return keyctl_chown_key((key_serial_t) arg2,
(uid_t) arg3,
(gid_t) arg4);
case KEYCTL_SETPERM:
return keyctl_setperm_key((key_serial_t) arg2,
(key_perm_t) arg3);
case KEYCTL_INSTANTIATE:
return keyctl_instantiate_key((key_serial_t) arg2,
(const void __user *) arg3,
(size_t) arg4,
(key_serial_t) arg5);
case KEYCTL_NEGATE:
return keyctl_negate_key((key_serial_t) arg2,
(unsigned) arg3,
(key_serial_t) arg4);
case KEYCTL_SET_REQKEY_KEYRING:
return keyctl_set_reqkey_keyring(arg2);
case KEYCTL_SET_TIMEOUT:
return keyctl_set_timeout((key_serial_t) arg2,
(unsigned) arg3);
case KEYCTL_ASSUME_AUTHORITY:
return keyctl_assume_authority((key_serial_t) arg2);
case KEYCTL_GET_SECURITY:
return keyctl_get_security((key_serial_t) arg2,
(char __user *) arg3,
(size_t) arg4);
case KEYCTL_SESSION_TO_PARENT:
return keyctl_session_to_parent();
case KEYCTL_REJECT:
return keyctl_reject_key((key_serial_t) arg2,
(unsigned) arg3,
(unsigned) arg4,
(key_serial_t) arg5);
case KEYCTL_INSTANTIATE_IOV:
return keyctl_instantiate_key_iov(
(key_serial_t) arg2,
(const struct iovec __user *) arg3,
(unsigned) arg4,
(key_serial_t) arg5);
case KEYCTL_INVALIDATE:
return keyctl_invalidate_key((key_serial_t) arg2);
case KEYCTL_GET_PERSISTENT:
return keyctl_get_persistent((uid_t)arg2, (key_serial_t)arg3);
case KEYCTL_DH_COMPUTE:
return keyctl_dh_compute((struct keyctl_dh_params __user *) arg2,
(char __user *) arg3, (size_t) arg4,
(void __user *) arg5);
default:
return -EOPNOTSUPP;
}
}
| ./CrossVul/dataset_final_sorted/CWE-404/c/bad_3267_0 |
crossvul-cpp_data_good_3351_6 | /*
* Server-side XDR for NFSv4
*
* Copyright (c) 2002 The Regents of the University of Michigan.
* All rights reserved.
*
* Kendrick Smith <kmsmith@umich.edu>
* Andy Adamson <andros@umich.edu>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/fs_struct.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/statfs.h>
#include <linux/utsname.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/svcauth_gss.h>
#include "idmap.h"
#include "acl.h"
#include "xdr4.h"
#include "vfs.h"
#include "state.h"
#include "cache.h"
#include "netns.h"
#include "pnfs.h"
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
#include <linux/security.h>
#endif
#define NFSDDBG_FACILITY NFSDDBG_XDR
const u32 nfsd_suppattrs[3][3] = {
{NFSD4_SUPPORTED_ATTRS_WORD0,
NFSD4_SUPPORTED_ATTRS_WORD1,
NFSD4_SUPPORTED_ATTRS_WORD2},
{NFSD4_1_SUPPORTED_ATTRS_WORD0,
NFSD4_1_SUPPORTED_ATTRS_WORD1,
NFSD4_1_SUPPORTED_ATTRS_WORD2},
{NFSD4_1_SUPPORTED_ATTRS_WORD0,
NFSD4_1_SUPPORTED_ATTRS_WORD1,
NFSD4_2_SUPPORTED_ATTRS_WORD2},
};
/*
* As per referral draft, the fsid for a referral MUST be different from the fsid of the containing
* directory in order to indicate to the client that a filesystem boundary is present.
* We use a fixed fsid for a referral.
*/
#define NFS4_REFERRAL_FSID_MAJOR 0x8000000ULL
#define NFS4_REFERRAL_FSID_MINOR 0x8000000ULL
static __be32
check_filename(char *str, int len)
{
int i;
if (len == 0)
return nfserr_inval;
if (isdotent(str, len))
return nfserr_badname;
for (i = 0; i < len; i++)
if (str[i] == '/')
return nfserr_badname;
return 0;
}
#define DECODE_HEAD \
__be32 *p; \
__be32 status
#define DECODE_TAIL \
status = 0; \
out: \
return status; \
xdr_error: \
dprintk("NFSD: xdr error (%s:%d)\n", \
__FILE__, __LINE__); \
status = nfserr_bad_xdr; \
goto out
#define READMEM(x,nbytes) do { \
x = (char *)p; \
p += XDR_QUADLEN(nbytes); \
} while (0)
#define SAVEMEM(x,nbytes) do { \
if (!(x = (p==argp->tmp || p == argp->tmpp) ? \
savemem(argp, p, nbytes) : \
(char *)p)) { \
dprintk("NFSD: xdr error (%s:%d)\n", \
__FILE__, __LINE__); \
goto xdr_error; \
} \
p += XDR_QUADLEN(nbytes); \
} while (0)
#define COPYMEM(x,nbytes) do { \
memcpy((x), p, nbytes); \
p += XDR_QUADLEN(nbytes); \
} while (0)
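/*
 * How the three accessor macros above differ (as used throughout the
 * decoders below): READMEM hands back a pointer straight into the decode
 * buffer, so it is only valid while the current page is live; SAVEMEM
 * duplicates the bytes via savemem() whenever p points into the bounce
 * buffers (argp->tmp/tmpp), which the next READ_BUF would overwrite;
 * COPYMEM copies into caller-supplied storage. All three advance p by
 * the XDR-rounded length.
 */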
/* READ_BUF, read_buf(): nbytes must be <= PAGE_SIZE */
#define READ_BUF(nbytes) do { \
if (nbytes <= (u32)((char *)argp->end - (char *)argp->p)) { \
p = argp->p; \
argp->p += XDR_QUADLEN(nbytes); \
} else if (!(p = read_buf(argp, nbytes))) { \
dprintk("NFSD: xdr error (%s:%d)\n", \
__FILE__, __LINE__); \
goto xdr_error; \
} \
} while (0)
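/*
 * READ_BUF itself is the fast path: when the request fits in the current
 * page it just advances argp->p. Otherwise read_buf() below stitches the
 * tail of this page and the head of the next into one contiguous bounce
 * buffer (argp->tmp for small requests, a kmalloc'd argp->tmpp for larger
 * ones), which is why callers must keep nbytes <= PAGE_SIZE.
 */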
static void next_decode_page(struct nfsd4_compoundargs *argp)
{
argp->p = page_address(argp->pagelist[0]);
argp->pagelist++;
if (argp->pagelen < PAGE_SIZE) {
argp->end = argp->p + (argp->pagelen>>2);
argp->pagelen = 0;
} else {
argp->end = argp->p + (PAGE_SIZE>>2);
argp->pagelen -= PAGE_SIZE;
}
}
static __be32 *read_buf(struct nfsd4_compoundargs *argp, u32 nbytes)
{
/* We want more bytes than seem to be available.
* Maybe we need a new page, maybe we have just run out
*/
unsigned int avail = (char *)argp->end - (char *)argp->p;
__be32 *p;
if (avail + argp->pagelen < nbytes)
return NULL;
if (avail + PAGE_SIZE < nbytes) /* need more than a page !! */
return NULL;
/* ok, we can do it with the current plus the next page */
if (nbytes <= sizeof(argp->tmp))
p = argp->tmp;
else {
kfree(argp->tmpp);
p = argp->tmpp = kmalloc(nbytes, GFP_KERNEL);
if (!p)
return NULL;
}
/*
* The following memcpy is safe because read_buf is always
* called with nbytes > avail, and the two cases above both
* guarantee p points to at least nbytes bytes.
*/
memcpy(p, argp->p, avail);
next_decode_page(argp);
memcpy(((char*)p)+avail, argp->p, (nbytes - avail));
argp->p += XDR_QUADLEN(nbytes - avail);
return p;
}
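/*
 * Worked example with illustrative numbers: if avail == 8 bytes remain in
 * the current page and the caller asks for nbytes == 20, the first memcpy
 * preserves those 8 bytes in the bounce buffer, next_decode_page()
 * switches argp->p to the following page, and the second memcpy appends
 * the remaining 12 bytes; argp->p then advances by XDR_QUADLEN(12) words.
 */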
static int zero_clientid(clientid_t *clid)
{
return (clid->cl_boot == 0) && (clid->cl_id == 0);
}
/**
* svcxdr_tmpalloc - allocate memory to be freed after compound processing
* @argp: NFSv4 compound argument structure
* @len: length of buffer to allocate
*
* Allocates a buffer of @len bytes that is freed when processing of the
* compound operation described in @argp finishes.
*/
static void *
svcxdr_tmpalloc(struct nfsd4_compoundargs *argp, u32 len)
{
struct svcxdr_tmpbuf *tb;
tb = kmalloc(sizeof(*tb) + len, GFP_KERNEL);
if (!tb)
return NULL;
tb->next = argp->to_free;
argp->to_free = tb;
return tb->buf;
}
/*
* For xdr strings that need to be passed to other kernel api's
* as null-terminated strings.
*
* Note null-terminating in place usually isn't safe since the
* buffer might end on a page boundary.
*/
static char *
svcxdr_dupstr(struct nfsd4_compoundargs *argp, void *buf, u32 len)
{
char *p = svcxdr_tmpalloc(argp, len + 1);
if (!p)
return NULL;
memcpy(p, buf, len);
p[len] = '\0';
return p;
}
/**
* savemem - duplicate a chunk of memory for later processing
* @argp: NFSv4 compound argument structure
* @p: pointer to be duplicated
* @nbytes: length to be duplicated
*
* Returns a pointer to a copy of @nbytes bytes of memory at @p
* that are preserved until processing of the NFSv4 compound
* operation described by @argp finishes.
*/
static char *savemem(struct nfsd4_compoundargs *argp, __be32 *p, int nbytes)
{
void *ret;
ret = svcxdr_tmpalloc(argp, nbytes);
if (!ret)
return NULL;
memcpy(ret, p, nbytes);
return ret;
}
/*
* We require the high 32 bits of 'seconds' to be 0, and
* we ignore all 32 bits of 'nseconds'.
*/
static __be32
nfsd4_decode_time(struct nfsd4_compoundargs *argp, struct timespec *tv)
{
DECODE_HEAD;
u64 sec;
READ_BUF(12);
p = xdr_decode_hyper(p, &sec);
tv->tv_sec = sec;
tv->tv_nsec = be32_to_cpup(p++);
if (tv->tv_nsec >= (u32)1000000000)
return nfserr_inval;
DECODE_TAIL;
}
static __be32
nfsd4_decode_bitmap(struct nfsd4_compoundargs *argp, u32 *bmval)
{
u32 bmlen;
DECODE_HEAD;
bmval[0] = 0;
bmval[1] = 0;
bmval[2] = 0;
READ_BUF(4);
bmlen = be32_to_cpup(p++);
if (bmlen > 1000)
goto xdr_error;
READ_BUF(bmlen << 2);
if (bmlen > 0)
bmval[0] = be32_to_cpup(p++);
if (bmlen > 1)
bmval[1] = be32_to_cpup(p++);
if (bmlen > 2)
bmval[2] = be32_to_cpup(p++);
DECODE_TAIL;
}
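/*
 * A bitmap4 is a counted array of 32-bit words on the wire. Only the
 * first three words mean anything to this server, but the
 * READ_BUF(bmlen << 2) above already consumed the whole array, so a
 * client that sends extra words (up to the 1000-word sanity cap) is
 * tolerated and the excess is skipped.
 */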
static __be32
nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
struct iattr *iattr, struct nfs4_acl **acl,
struct xdr_netobj *label, int *umask)
{
int expected_len, len = 0;
u32 dummy32;
char *buf;
DECODE_HEAD;
iattr->ia_valid = 0;
if ((status = nfsd4_decode_bitmap(argp, bmval)))
return status;
if (bmval[0] & ~NFSD_WRITEABLE_ATTRS_WORD0
|| bmval[1] & ~NFSD_WRITEABLE_ATTRS_WORD1
|| bmval[2] & ~NFSD_WRITEABLE_ATTRS_WORD2) {
if (nfsd_attrs_supported(argp->minorversion, bmval))
return nfserr_inval;
return nfserr_attrnotsupp;
}
READ_BUF(4);
expected_len = be32_to_cpup(p++);
if (bmval[0] & FATTR4_WORD0_SIZE) {
READ_BUF(8);
len += 8;
p = xdr_decode_hyper(p, &iattr->ia_size);
iattr->ia_valid |= ATTR_SIZE;
}
if (bmval[0] & FATTR4_WORD0_ACL) {
u32 nace;
struct nfs4_ace *ace;
READ_BUF(4); len += 4;
nace = be32_to_cpup(p++);
if (nace > NFS4_ACL_MAX)
return nfserr_fbig;
*acl = svcxdr_tmpalloc(argp, nfs4_acl_bytes(nace));
if (*acl == NULL)
return nfserr_jukebox;
(*acl)->naces = nace;
for (ace = (*acl)->aces; ace < (*acl)->aces + nace; ace++) {
READ_BUF(16); len += 16;
ace->type = be32_to_cpup(p++);
ace->flag = be32_to_cpup(p++);
ace->access_mask = be32_to_cpup(p++);
dummy32 = be32_to_cpup(p++);
READ_BUF(dummy32);
len += XDR_QUADLEN(dummy32) << 2;
READMEM(buf, dummy32);
ace->whotype = nfs4_acl_get_whotype(buf, dummy32);
status = nfs_ok;
if (ace->whotype != NFS4_ACL_WHO_NAMED)
;
else if (ace->flag & NFS4_ACE_IDENTIFIER_GROUP)
status = nfsd_map_name_to_gid(argp->rqstp,
buf, dummy32, &ace->who_gid);
else
status = nfsd_map_name_to_uid(argp->rqstp,
buf, dummy32, &ace->who_uid);
if (status)
return status;
}
} else
*acl = NULL;
if (bmval[1] & FATTR4_WORD1_MODE) {
READ_BUF(4);
len += 4;
iattr->ia_mode = be32_to_cpup(p++);
iattr->ia_mode &= (S_IFMT | S_IALLUGO);
iattr->ia_valid |= ATTR_MODE;
}
if (bmval[1] & FATTR4_WORD1_OWNER) {
READ_BUF(4);
len += 4;
dummy32 = be32_to_cpup(p++);
READ_BUF(dummy32);
len += (XDR_QUADLEN(dummy32) << 2);
READMEM(buf, dummy32);
if ((status = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid)))
return status;
iattr->ia_valid |= ATTR_UID;
}
if (bmval[1] & FATTR4_WORD1_OWNER_GROUP) {
READ_BUF(4);
len += 4;
dummy32 = be32_to_cpup(p++);
READ_BUF(dummy32);
len += (XDR_QUADLEN(dummy32) << 2);
READMEM(buf, dummy32);
if ((status = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid)))
return status;
iattr->ia_valid |= ATTR_GID;
}
if (bmval[1] & FATTR4_WORD1_TIME_ACCESS_SET) {
READ_BUF(4);
len += 4;
dummy32 = be32_to_cpup(p++);
switch (dummy32) {
case NFS4_SET_TO_CLIENT_TIME:
len += 12;
status = nfsd4_decode_time(argp, &iattr->ia_atime);
if (status)
return status;
iattr->ia_valid |= (ATTR_ATIME | ATTR_ATIME_SET);
break;
case NFS4_SET_TO_SERVER_TIME:
iattr->ia_valid |= ATTR_ATIME;
break;
default:
goto xdr_error;
}
}
if (bmval[1] & FATTR4_WORD1_TIME_MODIFY_SET) {
READ_BUF(4);
len += 4;
dummy32 = be32_to_cpup(p++);
switch (dummy32) {
case NFS4_SET_TO_CLIENT_TIME:
len += 12;
status = nfsd4_decode_time(argp, &iattr->ia_mtime);
if (status)
return status;
iattr->ia_valid |= (ATTR_MTIME | ATTR_MTIME_SET);
break;
case NFS4_SET_TO_SERVER_TIME:
iattr->ia_valid |= ATTR_MTIME;
break;
default:
goto xdr_error;
}
}
label->len = 0;
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
if (bmval[2] & FATTR4_WORD2_SECURITY_LABEL) {
READ_BUF(4);
len += 4;
dummy32 = be32_to_cpup(p++); /* lfs: we don't use it */
READ_BUF(4);
len += 4;
dummy32 = be32_to_cpup(p++); /* pi: we don't use it either */
READ_BUF(4);
len += 4;
dummy32 = be32_to_cpup(p++);
READ_BUF(dummy32);
if (dummy32 > NFS4_MAXLABELLEN)
return nfserr_badlabel;
len += (XDR_QUADLEN(dummy32) << 2);
READMEM(buf, dummy32);
label->len = dummy32;
label->data = svcxdr_dupstr(argp, buf, dummy32);
if (!label->data)
return nfserr_jukebox;
}
#endif
if (bmval[2] & FATTR4_WORD2_MODE_UMASK) {
if (!umask)
goto xdr_error;
READ_BUF(8);
len += 8;
dummy32 = be32_to_cpup(p++);
iattr->ia_mode = dummy32 & (S_IFMT | S_IALLUGO);
dummy32 = be32_to_cpup(p++);
*umask = dummy32 & S_IRWXUGO;
iattr->ia_valid |= ATTR_MODE;
}
if (len != expected_len)
goto xdr_error;
DECODE_TAIL;
}
static __be32
nfsd4_decode_stateid(struct nfsd4_compoundargs *argp, stateid_t *sid)
{
DECODE_HEAD;
READ_BUF(sizeof(stateid_t));
sid->si_generation = be32_to_cpup(p++);
COPYMEM(&sid->si_opaque, sizeof(stateid_opaque_t));
DECODE_TAIL;
}
static __be32
nfsd4_decode_access(struct nfsd4_compoundargs *argp, struct nfsd4_access *access)
{
DECODE_HEAD;
READ_BUF(4);
access->ac_req_access = be32_to_cpup(p++);
DECODE_TAIL;
}
static __be32 nfsd4_decode_cb_sec(struct nfsd4_compoundargs *argp, struct nfsd4_cb_sec *cbs)
{
DECODE_HEAD;
u32 dummy, uid, gid;
char *machine_name;
int i;
int nr_secflavs;
/* callback_sec_params4 */
READ_BUF(4);
nr_secflavs = be32_to_cpup(p++);
if (nr_secflavs)
cbs->flavor = (u32)(-1);
else
/* Is this legal? Be generous, take it to mean AUTH_NONE: */
cbs->flavor = 0;
for (i = 0; i < nr_secflavs; ++i) {
READ_BUF(4);
dummy = be32_to_cpup(p++);
switch (dummy) {
case RPC_AUTH_NULL:
/* Nothing to read */
if (cbs->flavor == (u32)(-1))
cbs->flavor = RPC_AUTH_NULL;
break;
case RPC_AUTH_UNIX:
READ_BUF(8);
/* stamp */
dummy = be32_to_cpup(p++);
/* machine name */
dummy = be32_to_cpup(p++);
READ_BUF(dummy);
SAVEMEM(machine_name, dummy);
/* uid, gid */
READ_BUF(8);
uid = be32_to_cpup(p++);
gid = be32_to_cpup(p++);
/* more gids */
READ_BUF(4);
dummy = be32_to_cpup(p++);
READ_BUF(dummy * 4);
if (cbs->flavor == (u32)(-1)) {
kuid_t kuid = make_kuid(&init_user_ns, uid);
kgid_t kgid = make_kgid(&init_user_ns, gid);
if (uid_valid(kuid) && gid_valid(kgid)) {
cbs->uid = kuid;
cbs->gid = kgid;
cbs->flavor = RPC_AUTH_UNIX;
} else {
dprintk("RPC_AUTH_UNIX with invalid"
"uid or gid ignoring!\n");
}
}
break;
case RPC_AUTH_GSS:
dprintk("RPC_AUTH_GSS callback secflavor "
"not supported!\n");
READ_BUF(8);
/* gcbp_service */
dummy = be32_to_cpup(p++);
/* gcbp_handle_from_server */
dummy = be32_to_cpup(p++);
READ_BUF(dummy);
p += XDR_QUADLEN(dummy);
/* gcbp_handle_from_client */
READ_BUF(4);
dummy = be32_to_cpup(p++);
READ_BUF(dummy);
break;
default:
dprintk("Illegal callback secflavor\n");
return nfserr_inval;
}
}
DECODE_TAIL;
}
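/*
 * Policy implemented above: the server walks the client's list of
 * callback security flavors in order and keeps the first one it can
 * actually use (AUTH_NULL or AUTH_UNIX); RPC_AUTH_GSS entries are parsed
 * so the stream stays aligned but are otherwise ignored.
 */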
static __be32 nfsd4_decode_backchannel_ctl(struct nfsd4_compoundargs *argp, struct nfsd4_backchannel_ctl *bc)
{
DECODE_HEAD;
READ_BUF(4);
bc->bc_cb_program = be32_to_cpup(p++);
nfsd4_decode_cb_sec(argp, &bc->bc_cb_sec);
DECODE_TAIL;
}
static __be32 nfsd4_decode_bind_conn_to_session(struct nfsd4_compoundargs *argp, struct nfsd4_bind_conn_to_session *bcts)
{
DECODE_HEAD;
READ_BUF(NFS4_MAX_SESSIONID_LEN + 8);
COPYMEM(bcts->sessionid.data, NFS4_MAX_SESSIONID_LEN);
bcts->dir = be32_to_cpup(p++);
/* XXX: skipping ctsa_use_conn_in_rdma_mode. Perhaps Tom Tucker
* could help us figure out how we should be using it. */
DECODE_TAIL;
}
static __be32
nfsd4_decode_close(struct nfsd4_compoundargs *argp, struct nfsd4_close *close)
{
DECODE_HEAD;
READ_BUF(4);
close->cl_seqid = be32_to_cpup(p++);
return nfsd4_decode_stateid(argp, &close->cl_stateid);
DECODE_TAIL;
}
static __be32
nfsd4_decode_commit(struct nfsd4_compoundargs *argp, struct nfsd4_commit *commit)
{
DECODE_HEAD;
READ_BUF(12);
p = xdr_decode_hyper(p, &commit->co_offset);
commit->co_count = be32_to_cpup(p++);
DECODE_TAIL;
}
static __be32
nfsd4_decode_create(struct nfsd4_compoundargs *argp, struct nfsd4_create *create)
{
DECODE_HEAD;
READ_BUF(4);
create->cr_type = be32_to_cpup(p++);
switch (create->cr_type) {
case NF4LNK:
READ_BUF(4);
create->cr_datalen = be32_to_cpup(p++);
READ_BUF(create->cr_datalen);
create->cr_data = svcxdr_dupstr(argp, p, create->cr_datalen);
if (!create->cr_data)
return nfserr_jukebox;
break;
case NF4BLK:
case NF4CHR:
READ_BUF(8);
create->cr_specdata1 = be32_to_cpup(p++);
create->cr_specdata2 = be32_to_cpup(p++);
break;
case NF4SOCK:
case NF4FIFO:
case NF4DIR:
default:
break;
}
READ_BUF(4);
create->cr_namelen = be32_to_cpup(p++);
READ_BUF(create->cr_namelen);
SAVEMEM(create->cr_name, create->cr_namelen);
if ((status = check_filename(create->cr_name, create->cr_namelen)))
return status;
status = nfsd4_decode_fattr(argp, create->cr_bmval, &create->cr_iattr,
&create->cr_acl, &create->cr_label,
&current->fs->umask);
if (status)
goto out;
DECODE_TAIL;
}
static inline __be32
nfsd4_decode_delegreturn(struct nfsd4_compoundargs *argp, struct nfsd4_delegreturn *dr)
{
return nfsd4_decode_stateid(argp, &dr->dr_stateid);
}
static inline __be32
nfsd4_decode_getattr(struct nfsd4_compoundargs *argp, struct nfsd4_getattr *getattr)
{
return nfsd4_decode_bitmap(argp, getattr->ga_bmval);
}
static __be32
nfsd4_decode_link(struct nfsd4_compoundargs *argp, struct nfsd4_link *link)
{
DECODE_HEAD;
READ_BUF(4);
link->li_namelen = be32_to_cpup(p++);
READ_BUF(link->li_namelen);
SAVEMEM(link->li_name, link->li_namelen);
if ((status = check_filename(link->li_name, link->li_namelen)))
return status;
DECODE_TAIL;
}
static __be32
nfsd4_decode_lock(struct nfsd4_compoundargs *argp, struct nfsd4_lock *lock)
{
DECODE_HEAD;
/*
* type, reclaim(boolean), offset, length, new_lock_owner(boolean)
*/
READ_BUF(28);
lock->lk_type = be32_to_cpup(p++);
if ((lock->lk_type < NFS4_READ_LT) || (lock->lk_type > NFS4_WRITEW_LT))
goto xdr_error;
lock->lk_reclaim = be32_to_cpup(p++);
p = xdr_decode_hyper(p, &lock->lk_offset);
p = xdr_decode_hyper(p, &lock->lk_length);
lock->lk_is_new = be32_to_cpup(p++);
if (lock->lk_is_new) {
READ_BUF(4);
lock->lk_new_open_seqid = be32_to_cpup(p++);
status = nfsd4_decode_stateid(argp, &lock->lk_new_open_stateid);
if (status)
return status;
READ_BUF(8 + sizeof(clientid_t));
lock->lk_new_lock_seqid = be32_to_cpup(p++);
COPYMEM(&lock->lk_new_clientid, sizeof(clientid_t));
lock->lk_new_owner.len = be32_to_cpup(p++);
READ_BUF(lock->lk_new_owner.len);
READMEM(lock->lk_new_owner.data, lock->lk_new_owner.len);
} else {
status = nfsd4_decode_stateid(argp, &lock->lk_old_lock_stateid);
if (status)
return status;
READ_BUF(4);
lock->lk_old_lock_seqid = be32_to_cpup(p++);
}
DECODE_TAIL;
}
static __be32
nfsd4_decode_lockt(struct nfsd4_compoundargs *argp, struct nfsd4_lockt *lockt)
{
DECODE_HEAD;
READ_BUF(32);
lockt->lt_type = be32_to_cpup(p++);
if ((lockt->lt_type < NFS4_READ_LT) || (lockt->lt_type > NFS4_WRITEW_LT))
goto xdr_error;
p = xdr_decode_hyper(p, &lockt->lt_offset);
p = xdr_decode_hyper(p, &lockt->lt_length);
COPYMEM(&lockt->lt_clientid, 8);
lockt->lt_owner.len = be32_to_cpup(p++);
READ_BUF(lockt->lt_owner.len);
READMEM(lockt->lt_owner.data, lockt->lt_owner.len);
DECODE_TAIL;
}
static __be32
nfsd4_decode_locku(struct nfsd4_compoundargs *argp, struct nfsd4_locku *locku)
{
DECODE_HEAD;
READ_BUF(8);
locku->lu_type = be32_to_cpup(p++);
if ((locku->lu_type < NFS4_READ_LT) || (locku->lu_type > NFS4_WRITEW_LT))
goto xdr_error;
locku->lu_seqid = be32_to_cpup(p++);
status = nfsd4_decode_stateid(argp, &locku->lu_stateid);
if (status)
return status;
READ_BUF(16);
p = xdr_decode_hyper(p, &locku->lu_offset);
p = xdr_decode_hyper(p, &locku->lu_length);
DECODE_TAIL;
}
static __be32
nfsd4_decode_lookup(struct nfsd4_compoundargs *argp, struct nfsd4_lookup *lookup)
{
DECODE_HEAD;
READ_BUF(4);
lookup->lo_len = be32_to_cpup(p++);
READ_BUF(lookup->lo_len);
SAVEMEM(lookup->lo_name, lookup->lo_len);
if ((status = check_filename(lookup->lo_name, lookup->lo_len)))
return status;
DECODE_TAIL;
}
static __be32 nfsd4_decode_share_access(struct nfsd4_compoundargs *argp, u32 *share_access, u32 *deleg_want, u32 *deleg_when)
{
__be32 *p;
u32 w;
READ_BUF(4);
w = be32_to_cpup(p++);
*share_access = w & NFS4_SHARE_ACCESS_MASK;
*deleg_want = w & NFS4_SHARE_WANT_MASK;
if (deleg_when)
*deleg_when = w & NFS4_SHARE_WHEN_MASK;
switch (w & NFS4_SHARE_ACCESS_MASK) {
case NFS4_SHARE_ACCESS_READ:
case NFS4_SHARE_ACCESS_WRITE:
case NFS4_SHARE_ACCESS_BOTH:
break;
default:
return nfserr_bad_xdr;
}
w &= ~NFS4_SHARE_ACCESS_MASK;
if (!w)
return nfs_ok;
if (!argp->minorversion)
return nfserr_bad_xdr;
switch (w & NFS4_SHARE_WANT_MASK) {
case NFS4_SHARE_WANT_NO_PREFERENCE:
case NFS4_SHARE_WANT_READ_DELEG:
case NFS4_SHARE_WANT_WRITE_DELEG:
case NFS4_SHARE_WANT_ANY_DELEG:
case NFS4_SHARE_WANT_NO_DELEG:
case NFS4_SHARE_WANT_CANCEL:
break;
default:
return nfserr_bad_xdr;
}
w &= ~NFS4_SHARE_WANT_MASK;
if (!w)
return nfs_ok;
if (!deleg_when) /* open_downgrade */
return nfserr_inval;
switch (w) {
case NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL:
case NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED:
case (NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL |
NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED):
return nfs_ok;
}
xdr_error:
return nfserr_bad_xdr;
}
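/*
 * The decode above validates the word in three layers, stripping each
 * mask as it goes: the access bits must name a real mode, want bits are
 * only legal on v4.1+, and when bits are rejected for OPEN_DOWNGRADE
 * (which passes deleg_when == NULL).
 */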
static __be32 nfsd4_decode_share_deny(struct nfsd4_compoundargs *argp, u32 *x)
{
__be32 *p;
READ_BUF(4);
*x = be32_to_cpup(p++);
/* Note: unlike access bits, deny bits may be zero. */
if (*x & ~NFS4_SHARE_DENY_BOTH)
return nfserr_bad_xdr;
return nfs_ok;
xdr_error:
return nfserr_bad_xdr;
}
static __be32 nfsd4_decode_opaque(struct nfsd4_compoundargs *argp, struct xdr_netobj *o)
{
__be32 *p;
READ_BUF(4);
o->len = be32_to_cpup(p++);
if (o->len == 0 || o->len > NFS4_OPAQUE_LIMIT)
return nfserr_bad_xdr;
READ_BUF(o->len);
SAVEMEM(o->data, o->len);
return nfs_ok;
xdr_error:
return nfserr_bad_xdr;
}
static __be32
nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
{
DECODE_HEAD;
u32 dummy;
memset(open->op_bmval, 0, sizeof(open->op_bmval));
open->op_iattr.ia_valid = 0;
open->op_openowner = NULL;
open->op_xdr_error = 0;
/* seqid, share_access, share_deny, clientid, ownerlen */
READ_BUF(4);
open->op_seqid = be32_to_cpup(p++);
/* decode, yet ignore deleg_when until supported */
status = nfsd4_decode_share_access(argp, &open->op_share_access,
&open->op_deleg_want, &dummy);
if (status)
goto xdr_error;
status = nfsd4_decode_share_deny(argp, &open->op_share_deny);
if (status)
goto xdr_error;
READ_BUF(sizeof(clientid_t));
COPYMEM(&open->op_clientid, sizeof(clientid_t));
status = nfsd4_decode_opaque(argp, &open->op_owner);
if (status)
goto xdr_error;
READ_BUF(4);
open->op_create = be32_to_cpup(p++);
switch (open->op_create) {
case NFS4_OPEN_NOCREATE:
break;
case NFS4_OPEN_CREATE:
current->fs->umask = 0;
READ_BUF(4);
open->op_createmode = be32_to_cpup(p++);
switch (open->op_createmode) {
case NFS4_CREATE_UNCHECKED:
case NFS4_CREATE_GUARDED:
status = nfsd4_decode_fattr(argp, open->op_bmval,
&open->op_iattr, &open->op_acl, &open->op_label,
&current->fs->umask);
if (status)
goto out;
break;
case NFS4_CREATE_EXCLUSIVE:
READ_BUF(NFS4_VERIFIER_SIZE);
COPYMEM(open->op_verf.data, NFS4_VERIFIER_SIZE);
break;
case NFS4_CREATE_EXCLUSIVE4_1:
if (argp->minorversion < 1)
goto xdr_error;
READ_BUF(NFS4_VERIFIER_SIZE);
COPYMEM(open->op_verf.data, NFS4_VERIFIER_SIZE);
status = nfsd4_decode_fattr(argp, open->op_bmval,
&open->op_iattr, &open->op_acl, &open->op_label,
&current->fs->umask);
if (status)
goto out;
break;
default:
goto xdr_error;
}
break;
default:
goto xdr_error;
}
/* open_claim */
READ_BUF(4);
open->op_claim_type = be32_to_cpup(p++);
switch (open->op_claim_type) {
case NFS4_OPEN_CLAIM_NULL:
case NFS4_OPEN_CLAIM_DELEGATE_PREV:
READ_BUF(4);
open->op_fname.len = be32_to_cpup(p++);
READ_BUF(open->op_fname.len);
SAVEMEM(open->op_fname.data, open->op_fname.len);
if ((status = check_filename(open->op_fname.data, open->op_fname.len)))
return status;
break;
case NFS4_OPEN_CLAIM_PREVIOUS:
READ_BUF(4);
open->op_delegate_type = be32_to_cpup(p++);
break;
case NFS4_OPEN_CLAIM_DELEGATE_CUR:
status = nfsd4_decode_stateid(argp, &open->op_delegate_stateid);
if (status)
return status;
READ_BUF(4);
open->op_fname.len = be32_to_cpup(p++);
READ_BUF(open->op_fname.len);
SAVEMEM(open->op_fname.data, open->op_fname.len);
if ((status = check_filename(open->op_fname.data, open->op_fname.len)))
return status;
break;
case NFS4_OPEN_CLAIM_FH:
case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
if (argp->minorversion < 1)
goto xdr_error;
/* void */
break;
case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
if (argp->minorversion < 1)
goto xdr_error;
status = nfsd4_decode_stateid(argp, &open->op_delegate_stateid);
if (status)
return status;
break;
default:
goto xdr_error;
}
DECODE_TAIL;
}
static __be32
nfsd4_decode_open_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_open_confirm *open_conf)
{
DECODE_HEAD;
if (argp->minorversion >= 1)
return nfserr_notsupp;
status = nfsd4_decode_stateid(argp, &open_conf->oc_req_stateid);
if (status)
return status;
READ_BUF(4);
open_conf->oc_seqid = be32_to_cpup(p++);
DECODE_TAIL;
}
static __be32
nfsd4_decode_open_downgrade(struct nfsd4_compoundargs *argp, struct nfsd4_open_downgrade *open_down)
{
DECODE_HEAD;
status = nfsd4_decode_stateid(argp, &open_down->od_stateid);
if (status)
return status;
READ_BUF(4);
open_down->od_seqid = be32_to_cpup(p++);
status = nfsd4_decode_share_access(argp, &open_down->od_share_access,
&open_down->od_deleg_want, NULL);
if (status)
return status;
status = nfsd4_decode_share_deny(argp, &open_down->od_share_deny);
if (status)
return status;
DECODE_TAIL;
}
static __be32
nfsd4_decode_putfh(struct nfsd4_compoundargs *argp, struct nfsd4_putfh *putfh)
{
DECODE_HEAD;
READ_BUF(4);
putfh->pf_fhlen = be32_to_cpup(p++);
if (putfh->pf_fhlen > NFS4_FHSIZE)
goto xdr_error;
READ_BUF(putfh->pf_fhlen);
SAVEMEM(putfh->pf_fhval, putfh->pf_fhlen);
DECODE_TAIL;
}
static __be32
nfsd4_decode_putpubfh(struct nfsd4_compoundargs *argp, void *p)
{
if (argp->minorversion == 0)
return nfs_ok;
return nfserr_notsupp;
}
static __be32
nfsd4_decode_read(struct nfsd4_compoundargs *argp, struct nfsd4_read *read)
{
DECODE_HEAD;
status = nfsd4_decode_stateid(argp, &read->rd_stateid);
if (status)
return status;
READ_BUF(12);
p = xdr_decode_hyper(p, &read->rd_offset);
read->rd_length = be32_to_cpup(p++);
DECODE_TAIL;
}
static __be32
nfsd4_decode_readdir(struct nfsd4_compoundargs *argp, struct nfsd4_readdir *readdir)
{
DECODE_HEAD;
READ_BUF(24);
p = xdr_decode_hyper(p, &readdir->rd_cookie);
COPYMEM(readdir->rd_verf.data, sizeof(readdir->rd_verf.data));
readdir->rd_dircount = be32_to_cpup(p++);
readdir->rd_maxcount = be32_to_cpup(p++);
if ((status = nfsd4_decode_bitmap(argp, readdir->rd_bmval)))
goto out;
DECODE_TAIL;
}
static __be32
nfsd4_decode_remove(struct nfsd4_compoundargs *argp, struct nfsd4_remove *remove)
{
DECODE_HEAD;
READ_BUF(4);
remove->rm_namelen = be32_to_cpup(p++);
READ_BUF(remove->rm_namelen);
SAVEMEM(remove->rm_name, remove->rm_namelen);
if ((status = check_filename(remove->rm_name, remove->rm_namelen)))
return status;
DECODE_TAIL;
}
static __be32
nfsd4_decode_rename(struct nfsd4_compoundargs *argp, struct nfsd4_rename *rename)
{
DECODE_HEAD;
READ_BUF(4);
rename->rn_snamelen = be32_to_cpup(p++);
READ_BUF(rename->rn_snamelen);
SAVEMEM(rename->rn_sname, rename->rn_snamelen);
READ_BUF(4);
rename->rn_tnamelen = be32_to_cpup(p++);
READ_BUF(rename->rn_tnamelen);
SAVEMEM(rename->rn_tname, rename->rn_tnamelen);
if ((status = check_filename(rename->rn_sname, rename->rn_snamelen)))
return status;
if ((status = check_filename(rename->rn_tname, rename->rn_tnamelen)))
return status;
DECODE_TAIL;
}
static __be32
nfsd4_decode_renew(struct nfsd4_compoundargs *argp, clientid_t *clientid)
{
DECODE_HEAD;
if (argp->minorversion >= 1)
return nfserr_notsupp;
READ_BUF(sizeof(clientid_t));
COPYMEM(clientid, sizeof(clientid_t));
DECODE_TAIL;
}
static __be32
nfsd4_decode_secinfo(struct nfsd4_compoundargs *argp,
struct nfsd4_secinfo *secinfo)
{
DECODE_HEAD;
READ_BUF(4);
secinfo->si_namelen = be32_to_cpup(p++);
READ_BUF(secinfo->si_namelen);
SAVEMEM(secinfo->si_name, secinfo->si_namelen);
status = check_filename(secinfo->si_name, secinfo->si_namelen);
if (status)
return status;
DECODE_TAIL;
}
static __be32
nfsd4_decode_secinfo_no_name(struct nfsd4_compoundargs *argp,
struct nfsd4_secinfo_no_name *sin)
{
DECODE_HEAD;
READ_BUF(4);
sin->sin_style = be32_to_cpup(p++);
DECODE_TAIL;
}
static __be32
nfsd4_decode_setattr(struct nfsd4_compoundargs *argp, struct nfsd4_setattr *setattr)
{
__be32 status;
status = nfsd4_decode_stateid(argp, &setattr->sa_stateid);
if (status)
return status;
return nfsd4_decode_fattr(argp, setattr->sa_bmval, &setattr->sa_iattr,
&setattr->sa_acl, &setattr->sa_label, NULL);
}
static __be32
nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclientid *setclientid)
{
DECODE_HEAD;
if (argp->minorversion >= 1)
return nfserr_notsupp;
READ_BUF(NFS4_VERIFIER_SIZE);
COPYMEM(setclientid->se_verf.data, NFS4_VERIFIER_SIZE);
status = nfsd4_decode_opaque(argp, &setclientid->se_name);
if (status)
return nfserr_bad_xdr;
READ_BUF(8);
setclientid->se_callback_prog = be32_to_cpup(p++);
setclientid->se_callback_netid_len = be32_to_cpup(p++);
READ_BUF(setclientid->se_callback_netid_len);
SAVEMEM(setclientid->se_callback_netid_val, setclientid->se_callback_netid_len);
READ_BUF(4);
setclientid->se_callback_addr_len = be32_to_cpup(p++);
READ_BUF(setclientid->se_callback_addr_len);
SAVEMEM(setclientid->se_callback_addr_val, setclientid->se_callback_addr_len);
READ_BUF(4);
setclientid->se_callback_ident = be32_to_cpup(p++);
DECODE_TAIL;
}
static __be32
nfsd4_decode_setclientid_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_setclientid_confirm *scd_c)
{
DECODE_HEAD;
if (argp->minorversion >= 1)
return nfserr_notsupp;
READ_BUF(8 + NFS4_VERIFIER_SIZE);
COPYMEM(&scd_c->sc_clientid, 8);
COPYMEM(&scd_c->sc_confirm, NFS4_VERIFIER_SIZE);
DECODE_TAIL;
}
/* Also used for NVERIFY */
static __be32
nfsd4_decode_verify(struct nfsd4_compoundargs *argp, struct nfsd4_verify *verify)
{
DECODE_HEAD;
if ((status = nfsd4_decode_bitmap(argp, verify->ve_bmval)))
goto out;
/* For convenience's sake, we compare raw xdr'd attributes in
* nfsd4_proc_verify */
READ_BUF(4);
verify->ve_attrlen = be32_to_cpup(p++);
READ_BUF(verify->ve_attrlen);
SAVEMEM(verify->ve_attrval, verify->ve_attrlen);
DECODE_TAIL;
}
static __be32
nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
{
int avail;
int len;
DECODE_HEAD;
status = nfsd4_decode_stateid(argp, &write->wr_stateid);
if (status)
return status;
READ_BUF(16);
p = xdr_decode_hyper(p, &write->wr_offset);
write->wr_stable_how = be32_to_cpup(p++);
if (write->wr_stable_how > NFS_FILE_SYNC)
goto xdr_error;
write->wr_buflen = be32_to_cpup(p++);
/* Sorry .. no magic macros for this.. *
* READ_BUF(write->wr_buflen);
* SAVEMEM(write->wr_buf, write->wr_buflen);
*/
avail = (char*)argp->end - (char*)argp->p;
if (avail + argp->pagelen < write->wr_buflen) {
dprintk("NFSD: xdr error (%s:%d)\n",
__FILE__, __LINE__);
goto xdr_error;
}
write->wr_head.iov_base = p;
write->wr_head.iov_len = avail;
write->wr_pagelist = argp->pagelist;
len = XDR_QUADLEN(write->wr_buflen) << 2;
if (len >= avail) {
int pages;
len -= avail;
pages = len >> PAGE_SHIFT;
argp->pagelist += pages;
argp->pagelen -= pages * PAGE_SIZE;
len -= pages * PAGE_SIZE;
argp->p = (__be32 *)page_address(argp->pagelist[0]);
argp->pagelist++;
argp->end = argp->p + XDR_QUADLEN(PAGE_SIZE);
}
argp->p += XDR_QUADLEN(len);
DECODE_TAIL;
}
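/*
 * The open-coded tail above is the zero-copy path for WRITE payloads:
 * rather than copying wr_buflen bytes, the decoder records an iovec for
 * the fragment left in the current page (wr_head) plus the list of
 * following receive pages (wr_pagelist), then just walks the decode
 * cursor past the data so nfsd can write directly from those pages.
 */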
static __be32
nfsd4_decode_release_lockowner(struct nfsd4_compoundargs *argp, struct nfsd4_release_lockowner *rlockowner)
{
DECODE_HEAD;
if (argp->minorversion >= 1)
return nfserr_notsupp;
READ_BUF(12);
COPYMEM(&rlockowner->rl_clientid, sizeof(clientid_t));
rlockowner->rl_owner.len = be32_to_cpup(p++);
READ_BUF(rlockowner->rl_owner.len);
READMEM(rlockowner->rl_owner.data, rlockowner->rl_owner.len);
if (argp->minorversion && !zero_clientid(&rlockowner->rl_clientid))
return nfserr_inval;
DECODE_TAIL;
}
static __be32
nfsd4_decode_exchange_id(struct nfsd4_compoundargs *argp,
struct nfsd4_exchange_id *exid)
{
int dummy, tmp;
DECODE_HEAD;
READ_BUF(NFS4_VERIFIER_SIZE);
COPYMEM(exid->verifier.data, NFS4_VERIFIER_SIZE);
status = nfsd4_decode_opaque(argp, &exid->clname);
if (status)
return nfserr_bad_xdr;
READ_BUF(4);
exid->flags = be32_to_cpup(p++);
/* Ignore state_protect4_a */
READ_BUF(4);
exid->spa_how = be32_to_cpup(p++);
switch (exid->spa_how) {
case SP4_NONE:
break;
case SP4_MACH_CRED:
/* spo_must_enforce */
status = nfsd4_decode_bitmap(argp,
exid->spo_must_enforce);
if (status)
goto out;
/* spo_must_allow */
status = nfsd4_decode_bitmap(argp, exid->spo_must_allow);
if (status)
goto out;
break;
case SP4_SSV:
/* ssp_ops */
READ_BUF(4);
dummy = be32_to_cpup(p++);
READ_BUF(dummy * 4);
p += dummy;
READ_BUF(4);
dummy = be32_to_cpup(p++);
READ_BUF(dummy * 4);
p += dummy;
/* ssp_hash_algs<> */
READ_BUF(4);
tmp = be32_to_cpup(p++);
while (tmp--) {
READ_BUF(4);
dummy = be32_to_cpup(p++);
READ_BUF(dummy);
p += XDR_QUADLEN(dummy);
}
/* ssp_encr_algs<> */
READ_BUF(4);
tmp = be32_to_cpup(p++);
while (tmp--) {
READ_BUF(4);
dummy = be32_to_cpup(p++);
READ_BUF(dummy);
p += XDR_QUADLEN(dummy);
}
/* ssp_window and ssp_num_gss_handles */
READ_BUF(8);
dummy = be32_to_cpup(p++);
dummy = be32_to_cpup(p++);
break;
default:
goto xdr_error;
}
/* Ignore Implementation ID */
READ_BUF(4); /* nfs_impl_id4 array length */
dummy = be32_to_cpup(p++);
if (dummy > 1)
goto xdr_error;
if (dummy == 1) {
/* nii_domain */
READ_BUF(4);
dummy = be32_to_cpup(p++);
READ_BUF(dummy);
p += XDR_QUADLEN(dummy);
/* nii_name */
READ_BUF(4);
dummy = be32_to_cpup(p++);
READ_BUF(dummy);
p += XDR_QUADLEN(dummy);
/* nii_date */
READ_BUF(12);
p += 3;
}
DECODE_TAIL;
}
static __be32
nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
struct nfsd4_create_session *sess)
{
DECODE_HEAD;
u32 dummy;
READ_BUF(16);
COPYMEM(&sess->clientid, 8);
sess->seqid = be32_to_cpup(p++);
sess->flags = be32_to_cpup(p++);
/* Fore channel attrs */
READ_BUF(28);
dummy = be32_to_cpup(p++); /* headerpadsz is always 0 */
sess->fore_channel.maxreq_sz = be32_to_cpup(p++);
sess->fore_channel.maxresp_sz = be32_to_cpup(p++);
sess->fore_channel.maxresp_cached = be32_to_cpup(p++);
sess->fore_channel.maxops = be32_to_cpup(p++);
sess->fore_channel.maxreqs = be32_to_cpup(p++);
sess->fore_channel.nr_rdma_attrs = be32_to_cpup(p++);
if (sess->fore_channel.nr_rdma_attrs == 1) {
READ_BUF(4);
sess->fore_channel.rdma_attrs = be32_to_cpup(p++);
} else if (sess->fore_channel.nr_rdma_attrs > 1) {
dprintk("Too many fore channel attr bitmaps!\n");
goto xdr_error;
}
/* Back channel attrs */
READ_BUF(28);
dummy = be32_to_cpup(p++); /* headerpadsz is always 0 */
sess->back_channel.maxreq_sz = be32_to_cpup(p++);
sess->back_channel.maxresp_sz = be32_to_cpup(p++);
sess->back_channel.maxresp_cached = be32_to_cpup(p++);
sess->back_channel.maxops = be32_to_cpup(p++);
sess->back_channel.maxreqs = be32_to_cpup(p++);
sess->back_channel.nr_rdma_attrs = be32_to_cpup(p++);
if (sess->back_channel.nr_rdma_attrs == 1) {
READ_BUF(4);
sess->back_channel.rdma_attrs = be32_to_cpup(p++);
} else if (sess->back_channel.nr_rdma_attrs > 1) {
dprintk("Too many back channel attr bitmaps!\n");
goto xdr_error;
}
READ_BUF(4);
sess->callback_prog = be32_to_cpup(p++);
nfsd4_decode_cb_sec(argp, &sess->cb_sec);
DECODE_TAIL;
}
static __be32
nfsd4_decode_destroy_session(struct nfsd4_compoundargs *argp,
struct nfsd4_destroy_session *destroy_session)
{
DECODE_HEAD;
READ_BUF(NFS4_MAX_SESSIONID_LEN);
COPYMEM(destroy_session->sessionid.data, NFS4_MAX_SESSIONID_LEN);
DECODE_TAIL;
}
static __be32
nfsd4_decode_free_stateid(struct nfsd4_compoundargs *argp,
struct nfsd4_free_stateid *free_stateid)
{
DECODE_HEAD;
READ_BUF(sizeof(stateid_t));
free_stateid->fr_stateid.si_generation = be32_to_cpup(p++);
COPYMEM(&free_stateid->fr_stateid.si_opaque, sizeof(stateid_opaque_t));
DECODE_TAIL;
}
static __be32
nfsd4_decode_sequence(struct nfsd4_compoundargs *argp,
struct nfsd4_sequence *seq)
{
DECODE_HEAD;
READ_BUF(NFS4_MAX_SESSIONID_LEN + 16);
COPYMEM(seq->sessionid.data, NFS4_MAX_SESSIONID_LEN);
seq->seqid = be32_to_cpup(p++);
seq->slotid = be32_to_cpup(p++);
seq->maxslots = be32_to_cpup(p++);
seq->cachethis = be32_to_cpup(p++);
DECODE_TAIL;
}
static __be32
nfsd4_decode_test_stateid(struct nfsd4_compoundargs *argp, struct nfsd4_test_stateid *test_stateid)
{
int i;
__be32 *p, status;
struct nfsd4_test_stateid_id *stateid;
READ_BUF(4);
test_stateid->ts_num_ids = be32_to_cpup(p++);
INIT_LIST_HEAD(&test_stateid->ts_stateid_list);
for (i = 0; i < test_stateid->ts_num_ids; i++) {
stateid = svcxdr_tmpalloc(argp, sizeof(*stateid));
if (!stateid) {
status = nfserrno(-ENOMEM);
goto out;
}
INIT_LIST_HEAD(&stateid->ts_id_list);
list_add_tail(&stateid->ts_id_list, &test_stateid->ts_stateid_list);
status = nfsd4_decode_stateid(argp, &stateid->ts_id_stateid);
if (status)
goto out;
}
status = 0;
out:
return status;
xdr_error:
dprintk("NFSD: xdr error (%s:%d)\n", __FILE__, __LINE__);
status = nfserr_bad_xdr;
goto out;
}
static __be32 nfsd4_decode_destroy_clientid(struct nfsd4_compoundargs *argp, struct nfsd4_destroy_clientid *dc)
{
DECODE_HEAD;
READ_BUF(8);
COPYMEM(&dc->clientid, 8);
DECODE_TAIL;
}
static __be32 nfsd4_decode_reclaim_complete(struct nfsd4_compoundargs *argp, struct nfsd4_reclaim_complete *rc)
{
DECODE_HEAD;
READ_BUF(4);
rc->rca_one_fs = be32_to_cpup(p++);
DECODE_TAIL;
}
#ifdef CONFIG_NFSD_PNFS
static __be32
nfsd4_decode_getdeviceinfo(struct nfsd4_compoundargs *argp,
struct nfsd4_getdeviceinfo *gdev)
{
DECODE_HEAD;
u32 num, i;
READ_BUF(sizeof(struct nfsd4_deviceid) + 3 * 4);
COPYMEM(&gdev->gd_devid, sizeof(struct nfsd4_deviceid));
gdev->gd_layout_type = be32_to_cpup(p++);
gdev->gd_maxcount = be32_to_cpup(p++);
num = be32_to_cpup(p++);
if (num) {
READ_BUF(4 * num);
gdev->gd_notify_types = be32_to_cpup(p++);
for (i = 1; i < num; i++) {
if (be32_to_cpup(p++)) {
status = nfserr_inval;
goto out;
}
}
}
DECODE_TAIL;
}
static __be32
nfsd4_decode_layoutget(struct nfsd4_compoundargs *argp,
struct nfsd4_layoutget *lgp)
{
DECODE_HEAD;
READ_BUF(36);
lgp->lg_signal = be32_to_cpup(p++);
lgp->lg_layout_type = be32_to_cpup(p++);
lgp->lg_seg.iomode = be32_to_cpup(p++);
p = xdr_decode_hyper(p, &lgp->lg_seg.offset);
p = xdr_decode_hyper(p, &lgp->lg_seg.length);
p = xdr_decode_hyper(p, &lgp->lg_minlength);
status = nfsd4_decode_stateid(argp, &lgp->lg_sid);
if (status)
return status;
READ_BUF(4);
lgp->lg_maxcount = be32_to_cpup(p++);
DECODE_TAIL;
}
static __be32
nfsd4_decode_layoutcommit(struct nfsd4_compoundargs *argp,
struct nfsd4_layoutcommit *lcp)
{
DECODE_HEAD;
u32 timechange;
READ_BUF(20);
p = xdr_decode_hyper(p, &lcp->lc_seg.offset);
p = xdr_decode_hyper(p, &lcp->lc_seg.length);
lcp->lc_reclaim = be32_to_cpup(p++);
status = nfsd4_decode_stateid(argp, &lcp->lc_sid);
if (status)
return status;
READ_BUF(4);
lcp->lc_newoffset = be32_to_cpup(p++);
if (lcp->lc_newoffset) {
READ_BUF(8);
p = xdr_decode_hyper(p, &lcp->lc_last_wr);
} else
lcp->lc_last_wr = 0;
READ_BUF(4);
timechange = be32_to_cpup(p++);
if (timechange) {
status = nfsd4_decode_time(argp, &lcp->lc_mtime);
if (status)
return status;
} else {
lcp->lc_mtime.tv_nsec = UTIME_NOW;
}
READ_BUF(8);
lcp->lc_layout_type = be32_to_cpup(p++);
/*
* Save the layout update in XDR format and let the layout driver deal
* with it later.
*/
lcp->lc_up_len = be32_to_cpup(p++);
if (lcp->lc_up_len > 0) {
READ_BUF(lcp->lc_up_len);
READMEM(lcp->lc_up_layout, lcp->lc_up_len);
}
DECODE_TAIL;
}
static __be32
nfsd4_decode_layoutreturn(struct nfsd4_compoundargs *argp,
struct nfsd4_layoutreturn *lrp)
{
DECODE_HEAD;
READ_BUF(16);
lrp->lr_reclaim = be32_to_cpup(p++);
lrp->lr_layout_type = be32_to_cpup(p++);
lrp->lr_seg.iomode = be32_to_cpup(p++);
lrp->lr_return_type = be32_to_cpup(p++);
if (lrp->lr_return_type == RETURN_FILE) {
READ_BUF(16);
p = xdr_decode_hyper(p, &lrp->lr_seg.offset);
p = xdr_decode_hyper(p, &lrp->lr_seg.length);
status = nfsd4_decode_stateid(argp, &lrp->lr_sid);
if (status)
return status;
READ_BUF(4);
lrp->lrf_body_len = be32_to_cpup(p++);
if (lrp->lrf_body_len > 0) {
READ_BUF(lrp->lrf_body_len);
READMEM(lrp->lrf_body, lrp->lrf_body_len);
}
} else {
lrp->lr_seg.offset = 0;
lrp->lr_seg.length = NFS4_MAX_UINT64;
}
DECODE_TAIL;
}
#endif /* CONFIG_NFSD_PNFS */
static __be32
nfsd4_decode_fallocate(struct nfsd4_compoundargs *argp,
struct nfsd4_fallocate *fallocate)
{
DECODE_HEAD;
status = nfsd4_decode_stateid(argp, &fallocate->falloc_stateid);
if (status)
return status;
READ_BUF(16);
p = xdr_decode_hyper(p, &fallocate->falloc_offset);
xdr_decode_hyper(p, &fallocate->falloc_length);
DECODE_TAIL;
}
static __be32
nfsd4_decode_clone(struct nfsd4_compoundargs *argp, struct nfsd4_clone *clone)
{
DECODE_HEAD;
status = nfsd4_decode_stateid(argp, &clone->cl_src_stateid);
if (status)
return status;
status = nfsd4_decode_stateid(argp, &clone->cl_dst_stateid);
if (status)
return status;
READ_BUF(8 + 8 + 8);
p = xdr_decode_hyper(p, &clone->cl_src_pos);
p = xdr_decode_hyper(p, &clone->cl_dst_pos);
p = xdr_decode_hyper(p, &clone->cl_count);
DECODE_TAIL;
}
static __be32
nfsd4_decode_copy(struct nfsd4_compoundargs *argp, struct nfsd4_copy *copy)
{
DECODE_HEAD;
unsigned int tmp;
status = nfsd4_decode_stateid(argp, &copy->cp_src_stateid);
if (status)
return status;
status = nfsd4_decode_stateid(argp, &copy->cp_dst_stateid);
if (status)
return status;
READ_BUF(8 + 8 + 8 + 4 + 4 + 4);
p = xdr_decode_hyper(p, &copy->cp_src_pos);
p = xdr_decode_hyper(p, &copy->cp_dst_pos);
p = xdr_decode_hyper(p, &copy->cp_count);
copy->cp_consecutive = be32_to_cpup(p++);
copy->cp_synchronous = be32_to_cpup(p++);
tmp = be32_to_cpup(p); /* Source server list not supported */
DECODE_TAIL;
}
static __be32
nfsd4_decode_seek(struct nfsd4_compoundargs *argp, struct nfsd4_seek *seek)
{
DECODE_HEAD;
status = nfsd4_decode_stateid(argp, &seek->seek_stateid);
if (status)
return status;
READ_BUF(8 + 4);
p = xdr_decode_hyper(p, &seek->seek_offset);
seek->seek_whence = be32_to_cpup(p);
DECODE_TAIL;
}
static __be32
nfsd4_decode_noop(struct nfsd4_compoundargs *argp, void *p)
{
return nfs_ok;
}
static __be32
nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
{
return nfserr_notsupp;
}
typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
static nfsd4_dec nfsd4_dec_ops[] = {
[OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
[OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
[OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
[OP_CREATE] = (nfsd4_dec)nfsd4_decode_create,
[OP_DELEGPURGE] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_DELEGRETURN] = (nfsd4_dec)nfsd4_decode_delegreturn,
[OP_GETATTR] = (nfsd4_dec)nfsd4_decode_getattr,
[OP_GETFH] = (nfsd4_dec)nfsd4_decode_noop,
[OP_LINK] = (nfsd4_dec)nfsd4_decode_link,
[OP_LOCK] = (nfsd4_dec)nfsd4_decode_lock,
[OP_LOCKT] = (nfsd4_dec)nfsd4_decode_lockt,
[OP_LOCKU] = (nfsd4_dec)nfsd4_decode_locku,
[OP_LOOKUP] = (nfsd4_dec)nfsd4_decode_lookup,
[OP_LOOKUPP] = (nfsd4_dec)nfsd4_decode_noop,
[OP_NVERIFY] = (nfsd4_dec)nfsd4_decode_verify,
[OP_OPEN] = (nfsd4_dec)nfsd4_decode_open,
[OP_OPENATTR] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_OPEN_CONFIRM] = (nfsd4_dec)nfsd4_decode_open_confirm,
[OP_OPEN_DOWNGRADE] = (nfsd4_dec)nfsd4_decode_open_downgrade,
[OP_PUTFH] = (nfsd4_dec)nfsd4_decode_putfh,
[OP_PUTPUBFH] = (nfsd4_dec)nfsd4_decode_putpubfh,
[OP_PUTROOTFH] = (nfsd4_dec)nfsd4_decode_noop,
[OP_READ] = (nfsd4_dec)nfsd4_decode_read,
[OP_READDIR] = (nfsd4_dec)nfsd4_decode_readdir,
[OP_READLINK] = (nfsd4_dec)nfsd4_decode_noop,
[OP_REMOVE] = (nfsd4_dec)nfsd4_decode_remove,
[OP_RENAME] = (nfsd4_dec)nfsd4_decode_rename,
[OP_RENEW] = (nfsd4_dec)nfsd4_decode_renew,
[OP_RESTOREFH] = (nfsd4_dec)nfsd4_decode_noop,
[OP_SAVEFH] = (nfsd4_dec)nfsd4_decode_noop,
[OP_SECINFO] = (nfsd4_dec)nfsd4_decode_secinfo,
[OP_SETATTR] = (nfsd4_dec)nfsd4_decode_setattr,
[OP_SETCLIENTID] = (nfsd4_dec)nfsd4_decode_setclientid,
[OP_SETCLIENTID_CONFIRM] = (nfsd4_dec)nfsd4_decode_setclientid_confirm,
[OP_VERIFY] = (nfsd4_dec)nfsd4_decode_verify,
[OP_WRITE] = (nfsd4_dec)nfsd4_decode_write,
[OP_RELEASE_LOCKOWNER] = (nfsd4_dec)nfsd4_decode_release_lockowner,
/* new operations for NFSv4.1 */
[OP_BACKCHANNEL_CTL] = (nfsd4_dec)nfsd4_decode_backchannel_ctl,
[OP_BIND_CONN_TO_SESSION]= (nfsd4_dec)nfsd4_decode_bind_conn_to_session,
[OP_EXCHANGE_ID] = (nfsd4_dec)nfsd4_decode_exchange_id,
[OP_CREATE_SESSION] = (nfsd4_dec)nfsd4_decode_create_session,
[OP_DESTROY_SESSION] = (nfsd4_dec)nfsd4_decode_destroy_session,
[OP_FREE_STATEID] = (nfsd4_dec)nfsd4_decode_free_stateid,
[OP_GET_DIR_DELEGATION] = (nfsd4_dec)nfsd4_decode_notsupp,
#ifdef CONFIG_NFSD_PNFS
[OP_GETDEVICEINFO] = (nfsd4_dec)nfsd4_decode_getdeviceinfo,
[OP_GETDEVICELIST] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_LAYOUTCOMMIT] = (nfsd4_dec)nfsd4_decode_layoutcommit,
[OP_LAYOUTGET] = (nfsd4_dec)nfsd4_decode_layoutget,
[OP_LAYOUTRETURN] = (nfsd4_dec)nfsd4_decode_layoutreturn,
#else
[OP_GETDEVICEINFO] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_GETDEVICELIST] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_LAYOUTCOMMIT] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_LAYOUTGET] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_LAYOUTRETURN] = (nfsd4_dec)nfsd4_decode_notsupp,
#endif
[OP_SECINFO_NO_NAME] = (nfsd4_dec)nfsd4_decode_secinfo_no_name,
[OP_SEQUENCE] = (nfsd4_dec)nfsd4_decode_sequence,
[OP_SET_SSV] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_TEST_STATEID] = (nfsd4_dec)nfsd4_decode_test_stateid,
[OP_WANT_DELEGATION] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_DESTROY_CLIENTID] = (nfsd4_dec)nfsd4_decode_destroy_clientid,
[OP_RECLAIM_COMPLETE] = (nfsd4_dec)nfsd4_decode_reclaim_complete,
/* new operations for NFSv4.2 */
[OP_ALLOCATE] = (nfsd4_dec)nfsd4_decode_fallocate,
[OP_COPY] = (nfsd4_dec)nfsd4_decode_copy,
[OP_COPY_NOTIFY] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_DEALLOCATE] = (nfsd4_dec)nfsd4_decode_fallocate,
[OP_IO_ADVISE] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_LAYOUTERROR] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_LAYOUTSTATS] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_OFFLOAD_CANCEL] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_OFFLOAD_STATUS] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_READ_PLUS] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_SEEK] = (nfsd4_dec)nfsd4_decode_seek,
[OP_WRITE_SAME] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_CLONE] = (nfsd4_dec)nfsd4_decode_clone,
};
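/*
 * nfsd4_dec_ops is indexed directly by the on-the-wire opnum.
 * nfsd4_opnum_in_range() below gates every lookup so that opnums outside
 * the connection's minor version (or below FIRST_NFS4_OP) never index the
 * table and are answered with OP_ILLEGAL instead.
 */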
static inline bool
nfsd4_opnum_in_range(struct nfsd4_compoundargs *argp, struct nfsd4_op *op)
{
if (op->opnum < FIRST_NFS4_OP)
return false;
else if (argp->minorversion == 0 && op->opnum > LAST_NFS40_OP)
return false;
else if (argp->minorversion == 1 && op->opnum > LAST_NFS41_OP)
return false;
else if (argp->minorversion == 2 && op->opnum > LAST_NFS42_OP)
return false;
return true;
}
static __be32
nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
{
DECODE_HEAD;
struct nfsd4_op *op;
bool cachethis = false;
int auth_slack = argp->rqstp->rq_auth_slack;
int max_reply = auth_slack + 8; /* opcnt, status */
int readcount = 0;
int readbytes = 0;
int i;
READ_BUF(4);
argp->taglen = be32_to_cpup(p++);
READ_BUF(argp->taglen);
SAVEMEM(argp->tag, argp->taglen);
READ_BUF(8);
argp->minorversion = be32_to_cpup(p++);
argp->opcnt = be32_to_cpup(p++);
max_reply += 4 + (XDR_QUADLEN(argp->taglen) << 2);
if (argp->taglen > NFSD4_MAX_TAGLEN)
goto xdr_error;
if (argp->opcnt > 100)
goto xdr_error;
if (argp->opcnt > ARRAY_SIZE(argp->iops)) {
argp->ops = kzalloc(argp->opcnt * sizeof(*argp->ops), GFP_KERNEL);
if (!argp->ops) {
argp->ops = argp->iops;
dprintk("nfsd: couldn't allocate room for COMPOUND\n");
goto xdr_error;
}
}
if (argp->minorversion > NFSD_SUPPORTED_MINOR_VERSION)
argp->opcnt = 0;
for (i = 0; i < argp->opcnt; i++) {
op = &argp->ops[i];
op->replay = NULL;
READ_BUF(4);
op->opnum = be32_to_cpup(p++);
if (nfsd4_opnum_in_range(argp, op))
op->status = nfsd4_dec_ops[op->opnum](argp, &op->u);
else {
op->opnum = OP_ILLEGAL;
op->status = nfserr_op_illegal;
}
/*
* We'll try to cache the result in the DRC if any one
* op in the compound wants to be cached:
*/
cachethis |= nfsd4_cache_this_op(op);
if (op->opnum == OP_READ) {
readcount++;
readbytes += nfsd4_max_reply(argp->rqstp, op);
} else
max_reply += nfsd4_max_reply(argp->rqstp, op);
/*
* OP_LOCK and OP_LOCKT may return a conflicting lock.
* (Special case because it will just skip encoding this
* if it runs out of xdr buffer space, and it is the only
* operation that behaves this way.)
*/
if (op->opnum == OP_LOCK || op->opnum == OP_LOCKT)
max_reply += NFS4_OPAQUE_LIMIT;
if (op->status) {
argp->opcnt = i+1;
break;
}
}
/* Sessions make the DRC unnecessary: */
if (argp->minorversion)
cachethis = false;
svc_reserve(argp->rqstp, max_reply + readbytes);
argp->rqstp->rq_cachetype = cachethis ? RC_REPLBUFF : RC_NOCACHE;
if (readcount > 1 || max_reply > PAGE_SIZE - auth_slack)
clear_bit(RQ_SPLICE_OK, &argp->rqstp->rq_flags);
DECODE_TAIL;
}
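/*
 * Reply sizing in the loop above: max_reply accumulates a worst case for
 * every non-READ op, while READ replies are totalled separately in
 * readcount/readbytes so big reads don't inflate the contiguous
 * reservation; svc_reserve() then pre-reserves the sum. RQ_SPLICE_OK is
 * cleared when more than one READ is present or the non-READ replies
 * might overflow the first page, since the splice path can only place a
 * single READ payload directly into the reply pages.
 */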
static __be32 *encode_change(__be32 *p, struct kstat *stat, struct inode *inode,
struct svc_export *exp)
{
if (exp->ex_flags & NFSEXP_V4ROOT) {
*p++ = cpu_to_be32(convert_to_wallclock(exp->cd->flush_time));
*p++ = 0;
} else if (IS_I_VERSION(inode)) {
p = xdr_encode_hyper(p, inode->i_version);
} else {
*p++ = cpu_to_be32(stat->ctime.tv_sec);
*p++ = cpu_to_be32(stat->ctime.tv_nsec);
}
return p;
}
static __be32 *encode_cinfo(__be32 *p, struct nfsd4_change_info *c)
{
*p++ = cpu_to_be32(c->atomic);
if (c->change_supported) {
p = xdr_encode_hyper(p, c->before_change);
p = xdr_encode_hyper(p, c->after_change);
} else {
*p++ = cpu_to_be32(c->before_ctime_sec);
*p++ = cpu_to_be32(c->before_ctime_nsec);
*p++ = cpu_to_be32(c->after_ctime_sec);
*p++ = cpu_to_be32(c->after_ctime_nsec);
}
return p;
}
/* Encode as an array of strings the string given with components
* separated by @sep, escaped with @esc_enter and @esc_exit.
*/
static __be32 nfsd4_encode_components_esc(struct xdr_stream *xdr, char sep,
char *components, char esc_enter,
char esc_exit)
{
__be32 *p;
__be32 pathlen;
int pathlen_offset;
int strlen, count=0;
char *str, *end, *next;
dprintk("nfsd4_encode_components(%s)\n", components);
pathlen_offset = xdr->buf->len;
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
p++; /* We will fill this in with @count later */
end = str = components;
while (*end) {
bool found_esc = false;
/* try to parse as esc_start, ..., esc_end, sep */
if (*str == esc_enter) {
for (; *end && (*end != esc_exit); end++)
/* find esc_exit or end of string */;
next = end + 1;
if (*end && (!*next || *next == sep)) {
str++;
found_esc = true;
}
}
if (!found_esc)
for (; *end && (*end != sep); end++)
/* find sep or end of string */;
strlen = end - str;
if (strlen) {
p = xdr_reserve_space(xdr, strlen + 4);
if (!p)
return nfserr_resource;
p = xdr_encode_opaque(p, str, strlen);
count++;
}
else
end++;
if (found_esc)
end = next;
str = end;
}
pathlen = htonl(count);
write_bytes_to_xdr_buf(xdr->buf, pathlen_offset, &pathlen, 4);
return 0;
}
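/*
 * Illustrative example (hypothetical input): with sep == ':' and the
 * escape pair '[' / ']', the hosts string "[2001:db8::1]:server2" encodes
 * as the two-element array { "2001:db8::1", "server2" } -- the bracketed
 * IPv6 literal survives as a single component because colons inside the
 * escape are not treated as separators.
 */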
/* Encode as an array of strings the string given with components
* separated by @sep.
*/
static __be32 nfsd4_encode_components(struct xdr_stream *xdr, char sep,
char *components)
{
return nfsd4_encode_components_esc(xdr, sep, components, 0, 0);
}
/*
* encode a location element of a fs_locations structure
*/
static __be32 nfsd4_encode_fs_location4(struct xdr_stream *xdr,
struct nfsd4_fs_location *location)
{
__be32 status;
status = nfsd4_encode_components_esc(xdr, ':', location->hosts,
'[', ']');
if (status)
return status;
status = nfsd4_encode_components(xdr, '/', location->path);
if (status)
return status;
return 0;
}
/*
* Encode a path in RFC3530 'pathname4' format
*/
static __be32 nfsd4_encode_path(struct xdr_stream *xdr,
const struct path *root,
const struct path *path)
{
struct path cur = *path;
__be32 *p;
struct dentry **components = NULL;
unsigned int ncomponents = 0;
__be32 err = nfserr_jukebox;
dprintk("nfsd4_encode_components(");
path_get(&cur);
/* First walk the path up to the nfsd root, and store the
* dentries/path components in an array.
*/
for (;;) {
if (path_equal(&cur, root))
break;
if (cur.dentry == cur.mnt->mnt_root) {
if (follow_up(&cur))
continue;
goto out_free;
}
if ((ncomponents & 15) == 0) {
struct dentry **new;
new = krealloc(components,
sizeof(*new) * (ncomponents + 16),
GFP_KERNEL);
if (!new)
goto out_free;
components = new;
}
components[ncomponents++] = cur.dentry;
cur.dentry = dget_parent(cur.dentry);
}
err = nfserr_resource;
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_free;
*p++ = cpu_to_be32(ncomponents);
while (ncomponents) {
struct dentry *dentry = components[ncomponents - 1];
unsigned int len;
spin_lock(&dentry->d_lock);
len = dentry->d_name.len;
p = xdr_reserve_space(xdr, len + 4);
if (!p) {
spin_unlock(&dentry->d_lock);
goto out_free;
}
p = xdr_encode_opaque(p, dentry->d_name.name, len);
dprintk("/%pd", dentry);
spin_unlock(&dentry->d_lock);
dput(dentry);
ncomponents--;
}
err = 0;
out_free:
dprintk(")\n");
while (ncomponents)
dput(components[--ncomponents]);
kfree(components);
path_put(&cur);
return err;
}
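/*
 * The walk above runs from the target dentry *up* to the export root,
 * collecting dentries in 16-entry chunks, then emits them in reverse so
 * the pathname4 comes out root-first; follow_up() hops across any
 * mountpoints encountered on the way.
 */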
static __be32 nfsd4_encode_fsloc_fsroot(struct xdr_stream *xdr,
struct svc_rqst *rqstp, const struct path *path)
{
struct svc_export *exp_ps;
__be32 res;
exp_ps = rqst_find_fsidzero_export(rqstp);
if (IS_ERR(exp_ps))
return nfserrno(PTR_ERR(exp_ps));
res = nfsd4_encode_path(xdr, &exp_ps->ex_path, path);
exp_put(exp_ps);
return res;
}
/*
* encode a fs_locations structure
*/
static __be32 nfsd4_encode_fs_locations(struct xdr_stream *xdr,
struct svc_rqst *rqstp, struct svc_export *exp)
{
__be32 status;
int i;
__be32 *p;
struct nfsd4_fs_locations *fslocs = &exp->ex_fslocs;
status = nfsd4_encode_fsloc_fsroot(xdr, rqstp, &exp->ex_path);
if (status)
return status;
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(fslocs->locations_count);
for (i=0; i<fslocs->locations_count; i++) {
status = nfsd4_encode_fs_location4(xdr, &fslocs->locations[i]);
if (status)
return status;
}
return 0;
}
static u32 nfs4_file_type(umode_t mode)
{
switch (mode & S_IFMT) {
case S_IFIFO: return NF4FIFO;
case S_IFCHR: return NF4CHR;
case S_IFDIR: return NF4DIR;
case S_IFBLK: return NF4BLK;
case S_IFLNK: return NF4LNK;
case S_IFREG: return NF4REG;
case S_IFSOCK: return NF4SOCK;
default: return NF4BAD;
}
}
static inline __be32
nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp,
struct nfs4_ace *ace)
{
if (ace->whotype != NFS4_ACL_WHO_NAMED)
return nfs4_acl_write_who(xdr, ace->whotype);
else if (ace->flag & NFS4_ACE_IDENTIFIER_GROUP)
return nfsd4_encode_group(xdr, rqstp, ace->who_gid);
else
return nfsd4_encode_user(xdr, rqstp, ace->who_uid);
}
static inline __be32
nfsd4_encode_layout_types(struct xdr_stream *xdr, u32 layout_types)
{
__be32 *p;
unsigned long i = hweight_long(layout_types);
p = xdr_reserve_space(xdr, 4 + 4 * i);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(i);
for (i = LAYOUT_NFSV4_1_FILES; i < LAYOUT_TYPE_MAX; ++i)
if (layout_types & (1 << i))
*p++ = cpu_to_be32(i);
return 0;
}
#define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \
FATTR4_WORD0_RDATTR_ERROR)
#define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID
#define WORD2_ABSENT_FS_ATTRS 0
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
static inline __be32
nfsd4_encode_security_label(struct xdr_stream *xdr, struct svc_rqst *rqstp,
void *context, int len)
{
__be32 *p;
p = xdr_reserve_space(xdr, len + 4 + 4 + 4);
if (!p)
return nfserr_resource;
/*
* For now we use a 0 here to indicate the null translation; in
* the future we may place a call to translation code here.
*/
*p++ = cpu_to_be32(0); /* lfs */
*p++ = cpu_to_be32(0); /* pi */
p = xdr_encode_opaque(p, context, len);
return 0;
}
#else
static inline __be32
nfsd4_encode_security_label(struct xdr_stream *xdr, struct svc_rqst *rqstp,
void *context, int len)
{ return 0; }
#endif
static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *bmval2, u32 *rdattr_err)
{
/* As per referral draft: */
if (*bmval0 & ~WORD0_ABSENT_FS_ATTRS ||
*bmval1 & ~WORD1_ABSENT_FS_ATTRS) {
if (*bmval0 & FATTR4_WORD0_RDATTR_ERROR ||
*bmval0 & FATTR4_WORD0_FS_LOCATIONS)
*rdattr_err = NFSERR_MOVED;
else
return nfserr_moved;
}
*bmval0 &= WORD0_ABSENT_FS_ATTRS;
*bmval1 &= WORD1_ABSENT_FS_ATTRS;
*bmval2 &= WORD2_ABSENT_FS_ATTRS;
return 0;
}
static int get_parent_attributes(struct svc_export *exp, struct kstat *stat)
{
struct path path = exp->ex_path;
int err;
path_get(&path);
while (follow_up(&path)) {
if (path.dentry != path.mnt->mnt_root)
break;
}
err = vfs_getattr(&path, stat, STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT);
path_put(&path);
return err;
}
static __be32
nfsd4_encode_bitmap(struct xdr_stream *xdr, u32 bmval0, u32 bmval1, u32 bmval2)
{
__be32 *p;
if (bmval2) {
p = xdr_reserve_space(xdr, 16);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(3);
*p++ = cpu_to_be32(bmval0);
*p++ = cpu_to_be32(bmval1);
*p++ = cpu_to_be32(bmval2);
} else if (bmval1) {
p = xdr_reserve_space(xdr, 12);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(2);
*p++ = cpu_to_be32(bmval0);
*p++ = cpu_to_be32(bmval1);
} else {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(1);
*p++ = cpu_to_be32(bmval0);
}
return 0;
out_resource:
return nfserr_resource;
}
/*
* Note: @fhp can be NULL; in this case, we might have to compose the filehandle
* ourselves.
*/
static __be32
nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
struct svc_export *exp,
struct dentry *dentry, u32 *bmval,
struct svc_rqst *rqstp, int ignore_crossmnt)
{
u32 bmval0 = bmval[0];
u32 bmval1 = bmval[1];
u32 bmval2 = bmval[2];
struct kstat stat;
struct svc_fh *tempfh = NULL;
struct kstatfs statfs;
__be32 *p;
int starting_len = xdr->buf->len;
int attrlen_offset;
__be32 attrlen;
u32 dummy;
u64 dummy64;
u32 rdattr_err = 0;
__be32 status;
int err;
struct nfs4_acl *acl = NULL;
void *context = NULL;
int contextlen;
bool contextsupport = false;
struct nfsd4_compoundres *resp = rqstp->rq_resp;
u32 minorversion = resp->cstate.minorversion;
struct path path = {
.mnt = exp->ex_path.mnt,
.dentry = dentry,
};
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
BUG_ON(!nfsd_attrs_supported(minorversion, bmval));
if (exp->ex_fslocs.migrated) {
status = fattr_handle_absent_fs(&bmval0, &bmval1, &bmval2, &rdattr_err);
if (status)
goto out;
}
err = vfs_getattr(&path, &stat, STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT);
if (err)
goto out_nfserr;
if ((bmval0 & (FATTR4_WORD0_FILES_AVAIL | FATTR4_WORD0_FILES_FREE |
FATTR4_WORD0_FILES_TOTAL | FATTR4_WORD0_MAXNAME)) ||
(bmval1 & (FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE |
FATTR4_WORD1_SPACE_TOTAL))) {
err = vfs_statfs(&path, &statfs);
if (err)
goto out_nfserr;
}
if ((bmval0 & (FATTR4_WORD0_FILEHANDLE | FATTR4_WORD0_FSID)) && !fhp) {
tempfh = kmalloc(sizeof(struct svc_fh), GFP_KERNEL);
status = nfserr_jukebox;
if (!tempfh)
goto out;
fh_init(tempfh, NFS4_FHSIZE);
status = fh_compose(tempfh, exp, dentry, NULL);
if (status)
goto out;
fhp = tempfh;
}
if (bmval0 & FATTR4_WORD0_ACL) {
err = nfsd4_get_nfs4_acl(rqstp, dentry, &acl);
if (err == -EOPNOTSUPP)
bmval0 &= ~FATTR4_WORD0_ACL;
else if (err == -EINVAL) {
status = nfserr_attrnotsupp;
goto out;
} else if (err != 0)
goto out_nfserr;
}
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
if ((bmval2 & FATTR4_WORD2_SECURITY_LABEL) ||
bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
if (exp->ex_flags & NFSEXP_SECURITY_LABEL)
err = security_inode_getsecctx(d_inode(dentry),
&context, &contextlen);
else
err = -EOPNOTSUPP;
contextsupport = (err == 0);
if (bmval2 & FATTR4_WORD2_SECURITY_LABEL) {
if (err == -EOPNOTSUPP)
bmval2 &= ~FATTR4_WORD2_SECURITY_LABEL;
else if (err)
goto out_nfserr;
}
}
#endif /* CONFIG_NFSD_V4_SECURITY_LABEL */
status = nfsd4_encode_bitmap(xdr, bmval0, bmval1, bmval2);
if (status)
goto out;
attrlen_offset = xdr->buf->len;
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
p++; /* to be backfilled later */
if (bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
u32 supp[3];
memcpy(supp, nfsd_suppattrs[minorversion], sizeof(supp));
if (!IS_POSIXACL(dentry->d_inode))
supp[0] &= ~FATTR4_WORD0_ACL;
if (!contextsupport)
supp[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
if (!supp[2]) {
p = xdr_reserve_space(xdr, 12);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(2);
*p++ = cpu_to_be32(supp[0]);
*p++ = cpu_to_be32(supp[1]);
} else {
p = xdr_reserve_space(xdr, 16);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(3);
*p++ = cpu_to_be32(supp[0]);
*p++ = cpu_to_be32(supp[1]);
*p++ = cpu_to_be32(supp[2]);
}
}
if (bmval0 & FATTR4_WORD0_TYPE) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
dummy = nfs4_file_type(stat.mode);
if (dummy == NF4BAD) {
status = nfserr_serverfault;
goto out;
}
*p++ = cpu_to_be32(dummy);
}
if (bmval0 & FATTR4_WORD0_FH_EXPIRE_TYPE) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
if (exp->ex_flags & NFSEXP_NOSUBTREECHECK)
*p++ = cpu_to_be32(NFS4_FH_PERSISTENT);
else
*p++ = cpu_to_be32(NFS4_FH_PERSISTENT|
NFS4_FH_VOL_RENAME);
}
if (bmval0 & FATTR4_WORD0_CHANGE) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
p = encode_change(p, &stat, d_inode(dentry), exp);
}
if (bmval0 & FATTR4_WORD0_SIZE) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
p = xdr_encode_hyper(p, stat.size);
}
if (bmval0 & FATTR4_WORD0_LINK_SUPPORT) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(1);
}
if (bmval0 & FATTR4_WORD0_SYMLINK_SUPPORT) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(1);
}
if (bmval0 & FATTR4_WORD0_NAMED_ATTR) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(0);
}
if (bmval0 & FATTR4_WORD0_FSID) {
p = xdr_reserve_space(xdr, 16);
if (!p)
goto out_resource;
if (exp->ex_fslocs.migrated) {
p = xdr_encode_hyper(p, NFS4_REFERRAL_FSID_MAJOR);
p = xdr_encode_hyper(p, NFS4_REFERRAL_FSID_MINOR);
} else switch(fsid_source(fhp)) {
case FSIDSOURCE_FSID:
p = xdr_encode_hyper(p, (u64)exp->ex_fsid);
p = xdr_encode_hyper(p, (u64)0);
break;
case FSIDSOURCE_DEV:
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(MAJOR(stat.dev));
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(MINOR(stat.dev));
break;
case FSIDSOURCE_UUID:
p = xdr_encode_opaque_fixed(p, exp->ex_uuid,
EX_UUID_LEN);
break;
}
}
if (bmval0 & FATTR4_WORD0_UNIQUE_HANDLES) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(0);
}
if (bmval0 & FATTR4_WORD0_LEASE_TIME) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(nn->nfsd4_lease);
}
if (bmval0 & FATTR4_WORD0_RDATTR_ERROR) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(rdattr_err);
}
if (bmval0 & FATTR4_WORD0_ACL) {
struct nfs4_ace *ace;
if (acl == NULL) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(0);
goto out_acl;
}
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(acl->naces);
for (ace = acl->aces; ace < acl->aces + acl->naces; ace++) {
p = xdr_reserve_space(xdr, 4*3);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(ace->type);
*p++ = cpu_to_be32(ace->flag);
*p++ = cpu_to_be32(ace->access_mask &
NFS4_ACE_MASK_ALL);
status = nfsd4_encode_aclname(xdr, rqstp, ace);
if (status)
goto out;
}
}
out_acl:
if (bmval0 & FATTR4_WORD0_ACLSUPPORT) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(IS_POSIXACL(dentry->d_inode) ?
ACL4_SUPPORT_ALLOW_ACL|ACL4_SUPPORT_DENY_ACL : 0);
}
if (bmval0 & FATTR4_WORD0_CANSETTIME) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(1);
}
if (bmval0 & FATTR4_WORD0_CASE_INSENSITIVE) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(0);
}
if (bmval0 & FATTR4_WORD0_CASE_PRESERVING) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(1);
}
if (bmval0 & FATTR4_WORD0_CHOWN_RESTRICTED) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(1);
}
if (bmval0 & FATTR4_WORD0_FILEHANDLE) {
p = xdr_reserve_space(xdr, fhp->fh_handle.fh_size + 4);
if (!p)
goto out_resource;
p = xdr_encode_opaque(p, &fhp->fh_handle.fh_base,
fhp->fh_handle.fh_size);
}
if (bmval0 & FATTR4_WORD0_FILEID) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
p = xdr_encode_hyper(p, stat.ino);
}
if (bmval0 & FATTR4_WORD0_FILES_AVAIL) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
p = xdr_encode_hyper(p, (u64) statfs.f_ffree);
}
if (bmval0 & FATTR4_WORD0_FILES_FREE) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
p = xdr_encode_hyper(p, (u64) statfs.f_ffree);
}
if (bmval0 & FATTR4_WORD0_FILES_TOTAL) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
p = xdr_encode_hyper(p, (u64) statfs.f_files);
}
if (bmval0 & FATTR4_WORD0_FS_LOCATIONS) {
status = nfsd4_encode_fs_locations(xdr, rqstp, exp);
if (status)
goto out;
}
if (bmval0 & FATTR4_WORD0_HOMOGENEOUS) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(1);
}
if (bmval0 & FATTR4_WORD0_MAXFILESIZE) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
p = xdr_encode_hyper(p, exp->ex_path.mnt->mnt_sb->s_maxbytes);
}
if (bmval0 & FATTR4_WORD0_MAXLINK) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(255);
}
if (bmval0 & FATTR4_WORD0_MAXNAME) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(statfs.f_namelen);
}
if (bmval0 & FATTR4_WORD0_MAXREAD) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
p = xdr_encode_hyper(p, (u64) svc_max_payload(rqstp));
}
if (bmval0 & FATTR4_WORD0_MAXWRITE) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
p = xdr_encode_hyper(p, (u64) svc_max_payload(rqstp));
}
if (bmval1 & FATTR4_WORD1_MODE) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(stat.mode & S_IALLUGO);
}
if (bmval1 & FATTR4_WORD1_NO_TRUNC) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(1);
}
if (bmval1 & FATTR4_WORD1_NUMLINKS) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(stat.nlink);
}
if (bmval1 & FATTR4_WORD1_OWNER) {
status = nfsd4_encode_user(xdr, rqstp, stat.uid);
if (status)
goto out;
}
if (bmval1 & FATTR4_WORD1_OWNER_GROUP) {
status = nfsd4_encode_group(xdr, rqstp, stat.gid);
if (status)
goto out;
}
if (bmval1 & FATTR4_WORD1_RAWDEV) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
*p++ = cpu_to_be32((u32) MAJOR(stat.rdev));
*p++ = cpu_to_be32((u32) MINOR(stat.rdev));
}
if (bmval1 & FATTR4_WORD1_SPACE_AVAIL) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
dummy64 = (u64)statfs.f_bavail * (u64)statfs.f_bsize;
p = xdr_encode_hyper(p, dummy64);
}
if (bmval1 & FATTR4_WORD1_SPACE_FREE) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
dummy64 = (u64)statfs.f_bfree * (u64)statfs.f_bsize;
p = xdr_encode_hyper(p, dummy64);
}
if (bmval1 & FATTR4_WORD1_SPACE_TOTAL) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
dummy64 = (u64)statfs.f_blocks * (u64)statfs.f_bsize;
p = xdr_encode_hyper(p, dummy64);
}
if (bmval1 & FATTR4_WORD1_SPACE_USED) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
dummy64 = (u64)stat.blocks << 9;
p = xdr_encode_hyper(p, dummy64);
}
if (bmval1 & FATTR4_WORD1_TIME_ACCESS) {
p = xdr_reserve_space(xdr, 12);
if (!p)
goto out_resource;
p = xdr_encode_hyper(p, (s64)stat.atime.tv_sec);
*p++ = cpu_to_be32(stat.atime.tv_nsec);
}
if (bmval1 & FATTR4_WORD1_TIME_DELTA) {
p = xdr_reserve_space(xdr, 12);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(1);
*p++ = cpu_to_be32(0);
}
if (bmval1 & FATTR4_WORD1_TIME_METADATA) {
p = xdr_reserve_space(xdr, 12);
if (!p)
goto out_resource;
p = xdr_encode_hyper(p, (s64)stat.ctime.tv_sec);
*p++ = cpu_to_be32(stat.ctime.tv_nsec);
}
if (bmval1 & FATTR4_WORD1_TIME_MODIFY) {
p = xdr_reserve_space(xdr, 12);
if (!p)
goto out_resource;
p = xdr_encode_hyper(p, (s64)stat.mtime.tv_sec);
*p++ = cpu_to_be32(stat.mtime.tv_nsec);
}
if (bmval1 & FATTR4_WORD1_MOUNTED_ON_FILEID) {
struct kstat parent_stat;
u64 ino = stat.ino;
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
/*
* Get parent's attributes if not ignoring crossmount
* and this is the root of a cross-mounted filesystem.
*/
if (ignore_crossmnt == 0 &&
dentry == exp->ex_path.mnt->mnt_root) {
err = get_parent_attributes(exp, &parent_stat);
if (err)
goto out_nfserr;
ino = parent_stat.ino;
}
p = xdr_encode_hyper(p, ino);
}
#ifdef CONFIG_NFSD_PNFS
if (bmval1 & FATTR4_WORD1_FS_LAYOUT_TYPES) {
status = nfsd4_encode_layout_types(xdr, exp->ex_layout_types);
if (status)
goto out;
}
if (bmval2 & FATTR4_WORD2_LAYOUT_TYPES) {
status = nfsd4_encode_layout_types(xdr, exp->ex_layout_types);
if (status)
goto out;
}
if (bmval2 & FATTR4_WORD2_LAYOUT_BLKSIZE) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(stat.blksize);
}
#endif /* CONFIG_NFSD_PNFS */
if (bmval2 & FATTR4_WORD2_SUPPATTR_EXCLCREAT) {
u32 supp[3];
memcpy(supp, nfsd_suppattrs[minorversion], sizeof(supp));
supp[0] &= NFSD_SUPPATTR_EXCLCREAT_WORD0;
supp[1] &= NFSD_SUPPATTR_EXCLCREAT_WORD1;
supp[2] &= NFSD_SUPPATTR_EXCLCREAT_WORD2;
status = nfsd4_encode_bitmap(xdr, supp[0], supp[1], supp[2]);
if (status)
goto out;
}
if (bmval2 & FATTR4_WORD2_SECURITY_LABEL) {
status = nfsd4_encode_security_label(xdr, rqstp, context,
contextlen);
if (status)
goto out;
}
attrlen = htonl(xdr->buf->len - attrlen_offset - 4);
write_bytes_to_xdr_buf(xdr->buf, attrlen_offset, &attrlen, 4);
status = nfs_ok;
out:
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
if (context)
security_release_secctx(context, contextlen);
#endif /* CONFIG_NFSD_V4_SECURITY_LABEL */
kfree(acl);
if (tempfh) {
fh_put(tempfh);
kfree(tempfh);
}
if (status)
xdr_truncate_encode(xdr, starting_len);
return status;
out_nfserr:
status = nfserrno(err);
goto out;
out_resource:
status = nfserr_resource;
goto out;
}
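/*
* Added commentary: the total attribute length is unknown until every
* requested attribute has been encoded, so nfsd4_encode_fattr() reserves a
* 4-byte placeholder at attrlen_offset, encodes everything, then backfills
* the length with write_bytes_to_xdr_buf(). On error the stream is rewound
* to starting_len so a partially encoded fattr never reaches the wire.
*/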
static void svcxdr_init_encode_from_buffer(struct xdr_stream *xdr,
struct xdr_buf *buf, __be32 *p, int bytes)
{
xdr->scratch.iov_len = 0;
memset(buf, 0, sizeof(struct xdr_buf));
buf->head[0].iov_base = p;
buf->head[0].iov_len = 0;
buf->len = 0;
xdr->buf = buf;
xdr->iov = buf->head;
xdr->p = p;
xdr->end = (void *)p + bytes;
buf->buflen = bytes;
}
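/*
* Added commentary (an interpretation, not from the original source): this
* open-codes a minimal xdr_stream over a caller-supplied flat buffer;
* clearing scratch.iov_len marks the scratch buffer as unused, and head[0]
* is pointed straight at the caller's memory so xdr_reserve_space() works
* unmodified.
*/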
__be32 nfsd4_encode_fattr_to_buf(__be32 **p, int words,
struct svc_fh *fhp, struct svc_export *exp,
struct dentry *dentry, u32 *bmval,
struct svc_rqst *rqstp, int ignore_crossmnt)
{
struct xdr_buf dummy;
struct xdr_stream xdr;
__be32 ret;
svcxdr_init_encode_from_buffer(&xdr, &dummy, *p, words << 2);
ret = nfsd4_encode_fattr(&xdr, fhp, exp, dentry, bmval, rqstp,
ignore_crossmnt);
*p = xdr.p;
return ret;
}
static inline int attributes_need_mount(u32 *bmval)
{
if (bmval[0] & ~(FATTR4_WORD0_RDATTR_ERROR | FATTR4_WORD0_LEASE_TIME))
return 1;
if (bmval[1] & ~FATTR4_WORD1_MOUNTED_ON_FILEID)
return 1;
return 0;
}
static __be32
nfsd4_encode_dirent_fattr(struct xdr_stream *xdr, struct nfsd4_readdir *cd,
const char *name, int namlen)
{
struct svc_export *exp = cd->rd_fhp->fh_export;
struct dentry *dentry;
__be32 nfserr;
int ignore_crossmnt = 0;
dentry = lookup_one_len_unlocked(name, cd->rd_fhp->fh_dentry, namlen);
if (IS_ERR(dentry))
return nfserrno(PTR_ERR(dentry));
if (d_really_is_negative(dentry)) {
/*
* we're not holding the i_mutex here, so there's
* a window where this directory entry could have gone
* away.
*/
dput(dentry);
return nfserr_noent;
}
exp_get(exp);
/*
* In the case of a mountpoint, the client may be asking for
* attributes that are only properties of the underlying filesystem
* as opposed to the cross-mounted file system. In such a case,
* we will not follow the cross mount and will fill the attributes
* directly from the mountpoint dentry.
*/
if (nfsd_mountpoint(dentry, exp)) {
int err;
if (!(exp->ex_flags & NFSEXP_V4ROOT)
&& !attributes_need_mount(cd->rd_bmval)) {
ignore_crossmnt = 1;
goto out_encode;
}
/*
* Why the heck aren't we just using nfsd_lookup??
* Different "."/".." handling? Something else?
* At least, add a comment here to explain....
*/
err = nfsd_cross_mnt(cd->rd_rqstp, &dentry, &exp);
if (err) {
nfserr = nfserrno(err);
goto out_put;
}
nfserr = check_nfsd_access(exp, cd->rd_rqstp);
if (nfserr)
goto out_put;
}
out_encode:
nfserr = nfsd4_encode_fattr(xdr, NULL, exp, dentry, cd->rd_bmval,
cd->rd_rqstp, ignore_crossmnt);
out_put:
dput(dentry);
exp_put(exp);
return nfserr;
}
static __be32 *
nfsd4_encode_rdattr_error(struct xdr_stream *xdr, __be32 nfserr)
{
__be32 *p;
p = xdr_reserve_space(xdr, 20);
if (!p)
return NULL;
*p++ = htonl(2);
*p++ = htonl(FATTR4_WORD0_RDATTR_ERROR); /* bmval0 */
*p++ = htonl(0); /* bmval1 */
*p++ = htonl(4); /* attribute length */
*p++ = nfserr; /* no htonl */
return p;
}
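/*
* Added commentary: the 20 bytes reserved above break down as 4 (bitmap
* length, 2 words) + 4 (bmval0) + 4 (bmval1) + 4 (attribute length, 4) +
* 4 (the rdattr_error status itself), matching the five words written.
*/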
static int
nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
loff_t offset, u64 ino, unsigned int d_type)
{
struct readdir_cd *ccd = ccdv;
struct nfsd4_readdir *cd = container_of(ccd, struct nfsd4_readdir, common);
struct xdr_stream *xdr = cd->xdr;
int start_offset = xdr->buf->len;
int cookie_offset;
u32 name_and_cookie;
int entry_bytes;
__be32 nfserr = nfserr_toosmall;
__be64 wire_offset;
__be32 *p;
/* In NFSv4, "." and ".." never make it onto the wire. */
if (name && isdotent(name, namlen)) {
cd->common.err = nfs_ok;
return 0;
}
if (cd->cookie_offset) {
wire_offset = cpu_to_be64(offset);
write_bytes_to_xdr_buf(xdr->buf, cd->cookie_offset,
&wire_offset, 8);
}
p = xdr_reserve_space(xdr, 4);
if (!p)
goto fail;
*p++ = xdr_one; /* mark entry present */
cookie_offset = xdr->buf->len;
p = xdr_reserve_space(xdr, 3*4 + namlen);
if (!p)
goto fail;
p = xdr_encode_hyper(p, NFS_OFFSET_MAX); /* offset of next entry */
p = xdr_encode_array(p, name, namlen); /* name length & name */
nfserr = nfsd4_encode_dirent_fattr(xdr, cd, name, namlen);
switch (nfserr) {
case nfs_ok:
break;
case nfserr_resource:
nfserr = nfserr_toosmall;
goto fail;
case nfserr_noent:
xdr_truncate_encode(xdr, start_offset);
goto skip_entry;
default:
/*
* If the client requested the RDATTR_ERROR attribute,
* we stuff the error code into this attribute
* and continue. If this attribute was not requested,
* then in accordance with the spec, we fail the
* entire READDIR operation(!)
*/
if (!(cd->rd_bmval[0] & FATTR4_WORD0_RDATTR_ERROR))
goto fail;
p = nfsd4_encode_rdattr_error(xdr, nfserr);
if (p == NULL) {
nfserr = nfserr_toosmall;
goto fail;
}
}
nfserr = nfserr_toosmall;
entry_bytes = xdr->buf->len - start_offset;
if (entry_bytes > cd->rd_maxcount)
goto fail;
cd->rd_maxcount -= entry_bytes;
/*
* RFC 3530 14.2.24 describes rd_dircount as only a "hint", so
* let's always let through the first entry, at least:
*/
if (!cd->rd_dircount)
goto fail;
name_and_cookie = 4 + 4 * XDR_QUADLEN(namlen) + 8;
if (name_and_cookie > cd->rd_dircount && cd->cookie_offset)
goto fail;
cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie);
cd->cookie_offset = cookie_offset;
skip_entry:
cd->common.err = nfs_ok;
return 0;
fail:
xdr_truncate_encode(xdr, start_offset);
cd->common.err = nfserr;
return -EINVAL;
}
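/*
* Added commentary: rd_dircount counts only each entry's cookie and name,
* hence name_and_cookie above is 4 (name length word) + the XDR-padded
* name + 8 (cookie). For a 5-byte name that works out to
* 4 + 4 * XDR_QUADLEN(5) + 8 = 4 + 8 + 8 = 20 bytes.
*/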
static __be32
nfsd4_encode_stateid(struct xdr_stream *xdr, stateid_t *sid)
{
__be32 *p;
p = xdr_reserve_space(xdr, sizeof(stateid_t));
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(sid->si_generation);
p = xdr_encode_opaque_fixed(p, &sid->si_opaque,
sizeof(stateid_opaque_t));
return 0;
}
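/*
* Added commentary: a stateid4 is a fixed 16 bytes on the wire, a 32-bit
* seqid followed by 12 opaque bytes; sizeof(stateid_t) matches that
* layout, which is why it can be used directly as the reservation size.
*/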
static __be32
nfsd4_encode_access(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_access *access)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
if (!nfserr) {
p = xdr_reserve_space(xdr, 8);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(access->ac_supported);
*p++ = cpu_to_be32(access->ac_resp_access);
}
return nfserr;
}
static __be32 nfsd4_encode_bind_conn_to_session(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_bind_conn_to_session *bcts)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
if (!nfserr) {
p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN + 8);
if (!p)
return nfserr_resource;
p = xdr_encode_opaque_fixed(p, bcts->sessionid.data,
NFS4_MAX_SESSIONID_LEN);
*p++ = cpu_to_be32(bcts->dir);
/* Upshifting from TCP to RDMA is not supported */
*p++ = cpu_to_be32(0);
}
return nfserr;
}
static __be32
nfsd4_encode_close(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_close *close)
{
struct xdr_stream *xdr = &resp->xdr;
if (!nfserr)
nfserr = nfsd4_encode_stateid(xdr, &close->cl_stateid);
return nfserr;
}
static __be32
nfsd4_encode_commit(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_commit *commit)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
if (!nfserr) {
p = xdr_reserve_space(xdr, NFS4_VERIFIER_SIZE);
if (!p)
return nfserr_resource;
p = xdr_encode_opaque_fixed(p, commit->co_verf.data,
NFS4_VERIFIER_SIZE);
}
return nfserr;
}
static __be32
nfsd4_encode_create(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_create *create)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
if (!nfserr) {
p = xdr_reserve_space(xdr, 20);
if (!p)
return nfserr_resource;
encode_cinfo(p, &create->cr_cinfo);
nfserr = nfsd4_encode_bitmap(xdr, create->cr_bmval[0],
create->cr_bmval[1], create->cr_bmval[2]);
}
return nfserr;
}
static __be32
nfsd4_encode_getattr(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_getattr *getattr)
{
struct svc_fh *fhp = getattr->ga_fhp;
struct xdr_stream *xdr = &resp->xdr;
if (nfserr)
return nfserr;
nfserr = nfsd4_encode_fattr(xdr, fhp, fhp->fh_export, fhp->fh_dentry,
getattr->ga_bmval,
resp->rqstp, 0);
return nfserr;
}
static __be32
nfsd4_encode_getfh(struct nfsd4_compoundres *resp, __be32 nfserr, struct svc_fh **fhpp)
{
struct xdr_stream *xdr = &resp->xdr;
struct svc_fh *fhp = *fhpp;
unsigned int len;
__be32 *p;
if (!nfserr) {
len = fhp->fh_handle.fh_size;
p = xdr_reserve_space(xdr, len + 4);
if (!p)
return nfserr_resource;
p = xdr_encode_opaque(p, &fhp->fh_handle.fh_base, len);
}
return nfserr;
}
/*
* Including all fields other than the name, a LOCK4denied structure requires
* 8(clientid) + 4(namelen) + 8(offset) + 8(length) + 4(type) = 32 bytes.
*/
static __be32
nfsd4_encode_lock_denied(struct xdr_stream *xdr, struct nfsd4_lock_denied *ld)
{
struct xdr_netobj *conf = &ld->ld_owner;
__be32 *p;
again:
p = xdr_reserve_space(xdr, 32 + XDR_LEN(conf->len));
if (!p) {
/*
* Don't fail to return the result just because we can't
* return the conflicting open:
*/
if (conf->len) {
kfree(conf->data);
conf->len = 0;
conf->data = NULL;
goto again;
}
return nfserr_resource;
}
p = xdr_encode_hyper(p, ld->ld_start);
p = xdr_encode_hyper(p, ld->ld_length);
*p++ = cpu_to_be32(ld->ld_type);
if (conf->len) {
p = xdr_encode_opaque_fixed(p, &ld->ld_clientid, 8);
p = xdr_encode_opaque(p, conf->data, conf->len);
kfree(conf->data);
} else { /* non-NFSv4 lock in conflict; no clientid nor owner */
p = xdr_encode_hyper(p, (u64)0); /* clientid */
*p++ = cpu_to_be32(0); /* length of owner name */
}
return nfserr_denied;
}
static __be32
nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lock *lock)
{
struct xdr_stream *xdr = &resp->xdr;
if (!nfserr)
nfserr = nfsd4_encode_stateid(xdr, &lock->lk_resp_stateid);
else if (nfserr == nfserr_denied)
nfserr = nfsd4_encode_lock_denied(xdr, &lock->lk_denied);
return nfserr;
}
static __be32
nfsd4_encode_lockt(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lockt *lockt)
{
struct xdr_stream *xdr = &resp->xdr;
if (nfserr == nfserr_denied)
nfsd4_encode_lock_denied(xdr, &lockt->lt_denied);
return nfserr;
}
static __be32
nfsd4_encode_locku(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_locku *locku)
{
struct xdr_stream *xdr = &resp->xdr;
if (!nfserr)
nfserr = nfsd4_encode_stateid(xdr, &locku->lu_stateid);
return nfserr;
}
static __be32
nfsd4_encode_link(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_link *link)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
if (!nfserr) {
p = xdr_reserve_space(xdr, 20);
if (!p)
return nfserr_resource;
p = encode_cinfo(p, &link->li_cinfo);
}
return nfserr;
}
static __be32
nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open *open)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
if (nfserr)
goto out;
nfserr = nfsd4_encode_stateid(xdr, &open->op_stateid);
if (nfserr)
goto out;
p = xdr_reserve_space(xdr, 24);
if (!p)
return nfserr_resource;
p = encode_cinfo(p, &open->op_cinfo);
*p++ = cpu_to_be32(open->op_rflags);
nfserr = nfsd4_encode_bitmap(xdr, open->op_bmval[0], open->op_bmval[1],
open->op_bmval[2]);
if (nfserr)
goto out;
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(open->op_delegate_type);
switch (open->op_delegate_type) {
case NFS4_OPEN_DELEGATE_NONE:
break;
case NFS4_OPEN_DELEGATE_READ:
nfserr = nfsd4_encode_stateid(xdr, &open->op_delegate_stateid);
if (nfserr)
return nfserr;
p = xdr_reserve_space(xdr, 20);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(open->op_recall);
/*
* TODO: ACE's in delegations
*/
*p++ = cpu_to_be32(NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE);
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(0); /* XXX: is NULL principal ok? */
break;
case NFS4_OPEN_DELEGATE_WRITE:
nfserr = nfsd4_encode_stateid(xdr, &open->op_delegate_stateid);
if (nfserr)
return nfserr;
p = xdr_reserve_space(xdr, 32);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(0);
/*
* TODO: space_limit's in delegations
*/
*p++ = cpu_to_be32(NFS4_LIMIT_SIZE);
*p++ = cpu_to_be32(~(u32)0);
*p++ = cpu_to_be32(~(u32)0);
/*
* TODO: ACE's in delegations
*/
*p++ = cpu_to_be32(NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE);
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(0); /* XXX: is NULL principal ok? */
break;
case NFS4_OPEN_DELEGATE_NONE_EXT: /* 4.1 */
switch (open->op_why_no_deleg) {
case WND4_CONTENTION:
case WND4_RESOURCE:
p = xdr_reserve_space(xdr, 8);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(open->op_why_no_deleg);
/* deleg signaling not supported yet: */
*p++ = cpu_to_be32(0);
break;
default:
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(open->op_why_no_deleg);
}
break;
default:
BUG();
}
/* XXX save filehandle here */
out:
return nfserr;
}
static __be32
nfsd4_encode_open_confirm(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open_confirm *oc)
{
struct xdr_stream *xdr = &resp->xdr;
if (!nfserr)
nfserr = nfsd4_encode_stateid(xdr, &oc->oc_resp_stateid);
return nfserr;
}
static __be32
nfsd4_encode_open_downgrade(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open_downgrade *od)
{
struct xdr_stream *xdr = &resp->xdr;
if (!nfserr)
nfserr = nfsd4_encode_stateid(xdr, &od->od_stateid);
return nfserr;
}
static __be32 nfsd4_encode_splice_read(
struct nfsd4_compoundres *resp,
struct nfsd4_read *read,
struct file *file, unsigned long maxcount)
{
struct xdr_stream *xdr = &resp->xdr;
struct xdr_buf *buf = xdr->buf;
u32 eof;
long len;
int space_left;
__be32 nfserr;
__be32 *p = xdr->p - 2;
/* Make sure there will be room for padding if needed */
if (xdr->end - xdr->p < 1)
return nfserr_resource;
len = maxcount;
nfserr = nfsd_splice_read(read->rd_rqstp, file,
read->rd_offset, &maxcount);
if (nfserr) {
/*
* nfsd_splice_actor may have already messed with the
* page length; reset it so as not to confuse
* xdr_truncate_encode:
*/
buf->page_len = 0;
return nfserr;
}
eof = nfsd_eof_on_read(len, maxcount, read->rd_offset,
d_inode(read->rd_fhp->fh_dentry)->i_size);
*(p++) = htonl(eof);
*(p++) = htonl(maxcount);
buf->page_len = maxcount;
buf->len += maxcount;
xdr->page_ptr += (buf->page_base + maxcount + PAGE_SIZE - 1)
/ PAGE_SIZE;
/* Use rest of head for padding and remaining ops: */
buf->tail[0].iov_base = xdr->p;
buf->tail[0].iov_len = 0;
xdr->iov = buf->tail;
if (maxcount&3) {
int pad = 4 - (maxcount&3);
*(xdr->p++) = 0;
buf->tail[0].iov_base += maxcount&3;
buf->tail[0].iov_len = pad;
buf->len += pad;
}
space_left = min_t(int, (void *)xdr->end - (void *)xdr->p,
buf->buflen - buf->len);
buf->buflen = buf->len + space_left;
xdr->end = (__be32 *)((void *)xdr->end + space_left);
return 0;
}
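/*
* Added commentary: the caller (nfsd4_encode_read) reserved the two words
* for the eof flag and byte count before calling in, so "p = xdr->p - 2"
* points back at that reservation and backfills it once the splice has
* reported how many bytes were actually read.
*/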
static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp,
struct nfsd4_read *read,
struct file *file, unsigned long maxcount)
{
struct xdr_stream *xdr = &resp->xdr;
u32 eof;
int v;
int starting_len = xdr->buf->len - 8;
long len;
int thislen;
__be32 nfserr;
__be32 tmp;
__be32 *p;
u32 zzz = 0;
int pad;
len = maxcount;
v = 0;
thislen = min_t(long, len, ((void *)xdr->end - (void *)xdr->p));
p = xdr_reserve_space(xdr, (thislen+3)&~3);
WARN_ON_ONCE(!p);
resp->rqstp->rq_vec[v].iov_base = p;
resp->rqstp->rq_vec[v].iov_len = thislen;
v++;
len -= thislen;
while (len) {
thislen = min_t(long, len, PAGE_SIZE);
p = xdr_reserve_space(xdr, (thislen+3)&~3);
WARN_ON_ONCE(!p);
resp->rqstp->rq_vec[v].iov_base = p;
resp->rqstp->rq_vec[v].iov_len = thislen;
v++;
len -= thislen;
}
read->rd_vlen = v;
len = maxcount;
nfserr = nfsd_readv(file, read->rd_offset, resp->rqstp->rq_vec,
read->rd_vlen, &maxcount);
if (nfserr)
return nfserr;
xdr_truncate_encode(xdr, starting_len + 8 + ((maxcount+3)&~3));
eof = nfsd_eof_on_read(len, maxcount, read->rd_offset,
d_inode(read->rd_fhp->fh_dentry)->i_size);
tmp = htonl(eof);
write_bytes_to_xdr_buf(xdr->buf, starting_len , &tmp, 4);
tmp = htonl(maxcount);
write_bytes_to_xdr_buf(xdr->buf, starting_len + 4, &tmp, 4);
pad = (maxcount&3) ? 4 - (maxcount&3) : 0;
write_bytes_to_xdr_buf(xdr->buf, starting_len + 8 + maxcount,
&zzz, pad);
return 0;
}
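/*
* Added commentary: XDR opaque data must be padded to a 4-byte boundary,
* hence the "(thislen+3)&~3" rounding when reserving space and the final
* zero padding; a 5-byte read, for example, is followed by 3 zero bytes so
* the next item stays word-aligned.
*/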
static __be32
nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_read *read)
{
unsigned long maxcount;
struct xdr_stream *xdr = &resp->xdr;
struct file *file = read->rd_filp;
int starting_len = xdr->buf->len;
struct raparms *ra = NULL;
__be32 *p;
if (nfserr)
goto out;
p = xdr_reserve_space(xdr, 8); /* eof flag and byte count */
if (!p) {
WARN_ON_ONCE(test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags));
nfserr = nfserr_resource;
goto out;
}
if (resp->xdr.buf->page_len &&
test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags)) {
WARN_ON_ONCE(1);
nfserr = nfserr_resource;
goto out;
}
xdr_commit_encode(xdr);
maxcount = svc_max_payload(resp->rqstp);
maxcount = min_t(unsigned long, maxcount,
(xdr->buf->buflen - xdr->buf->len));
maxcount = min_t(unsigned long, maxcount, read->rd_length);
if (read->rd_tmp_file)
ra = nfsd_init_raparms(file);
if (file->f_op->splice_read &&
test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags))
nfserr = nfsd4_encode_splice_read(resp, read, file, maxcount);
else
nfserr = nfsd4_encode_readv(resp, read, file, maxcount);
if (ra)
nfsd_put_raparams(file, ra);
if (nfserr)
xdr_truncate_encode(xdr, starting_len);
out:
if (file)
fput(file);
return nfserr;
}
static __be32
nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_readlink *readlink)
{
int maxcount;
__be32 wire_count;
int zero = 0;
struct xdr_stream *xdr = &resp->xdr;
int length_offset = xdr->buf->len;
__be32 *p;
if (nfserr)
return nfserr;
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
maxcount = PAGE_SIZE;
p = xdr_reserve_space(xdr, maxcount);
if (!p)
return nfserr_resource;
/*
* XXX: By default, vfs_readlink() will truncate symlinks if they
* would overflow the buffer. Is this kosher in NFSv4? If not, one
* easy fix is: if vfs_readlink() precisely fills the buffer, assume
* that truncation occurred, and return NFS4ERR_RESOURCE.
*/
nfserr = nfsd_readlink(readlink->rl_rqstp, readlink->rl_fhp,
(char *)p, &maxcount);
if (nfserr == nfserr_isdir)
nfserr = nfserr_inval;
if (nfserr) {
xdr_truncate_encode(xdr, length_offset);
return nfserr;
}
wire_count = htonl(maxcount);
write_bytes_to_xdr_buf(xdr->buf, length_offset, &wire_count, 4);
xdr_truncate_encode(xdr, length_offset + 4 + ALIGN(maxcount, 4));
if (maxcount & 3)
write_bytes_to_xdr_buf(xdr->buf, length_offset + 4 + maxcount,
&zero, 4 - (maxcount&3));
return 0;
}
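/*
* Added commentary: the link target's length is unknown in advance, so a
* 4-byte length word plus a full page are reserved, nfsd_readlink() fills
* in the target, the real length is backfilled, and the stream is then
* truncated back to the padded size actually used.
*/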
static __be32
nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_readdir *readdir)
{
int maxcount;
int bytes_left;
loff_t offset;
__be64 wire_offset;
struct xdr_stream *xdr = &resp->xdr;
int starting_len = xdr->buf->len;
__be32 *p;
if (nfserr)
return nfserr;
p = xdr_reserve_space(xdr, NFS4_VERIFIER_SIZE);
if (!p)
return nfserr_resource;
/* XXX: Following NFSv3, we ignore the READDIR verifier for now. */
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(0);
resp->xdr.buf->head[0].iov_len = ((char *)resp->xdr.p)
- (char *)resp->xdr.buf->head[0].iov_base;
/*
* Number of bytes left for directory entries allowing for the
* final 8 bytes of the readdir and a following failed op:
*/
bytes_left = xdr->buf->buflen - xdr->buf->len
- COMPOUND_ERR_SLACK_SPACE - 8;
if (bytes_left < 0) {
nfserr = nfserr_resource;
goto err_no_verf;
}
maxcount = min_t(u32, readdir->rd_maxcount, INT_MAX);
/*
* Note the rfc defines rd_maxcount as the size of the
* READDIR4resok structure, which includes the verifier above
* and the 8 bytes encoded at the end of this function:
*/
if (maxcount < 16) {
nfserr = nfserr_toosmall;
goto err_no_verf;
}
maxcount = min_t(int, maxcount-16, bytes_left);
/* RFC 3530 14.2.24 allows us to ignore dircount when it's 0: */
if (!readdir->rd_dircount)
readdir->rd_dircount = INT_MAX;
readdir->xdr = xdr;
readdir->rd_maxcount = maxcount;
readdir->common.err = 0;
readdir->cookie_offset = 0;
offset = readdir->rd_cookie;
nfserr = nfsd_readdir(readdir->rd_rqstp, readdir->rd_fhp,
&offset,
&readdir->common, nfsd4_encode_dirent);
if (nfserr == nfs_ok &&
readdir->common.err == nfserr_toosmall &&
xdr->buf->len == starting_len + 8) {
/* nothing encoded; which limit did we hit? */
if (maxcount - 16 < bytes_left)
/* It was the fault of rd_maxcount: */
nfserr = nfserr_toosmall;
else
/* We ran out of buffer space: */
nfserr = nfserr_resource;
}
if (nfserr)
goto err_no_verf;
if (readdir->cookie_offset) {
wire_offset = cpu_to_be64(offset);
write_bytes_to_xdr_buf(xdr->buf, readdir->cookie_offset,
&wire_offset, 8);
}
p = xdr_reserve_space(xdr, 8);
if (!p) {
WARN_ON_ONCE(1);
goto err_no_verf;
}
*p++ = 0; /* no more entries */
*p++ = htonl(readdir->common.err == nfserr_eof);
return 0;
err_no_verf:
xdr_truncate_encode(xdr, starting_len);
return nfserr;
}
static __be32
nfsd4_encode_remove(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_remove *remove)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
if (!nfserr) {
p = xdr_reserve_space(xdr, 20);
if (!p)
return nfserr_resource;
p = encode_cinfo(p, &remove->rm_cinfo);
}
return nfserr;
}
static __be32
nfsd4_encode_rename(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_rename *rename)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
if (!nfserr) {
p = xdr_reserve_space(xdr, 40);
if (!p)
return nfserr_resource;
p = encode_cinfo(p, &rename->rn_sinfo);
p = encode_cinfo(p, &rename->rn_tinfo);
}
return nfserr;
}
static __be32
nfsd4_do_encode_secinfo(struct xdr_stream *xdr,
__be32 nfserr, struct svc_export *exp)
{
u32 i, nflavs, supported;
struct exp_flavor_info *flavs;
struct exp_flavor_info def_flavs[2];
__be32 *p, *flavorsp;
static bool report = true;
if (nfserr)
goto out;
nfserr = nfserr_resource;
if (exp->ex_nflavors) {
flavs = exp->ex_flavors;
nflavs = exp->ex_nflavors;
} else { /* Handling of some defaults in absence of real secinfo: */
flavs = def_flavs;
if (exp->ex_client->flavour->flavour == RPC_AUTH_UNIX) {
nflavs = 2;
flavs[0].pseudoflavor = RPC_AUTH_UNIX;
flavs[1].pseudoflavor = RPC_AUTH_NULL;
} else if (exp->ex_client->flavour->flavour == RPC_AUTH_GSS) {
nflavs = 1;
flavs[0].pseudoflavor
= svcauth_gss_flavor(exp->ex_client);
} else {
nflavs = 1;
flavs[0].pseudoflavor
= exp->ex_client->flavour->flavour;
}
}
supported = 0;
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out;
flavorsp = p++; /* to be backfilled later */
for (i = 0; i < nflavs; i++) {
rpc_authflavor_t pf = flavs[i].pseudoflavor;
struct rpcsec_gss_info info;
if (rpcauth_get_gssinfo(pf, &info) == 0) {
supported++;
p = xdr_reserve_space(xdr, 4 + 4 +
XDR_LEN(info.oid.len) + 4 + 4);
if (!p)
goto out;
*p++ = cpu_to_be32(RPC_AUTH_GSS);
p = xdr_encode_opaque(p, info.oid.data, info.oid.len);
*p++ = cpu_to_be32(info.qop);
*p++ = cpu_to_be32(info.service);
} else if (pf < RPC_AUTH_MAXFLAVOR) {
supported++;
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out;
*p++ = cpu_to_be32(pf);
} else {
if (report)
pr_warn("NFS: SECINFO: security flavor %u "
"is not supported\n", pf);
}
}
if (nflavs != supported)
report = false;
*flavorsp = htonl(supported);
nfserr = 0;
out:
if (exp)
exp_put(exp);
return nfserr;
}
static __be32
nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_secinfo *secinfo)
{
struct xdr_stream *xdr = &resp->xdr;
return nfsd4_do_encode_secinfo(xdr, nfserr, secinfo->si_exp);
}
static __be32
nfsd4_encode_secinfo_no_name(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_secinfo_no_name *secinfo)
{
struct xdr_stream *xdr = &resp->xdr;
return nfsd4_do_encode_secinfo(xdr, nfserr, secinfo->sin_exp);
}
/*
* The SETATTR encode routine is special -- it always encodes a bitmap,
* regardless of the error status.
*/
static __be32
nfsd4_encode_setattr(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_setattr *setattr)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
p = xdr_reserve_space(xdr, 16);
if (!p)
return nfserr_resource;
if (nfserr) {
*p++ = cpu_to_be32(3);
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(0);
}
else {
*p++ = cpu_to_be32(3);
*p++ = cpu_to_be32(setattr->sa_bmval[0]);
*p++ = cpu_to_be32(setattr->sa_bmval[1]);
*p++ = cpu_to_be32(setattr->sa_bmval[2]);
}
return nfserr;
}
static __be32
nfsd4_encode_setclientid(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_setclientid *scd)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
if (!nfserr) {
p = xdr_reserve_space(xdr, 8 + NFS4_VERIFIER_SIZE);
if (!p)
return nfserr_resource;
p = xdr_encode_opaque_fixed(p, &scd->se_clientid, 8);
p = xdr_encode_opaque_fixed(p, &scd->se_confirm,
NFS4_VERIFIER_SIZE);
}
else if (nfserr == nfserr_clid_inuse) {
p = xdr_reserve_space(xdr, 8);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(0);
}
return nfserr;
}
static __be32
nfsd4_encode_write(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_write *write)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
if (!nfserr) {
p = xdr_reserve_space(xdr, 16);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(write->wr_bytes_written);
*p++ = cpu_to_be32(write->wr_how_written);
p = xdr_encode_opaque_fixed(p, write->wr_verifier.data,
NFS4_VERIFIER_SIZE);
}
return nfserr;
}
static __be32
nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_exchange_id *exid)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
char *major_id;
char *server_scope;
int major_id_sz;
int server_scope_sz;
int status = 0;
uint64_t minor_id = 0;
if (nfserr)
return nfserr;
major_id = utsname()->nodename;
major_id_sz = strlen(major_id);
server_scope = utsname()->nodename;
server_scope_sz = strlen(server_scope);
p = xdr_reserve_space(xdr,
8 /* eir_clientid */ +
4 /* eir_sequenceid */ +
4 /* eir_flags */ +
4 /* spr_how */);
if (!p)
return nfserr_resource;
p = xdr_encode_opaque_fixed(p, &exid->clientid, 8);
*p++ = cpu_to_be32(exid->seqid);
*p++ = cpu_to_be32(exid->flags);
*p++ = cpu_to_be32(exid->spa_how);
switch (exid->spa_how) {
case SP4_NONE:
break;
case SP4_MACH_CRED:
/* spo_must_enforce bitmap: */
status = nfsd4_encode_bitmap(xdr,
exid->spo_must_enforce[0],
exid->spo_must_enforce[1],
exid->spo_must_enforce[2]);
if (status)
goto out;
/* spo_must_allow bitmap: */
status = nfsd4_encode_bitmap(xdr,
exid->spo_must_allow[0],
exid->spo_must_allow[1],
exid->spo_must_allow[2]);
if (status)
goto out;
break;
default:
WARN_ON_ONCE(1);
}
p = xdr_reserve_space(xdr,
8 /* so_minor_id */ +
4 /* so_major_id.len */ +
(XDR_QUADLEN(major_id_sz) * 4) +
4 /* eir_server_scope.len */ +
(XDR_QUADLEN(server_scope_sz) * 4) +
4 /* eir_server_impl_id.count (0) */);
if (!p)
return nfserr_resource;
/* The server_owner struct */
p = xdr_encode_hyper(p, minor_id); /* Minor id */
/* major id */
p = xdr_encode_opaque(p, major_id, major_id_sz);
/* Server scope */
p = xdr_encode_opaque(p, server_scope, server_scope_sz);
/* Implementation id */
*p++ = cpu_to_be32(0); /* zero length nfs_impl_id4 array */
return 0;
out:
return status;
}
static __be32
nfsd4_encode_create_session(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_create_session *sess)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
if (nfserr)
return nfserr;
p = xdr_reserve_space(xdr, 24);
if (!p)
return nfserr_resource;
p = xdr_encode_opaque_fixed(p, sess->sessionid.data,
NFS4_MAX_SESSIONID_LEN);
*p++ = cpu_to_be32(sess->seqid);
*p++ = cpu_to_be32(sess->flags);
p = xdr_reserve_space(xdr, 28);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(0); /* headerpadsz */
*p++ = cpu_to_be32(sess->fore_channel.maxreq_sz);
*p++ = cpu_to_be32(sess->fore_channel.maxresp_sz);
*p++ = cpu_to_be32(sess->fore_channel.maxresp_cached);
*p++ = cpu_to_be32(sess->fore_channel.maxops);
*p++ = cpu_to_be32(sess->fore_channel.maxreqs);
*p++ = cpu_to_be32(sess->fore_channel.nr_rdma_attrs);
if (sess->fore_channel.nr_rdma_attrs) {
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(sess->fore_channel.rdma_attrs);
}
p = xdr_reserve_space(xdr, 28);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(0); /* headerpadsz */
*p++ = cpu_to_be32(sess->back_channel.maxreq_sz);
*p++ = cpu_to_be32(sess->back_channel.maxresp_sz);
*p++ = cpu_to_be32(sess->back_channel.maxresp_cached);
*p++ = cpu_to_be32(sess->back_channel.maxops);
*p++ = cpu_to_be32(sess->back_channel.maxreqs);
*p++ = cpu_to_be32(sess->back_channel.nr_rdma_attrs);
if (sess->back_channel.nr_rdma_attrs) {
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(sess->back_channel.rdma_attrs);
}
return 0;
}
static __be32
nfsd4_encode_sequence(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_sequence *seq)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
if (nfserr)
return nfserr;
p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN + 20);
if (!p)
return nfserr_resource;
p = xdr_encode_opaque_fixed(p, seq->sessionid.data,
NFS4_MAX_SESSIONID_LEN);
*p++ = cpu_to_be32(seq->seqid);
*p++ = cpu_to_be32(seq->slotid);
/* Note slotids are numbered from zero: */
*p++ = cpu_to_be32(seq->maxslots - 1); /* sr_highest_slotid */
*p++ = cpu_to_be32(seq->maxslots - 1); /* sr_target_highest_slotid */
*p++ = cpu_to_be32(seq->status_flags);
resp->cstate.data_offset = xdr->buf->len; /* DRC cache data pointer */
return 0;
}
static __be32
nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_test_stateid *test_stateid)
{
struct xdr_stream *xdr = &resp->xdr;
struct nfsd4_test_stateid_id *stateid, *next;
__be32 *p;
if (nfserr)
return nfserr;
p = xdr_reserve_space(xdr, 4 + (4 * test_stateid->ts_num_ids));
if (!p)
return nfserr_resource;
*p++ = htonl(test_stateid->ts_num_ids);
list_for_each_entry_safe(stateid, next, &test_stateid->ts_stateid_list, ts_id_list) {
*p++ = stateid->ts_id_status;
}
return nfserr;
}
#ifdef CONFIG_NFSD_PNFS
static __be32
nfsd4_encode_getdeviceinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_getdeviceinfo *gdev)
{
struct xdr_stream *xdr = &resp->xdr;
const struct nfsd4_layout_ops *ops;
u32 starting_len = xdr->buf->len, needed_len;
__be32 *p;
dprintk("%s: err %d\n", __func__, be32_to_cpu(nfserr));
if (nfserr)
goto out;
nfserr = nfserr_resource;
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out;
*p++ = cpu_to_be32(gdev->gd_layout_type);
/* If maxcount is 0 then just update notifications */
if (gdev->gd_maxcount != 0) {
ops = nfsd4_layout_ops[gdev->gd_layout_type];
nfserr = ops->encode_getdeviceinfo(xdr, gdev);
if (nfserr) {
/*
* We don't bother to burden the layout drivers with
* enforcing gd_maxcount, just tell the client to
* come back with a bigger buffer if it's not enough.
*/
if (xdr->buf->len + 4 > gdev->gd_maxcount)
goto toosmall;
goto out;
}
}
nfserr = nfserr_resource;
if (gdev->gd_notify_types) {
p = xdr_reserve_space(xdr, 4 + 4);
if (!p)
goto out;
*p++ = cpu_to_be32(1); /* bitmap length */
*p++ = cpu_to_be32(gdev->gd_notify_types);
} else {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out;
*p++ = 0;
}
nfserr = 0;
out:
kfree(gdev->gd_device);
dprintk("%s: done: %d\n", __func__, be32_to_cpu(nfserr));
return nfserr;
toosmall:
dprintk("%s: maxcount too small\n", __func__);
needed_len = xdr->buf->len + 4 /* notifications */;
xdr_truncate_encode(xdr, starting_len);
p = xdr_reserve_space(xdr, 4);
if (!p) {
nfserr = nfserr_resource;
} else {
*p++ = cpu_to_be32(needed_len);
nfserr = nfserr_toosmall;
}
goto out;
}
static __be32
nfsd4_encode_layoutget(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_layoutget *lgp)
{
struct xdr_stream *xdr = &resp->xdr;
const struct nfsd4_layout_ops *ops;
__be32 *p;
dprintk("%s: err %d\n", __func__, nfserr);
if (nfserr)
goto out;
nfserr = nfserr_resource;
p = xdr_reserve_space(xdr, 36 + sizeof(stateid_opaque_t));
if (!p)
goto out;
*p++ = cpu_to_be32(1); /* we always set return-on-close */
*p++ = cpu_to_be32(lgp->lg_sid.si_generation);
p = xdr_encode_opaque_fixed(p, &lgp->lg_sid.si_opaque,
sizeof(stateid_opaque_t));
*p++ = cpu_to_be32(1); /* we always return a single layout */
p = xdr_encode_hyper(p, lgp->lg_seg.offset);
p = xdr_encode_hyper(p, lgp->lg_seg.length);
*p++ = cpu_to_be32(lgp->lg_seg.iomode);
*p++ = cpu_to_be32(lgp->lg_layout_type);
ops = nfsd4_layout_ops[lgp->lg_layout_type];
nfserr = ops->encode_layoutget(xdr, lgp);
out:
kfree(lgp->lg_content);
return nfserr;
}
static __be32
nfsd4_encode_layoutcommit(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_layoutcommit *lcp)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
if (nfserr)
return nfserr;
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(lcp->lc_size_chg);
if (lcp->lc_size_chg) {
p = xdr_reserve_space(xdr, 8);
if (!p)
return nfserr_resource;
p = xdr_encode_hyper(p, lcp->lc_newsize);
}
return nfs_ok;
}
static __be32
nfsd4_encode_layoutreturn(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_layoutreturn *lrp)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
if (nfserr)
return nfserr;
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(lrp->lrs_present);
if (lrp->lrs_present)
return nfsd4_encode_stateid(xdr, &lrp->lr_sid);
return nfs_ok;
}
#endif /* CONFIG_NFSD_PNFS */
static __be32
nfsd42_encode_write_res(struct nfsd4_compoundres *resp, struct nfsd42_write_res *write)
{
__be32 *p;
p = xdr_reserve_space(&resp->xdr, 4 + 8 + 4 + NFS4_VERIFIER_SIZE);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(0);
p = xdr_encode_hyper(p, write->wr_bytes_written);
*p++ = cpu_to_be32(write->wr_stable_how);
p = xdr_encode_opaque_fixed(p, write->wr_verifier.data,
NFS4_VERIFIER_SIZE);
return nfs_ok;
}
static __be32
nfsd4_encode_copy(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_copy *copy)
{
__be32 *p;
if (!nfserr) {
nfserr = nfsd42_encode_write_res(resp, &copy->cp_res);
if (nfserr)
return nfserr;
p = xdr_reserve_space(&resp->xdr, 4 + 4);
if (!p)
	return nfserr_resource;
*p++ = cpu_to_be32(copy->cp_consecutive);
*p++ = cpu_to_be32(copy->cp_synchronous);
}
return nfserr;
}
static __be32
nfsd4_encode_seek(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_seek *seek)
{
__be32 *p;
if (nfserr)
return nfserr;
p = xdr_reserve_space(&resp->xdr, 4 + 8);
if (!p)
	return nfserr_resource;
*p++ = cpu_to_be32(seek->seek_eof);
p = xdr_encode_hyper(p, seek->seek_pos);
return nfserr;
}
static __be32
nfsd4_encode_noop(struct nfsd4_compoundres *resp, __be32 nfserr, void *p)
{
return nfserr;
}
typedef __be32(* nfsd4_enc)(struct nfsd4_compoundres *, __be32, void *);
/*
* Note: nfsd4_enc_ops vector is shared for v4.0 and v4.1
* since we don't need to filter out obsolete ops as this is
* done in the decoding phase.
*/
static nfsd4_enc nfsd4_enc_ops[] = {
[OP_ACCESS] = (nfsd4_enc)nfsd4_encode_access,
[OP_CLOSE] = (nfsd4_enc)nfsd4_encode_close,
[OP_COMMIT] = (nfsd4_enc)nfsd4_encode_commit,
[OP_CREATE] = (nfsd4_enc)nfsd4_encode_create,
[OP_DELEGPURGE] = (nfsd4_enc)nfsd4_encode_noop,
[OP_DELEGRETURN] = (nfsd4_enc)nfsd4_encode_noop,
[OP_GETATTR] = (nfsd4_enc)nfsd4_encode_getattr,
[OP_GETFH] = (nfsd4_enc)nfsd4_encode_getfh,
[OP_LINK] = (nfsd4_enc)nfsd4_encode_link,
[OP_LOCK] = (nfsd4_enc)nfsd4_encode_lock,
[OP_LOCKT] = (nfsd4_enc)nfsd4_encode_lockt,
[OP_LOCKU] = (nfsd4_enc)nfsd4_encode_locku,
[OP_LOOKUP] = (nfsd4_enc)nfsd4_encode_noop,
[OP_LOOKUPP] = (nfsd4_enc)nfsd4_encode_noop,
[OP_NVERIFY] = (nfsd4_enc)nfsd4_encode_noop,
[OP_OPEN] = (nfsd4_enc)nfsd4_encode_open,
[OP_OPENATTR] = (nfsd4_enc)nfsd4_encode_noop,
[OP_OPEN_CONFIRM] = (nfsd4_enc)nfsd4_encode_open_confirm,
[OP_OPEN_DOWNGRADE] = (nfsd4_enc)nfsd4_encode_open_downgrade,
[OP_PUTFH] = (nfsd4_enc)nfsd4_encode_noop,
[OP_PUTPUBFH] = (nfsd4_enc)nfsd4_encode_noop,
[OP_PUTROOTFH] = (nfsd4_enc)nfsd4_encode_noop,
[OP_READ] = (nfsd4_enc)nfsd4_encode_read,
[OP_READDIR] = (nfsd4_enc)nfsd4_encode_readdir,
[OP_READLINK] = (nfsd4_enc)nfsd4_encode_readlink,
[OP_REMOVE] = (nfsd4_enc)nfsd4_encode_remove,
[OP_RENAME] = (nfsd4_enc)nfsd4_encode_rename,
[OP_RENEW] = (nfsd4_enc)nfsd4_encode_noop,
[OP_RESTOREFH] = (nfsd4_enc)nfsd4_encode_noop,
[OP_SAVEFH] = (nfsd4_enc)nfsd4_encode_noop,
[OP_SECINFO] = (nfsd4_enc)nfsd4_encode_secinfo,
[OP_SETATTR] = (nfsd4_enc)nfsd4_encode_setattr,
[OP_SETCLIENTID] = (nfsd4_enc)nfsd4_encode_setclientid,
[OP_SETCLIENTID_CONFIRM] = (nfsd4_enc)nfsd4_encode_noop,
[OP_VERIFY] = (nfsd4_enc)nfsd4_encode_noop,
[OP_WRITE] = (nfsd4_enc)nfsd4_encode_write,
[OP_RELEASE_LOCKOWNER] = (nfsd4_enc)nfsd4_encode_noop,
/* NFSv4.1 operations */
[OP_BACKCHANNEL_CTL] = (nfsd4_enc)nfsd4_encode_noop,
[OP_BIND_CONN_TO_SESSION] = (nfsd4_enc)nfsd4_encode_bind_conn_to_session,
[OP_EXCHANGE_ID] = (nfsd4_enc)nfsd4_encode_exchange_id,
[OP_CREATE_SESSION] = (nfsd4_enc)nfsd4_encode_create_session,
[OP_DESTROY_SESSION] = (nfsd4_enc)nfsd4_encode_noop,
[OP_FREE_STATEID] = (nfsd4_enc)nfsd4_encode_noop,
[OP_GET_DIR_DELEGATION] = (nfsd4_enc)nfsd4_encode_noop,
#ifdef CONFIG_NFSD_PNFS
[OP_GETDEVICEINFO] = (nfsd4_enc)nfsd4_encode_getdeviceinfo,
[OP_GETDEVICELIST] = (nfsd4_enc)nfsd4_encode_noop,
[OP_LAYOUTCOMMIT] = (nfsd4_enc)nfsd4_encode_layoutcommit,
[OP_LAYOUTGET] = (nfsd4_enc)nfsd4_encode_layoutget,
[OP_LAYOUTRETURN] = (nfsd4_enc)nfsd4_encode_layoutreturn,
#else
[OP_GETDEVICEINFO] = (nfsd4_enc)nfsd4_encode_noop,
[OP_GETDEVICELIST] = (nfsd4_enc)nfsd4_encode_noop,
[OP_LAYOUTCOMMIT] = (nfsd4_enc)nfsd4_encode_noop,
[OP_LAYOUTGET] = (nfsd4_enc)nfsd4_encode_noop,
[OP_LAYOUTRETURN] = (nfsd4_enc)nfsd4_encode_noop,
#endif
[OP_SECINFO_NO_NAME] = (nfsd4_enc)nfsd4_encode_secinfo_no_name,
[OP_SEQUENCE] = (nfsd4_enc)nfsd4_encode_sequence,
[OP_SET_SSV] = (nfsd4_enc)nfsd4_encode_noop,
[OP_TEST_STATEID] = (nfsd4_enc)nfsd4_encode_test_stateid,
[OP_WANT_DELEGATION] = (nfsd4_enc)nfsd4_encode_noop,
[OP_DESTROY_CLIENTID] = (nfsd4_enc)nfsd4_encode_noop,
[OP_RECLAIM_COMPLETE] = (nfsd4_enc)nfsd4_encode_noop,
/* NFSv4.2 operations */
[OP_ALLOCATE] = (nfsd4_enc)nfsd4_encode_noop,
[OP_COPY] = (nfsd4_enc)nfsd4_encode_copy,
[OP_COPY_NOTIFY] = (nfsd4_enc)nfsd4_encode_noop,
[OP_DEALLOCATE] = (nfsd4_enc)nfsd4_encode_noop,
[OP_IO_ADVISE] = (nfsd4_enc)nfsd4_encode_noop,
[OP_LAYOUTERROR] = (nfsd4_enc)nfsd4_encode_noop,
[OP_LAYOUTSTATS] = (nfsd4_enc)nfsd4_encode_noop,
[OP_OFFLOAD_CANCEL] = (nfsd4_enc)nfsd4_encode_noop,
[OP_OFFLOAD_STATUS] = (nfsd4_enc)nfsd4_encode_noop,
[OP_READ_PLUS] = (nfsd4_enc)nfsd4_encode_noop,
[OP_SEEK] = (nfsd4_enc)nfsd4_encode_seek,
[OP_WRITE_SAME] = (nfsd4_enc)nfsd4_encode_noop,
[OP_CLONE] = (nfsd4_enc)nfsd4_encode_noop,
};
/*
* Calculate whether we still have space to encode respsize bytes.
* There are two considerations:
* - For NFS versions >=4.1, the size of the reply must stay within
* session limits
* - For all NFS versions, we must stay within limited preallocated
* buffer space.
*
* This is called before the operation is processed, so can only provide
* an upper estimate. For some nonidempotent operations (such as
* getattr), it's not necessarily a problem if that estimate is wrong,
* as we can fail it after processing without significant side effects.
*/
__be32 nfsd4_check_resp_size(struct nfsd4_compoundres *resp, u32 respsize)
{
struct xdr_buf *buf = &resp->rqstp->rq_res;
struct nfsd4_slot *slot = resp->cstate.slot;
if (buf->len + respsize <= buf->buflen)
return nfs_ok;
if (!nfsd4_has_session(&resp->cstate))
return nfserr_resource;
if (slot->sl_flags & NFSD4_SLOT_CACHETHIS) {
WARN_ON_ONCE(1);
return nfserr_rep_too_big_to_cache;
}
return nfserr_rep_too_big;
}
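/*
* Added commentary: nfsd4_encode_operation() below passes
* COMPOUND_ERR_SLACK_SPACE here whenever more operations follow in the
* compound, guaranteeing there is always room left to encode at least an
* error status for the next op.
*/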
void
nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
{
struct xdr_stream *xdr = &resp->xdr;
struct nfs4_stateowner *so = resp->cstate.replay_owner;
struct svc_rqst *rqstp = resp->rqstp;
int post_err_offset;
nfsd4_enc encoder;
__be32 *p;
p = xdr_reserve_space(xdr, 8);
if (!p) {
WARN_ON_ONCE(1);
return;
}
*p++ = cpu_to_be32(op->opnum);
post_err_offset = xdr->buf->len;
if (op->opnum == OP_ILLEGAL)
goto status;
BUG_ON(op->opnum < 0 || op->opnum >= ARRAY_SIZE(nfsd4_enc_ops) ||
!nfsd4_enc_ops[op->opnum]);
encoder = nfsd4_enc_ops[op->opnum];
op->status = encoder(resp, op->status, &op->u);
xdr_commit_encode(xdr);
/* nfsd4_check_resp_size guarantees enough room for error status */
if (!op->status) {
int space_needed = 0;
if (!nfsd4_last_compound_op(rqstp))
space_needed = COMPOUND_ERR_SLACK_SPACE;
op->status = nfsd4_check_resp_size(resp, space_needed);
}
if (op->status == nfserr_resource && nfsd4_has_session(&resp->cstate)) {
struct nfsd4_slot *slot = resp->cstate.slot;
if (slot->sl_flags & NFSD4_SLOT_CACHETHIS)
op->status = nfserr_rep_too_big_to_cache;
else
op->status = nfserr_rep_too_big;
}
if (op->status == nfserr_resource ||
op->status == nfserr_rep_too_big ||
op->status == nfserr_rep_too_big_to_cache) {
/*
* The operation may have already been encoded or
* partially encoded. No op returns anything additional
* in the case of one of these three errors, so we can
* just truncate back to after the status. But it's a
* bug if we had to do this on a non-idempotent op:
*/
warn_on_nonidempotent_op(op);
xdr_truncate_encode(xdr, post_err_offset);
}
if (so) {
int len = xdr->buf->len - post_err_offset;
so->so_replay.rp_status = op->status;
so->so_replay.rp_buflen = len;
read_bytes_from_xdr_buf(xdr->buf, post_err_offset,
so->so_replay.rp_buf, len);
}
status:
/* Note that op->status is already in network byte order: */
write_bytes_to_xdr_buf(xdr->buf, post_err_offset - 4, &op->status, 4);
}
/*
* Encode the reply stored in the stateowner reply cache
*
* XDR note: do not encode rp->rp_buflen: the buffer contains the
* previously sent already encoded operation.
*/
void
nfsd4_encode_replay(struct xdr_stream *xdr, struct nfsd4_op *op)
{
__be32 *p;
struct nfs4_replay *rp = op->replay;
BUG_ON(!rp);
p = xdr_reserve_space(xdr, 8 + rp->rp_buflen);
if (!p) {
WARN_ON_ONCE(1);
return;
}
*p++ = cpu_to_be32(op->opnum);
*p++ = rp->rp_status; /* already xdr'ed */
p = xdr_encode_opaque_fixed(p, rp->rp_buf, rp->rp_buflen);
}
int
nfs4svc_encode_voidres(struct svc_rqst *rqstp, __be32 *p, void *dummy)
{
return xdr_ressize_check(rqstp, p);
}
int nfsd4_release_compoundargs(void *rq, __be32 *p, void *resp)
{
struct svc_rqst *rqstp = rq;
struct nfsd4_compoundargs *args = rqstp->rq_argp;
if (args->ops != args->iops) {
kfree(args->ops);
args->ops = args->iops;
}
kfree(args->tmpp);
args->tmpp = NULL;
while (args->to_free) {
struct svcxdr_tmpbuf *tb = args->to_free;
args->to_free = tb->next;
kfree(tb);
}
return 1;
}
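/*
* Added commentary: args->to_free is a singly linked list of temporary
* buffers accumulated while decoding the compound; walking it here and
* kfree()ing each node releases them exactly once per request.
*/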
int
nfs4svc_decode_compoundargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compoundargs *args)
{
if (rqstp->rq_arg.head[0].iov_len % 4) {
/* client is nuts */
dprintk("%s: compound not properly padded! (peeraddr=%pISc xid=0x%x)",
__func__, svc_addr(rqstp), be32_to_cpu(rqstp->rq_xid));
return 0;
}
args->p = p;
args->end = rqstp->rq_arg.head[0].iov_base + rqstp->rq_arg.head[0].iov_len;
args->pagelist = rqstp->rq_arg.pages;
args->pagelen = rqstp->rq_arg.page_len;
args->tmpp = NULL;
args->to_free = NULL;
args->ops = args->iops;
args->rqstp = rqstp;
return !nfsd4_decode_compound(args);
}
int
nfs4svc_encode_compoundres(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compoundres *resp)
{
/*
* All that remains is to write the tag and operation count...
*/
struct xdr_buf *buf = resp->xdr.buf;
WARN_ON_ONCE(buf->len != buf->head[0].iov_len + buf->page_len +
buf->tail[0].iov_len);
rqstp->rq_next_page = resp->xdr.page_ptr + 1;
p = resp->tagp;
*p++ = htonl(resp->taglen);
memcpy(p, resp->tag, resp->taglen);
p += XDR_QUADLEN(resp->taglen);
*p++ = htonl(resp->opcnt);
nfsd4_sequence_done(resp);
return 1;
}
/*
* Local variables:
* c-basic-offset: 8
* End:
*/
/* ./CrossVul/dataset_final_sorted/CWE-404/c/good_3351_6 | crossvul-cpp_data_good_3351_8 */
/*
* File operations used by nfsd. Some of these have been ripped from
* other parts of the kernel because they weren't exported, others
* are partial duplicates with added or changed functionality.
*
* Note that several functions dget() the dentry upon which they want
* to act, most notably those that create directory entries. Response
* dentries are dput()'d if necessary in the release callback.
* So if you notice code paths that apparently fail to dput() the
* dentry, don't worry--they have been taken care of.
*
* Copyright (C) 1995-1999 Olaf Kirch <okir@monad.swb.de>
* Zerocopy NFS support (C) 2002 Hirokazu Takahashi <taka@valinux.co.jp>
*/
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/splice.h>
#include <linux/falloc.h>
#include <linux/fcntl.h>
#include <linux/namei.h>
#include <linux/delay.h>
#include <linux/fsnotify.h>
#include <linux/posix_acl_xattr.h>
#include <linux/xattr.h>
#include <linux/jhash.h>
#include <linux/ima.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/exportfs.h>
#include <linux/writeback.h>
#include <linux/security.h>
#ifdef CONFIG_NFSD_V3
#include "xdr3.h"
#endif /* CONFIG_NFSD_V3 */
#ifdef CONFIG_NFSD_V4
#include "../internal.h"
#include "acl.h"
#include "idmap.h"
#endif /* CONFIG_NFSD_V4 */
#include "nfsd.h"
#include "vfs.h"
#include "trace.h"
#define NFSDDBG_FACILITY NFSDDBG_FILEOP
/*
* This is a cache of readahead params that help us choose the proper
* readahead strategy. Initially, we set all readahead parameters to 0
* and let the VFS handle things.
* If you increase the number of cached files very much, you'll need to
* add a hash table here.
*/
struct raparms {
struct raparms *p_next;
unsigned int p_count;
ino_t p_ino;
dev_t p_dev;
int p_set;
struct file_ra_state p_ra;
unsigned int p_hindex;
};
struct raparm_hbucket {
struct raparms *pb_head;
spinlock_t pb_lock;
} ____cacheline_aligned_in_smp;
#define RAPARM_HASH_BITS 4
#define RAPARM_HASH_SIZE (1<<RAPARM_HASH_BITS)
#define RAPARM_HASH_MASK (RAPARM_HASH_SIZE-1)
static struct raparm_hbucket raparm_hash[RAPARM_HASH_SIZE];
/*
* Called from nfsd_lookup and encode_dirent. Check if we have crossed
* a mount point.
* Returns -EAGAIN or -ETIMEDOUT leaving *dpp and *expp unchanged,
* or nfs_ok having possibly changed *dpp and *expp
*/
int
nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
struct svc_export **expp)
{
struct svc_export *exp = *expp, *exp2 = NULL;
struct dentry *dentry = *dpp;
struct path path = {.mnt = mntget(exp->ex_path.mnt),
.dentry = dget(dentry)};
int err = 0;
err = follow_down(&path);
if (err < 0)
goto out;
if (path.mnt == exp->ex_path.mnt && path.dentry == dentry &&
nfsd_mountpoint(dentry, exp) == 2) {
/* This is only a mountpoint in some other namespace */
path_put(&path);
goto out;
}
exp2 = rqst_exp_get_by_name(rqstp, &path);
if (IS_ERR(exp2)) {
err = PTR_ERR(exp2);
/*
* We normally allow NFS clients to continue
* "underneath" a mountpoint that is not exported.
* The exception is V4ROOT, where no traversal is ever
* allowed without an explicit export of the new
* directory.
*/
if (err == -ENOENT && !(exp->ex_flags & NFSEXP_V4ROOT))
err = 0;
path_put(&path);
goto out;
}
if (nfsd_v4client(rqstp) ||
(exp->ex_flags & NFSEXP_CROSSMOUNT) || EX_NOHIDE(exp2)) {
/* successfully crossed mount point */
/*
* This is subtle: path.dentry is *not* on path.mnt
* at this point. The only reason we are safe is that
* original mnt is pinned down by exp, so we should
* put path *before* putting exp
*/
*dpp = path.dentry;
path.dentry = dentry;
*expp = exp2;
exp2 = exp;
}
path_put(&path);
exp_put(exp2);
out:
return err;
}
static void follow_to_parent(struct path *path)
{
struct dentry *dp;
while (path->dentry == path->mnt->mnt_root && follow_up(path))
;
dp = dget_parent(path->dentry);
dput(path->dentry);
path->dentry = dp;
}
static int nfsd_lookup_parent(struct svc_rqst *rqstp, struct dentry *dparent, struct svc_export **exp, struct dentry **dentryp)
{
struct svc_export *exp2;
struct path path = {.mnt = mntget((*exp)->ex_path.mnt),
.dentry = dget(dparent)};
follow_to_parent(&path);
exp2 = rqst_exp_parent(rqstp, &path);
if (PTR_ERR(exp2) == -ENOENT) {
*dentryp = dget(dparent);
} else if (IS_ERR(exp2)) {
path_put(&path);
return PTR_ERR(exp2);
} else {
*dentryp = dget(path.dentry);
exp_put(*exp);
*exp = exp2;
}
path_put(&path);
return 0;
}
/*
* For nfsd purposes, we treat V4ROOT exports as though there was an
* export at *every* directory.
* We return:
* '1' if this dentry *must* be an export point,
* '2' if it might be, if there is really a mount here, and
* '0' if there is no chance of an export point here.
*/
int nfsd_mountpoint(struct dentry *dentry, struct svc_export *exp)
{
if (!d_inode(dentry))
return 0;
if (exp->ex_flags & NFSEXP_V4ROOT)
return 1;
if (nfsd4_is_junction(dentry))
return 1;
if (d_mountpoint(dentry))
/*
* Might only be a mountpoint in a different namespace,
* but we need to check.
*/
return 2;
return 0;
}
__be32
nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp,
const char *name, unsigned int len,
struct svc_export **exp_ret, struct dentry **dentry_ret)
{
struct svc_export *exp;
struct dentry *dparent;
struct dentry *dentry;
int host_err;
dprintk("nfsd: nfsd_lookup(fh %s, %.*s)\n", SVCFH_fmt(fhp), len,name);
dparent = fhp->fh_dentry;
exp = exp_get(fhp->fh_export);
/* Lookup the name, but don't follow links */
if (isdotent(name, len)) {
if (len==1)
dentry = dget(dparent);
else if (dparent != exp->ex_path.dentry)
dentry = dget_parent(dparent);
else if (!EX_NOHIDE(exp) && !nfsd_v4client(rqstp))
dentry = dget(dparent); /* .. == . just like at / */
else {
/* checking mountpoint crossing is very different when stepping up */
host_err = nfsd_lookup_parent(rqstp, dparent, &exp, &dentry);
if (host_err)
goto out_nfserr;
}
} else {
/*
* In the nfsd4_open() case, this may be held across
* subsequent open and delegation acquisition which may
* need to take the child's i_mutex:
*/
fh_lock_nested(fhp, I_MUTEX_PARENT);
dentry = lookup_one_len(name, dparent, len);
host_err = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto out_nfserr;
if (nfsd_mountpoint(dentry, exp)) {
/*
* We don't need the i_mutex after all. It's
* still possible we could open this (regular
* files can be mountpoints too), but the
* i_mutex is just there to prevent renames of
* something that we might be about to delegate,
* and a mountpoint won't be renamed:
*/
fh_unlock(fhp);
if ((host_err = nfsd_cross_mnt(rqstp, &dentry, &exp))) {
dput(dentry);
goto out_nfserr;
}
}
}
*dentry_ret = dentry;
*exp_ret = exp;
return 0;
out_nfserr:
exp_put(exp);
return nfserrno(host_err);
}
/*
* Look up one component of a pathname.
* N.B. After this call _both_ fhp and resfh need an fh_put
*
* If the lookup would cross a mountpoint, and the mounted filesystem
* is exported to the client with NFSEXP_NOHIDE, then the lookup is
* accepted as it stands and the mounted directory is
* returned. Otherwise the covered directory is returned.
* NOTE: this mountpoint crossing is not supported properly by all
* clients and is explicitly disallowed for NFSv3
* NeilBrown <neilb@cse.unsw.edu.au>
*/
__be32
nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
unsigned int len, struct svc_fh *resfh)
{
struct svc_export *exp;
struct dentry *dentry;
__be32 err;
err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_EXEC);
if (err)
return err;
err = nfsd_lookup_dentry(rqstp, fhp, name, len, &exp, &dentry);
if (err)
return err;
err = check_nfsd_access(exp, rqstp);
if (err)
goto out;
/*
* Note: we compose the file handle now, but as the
* dentry may be negative, it may need to be updated.
*/
err = fh_compose(resfh, exp, dentry, fhp);
if (!err && d_really_is_negative(dentry))
err = nfserr_noent;
out:
dput(dentry);
exp_put(exp);
return err;
}
/*
* Commit metadata changes to stable storage.
*/
static int
commit_metadata(struct svc_fh *fhp)
{
struct inode *inode = d_inode(fhp->fh_dentry);
const struct export_operations *export_ops = inode->i_sb->s_export_op;
if (!EX_ISSYNC(fhp->fh_export))
return 0;
if (export_ops->commit_metadata)
return export_ops->commit_metadata(inode);
return sync_inode_metadata(inode, 1);
}
/*
* Go over the attributes and take care of the small differences between
* NFS semantics and what Linux expects.
*/
static void
nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap)
{
/* sanitize the mode change */
if (iap->ia_valid & ATTR_MODE) {
iap->ia_mode &= S_IALLUGO;
iap->ia_mode |= (inode->i_mode & ~S_IALLUGO);
}
/* Revoke setuid/setgid on chown */
if (!S_ISDIR(inode->i_mode) &&
((iap->ia_valid & ATTR_UID) || (iap->ia_valid & ATTR_GID))) {
iap->ia_valid |= ATTR_KILL_PRIV;
if (iap->ia_valid & ATTR_MODE) {
/* we're setting mode too, just clear the s*id bits */
iap->ia_mode &= ~S_ISUID;
if (iap->ia_mode & S_IXGRP)
iap->ia_mode &= ~S_ISGID;
} else {
/* set ATTR_KILL_* bits and let VFS handle it */
iap->ia_valid |= (ATTR_KILL_SUID | ATTR_KILL_SGID);
}
}
}
static __be32
nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp,
struct iattr *iap)
{
struct inode *inode = d_inode(fhp->fh_dentry);
int host_err;
if (iap->ia_size < inode->i_size) {
__be32 err;
err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
NFSD_MAY_TRUNC | NFSD_MAY_OWNER_OVERRIDE);
if (err)
return err;
}
host_err = get_write_access(inode);
if (host_err)
goto out_nfserrno;
host_err = locks_verify_truncate(inode, NULL, iap->ia_size);
if (host_err)
goto out_put_write_access;
return 0;
out_put_write_access:
put_write_access(inode);
out_nfserrno:
return nfserrno(host_err);
}
/*
* Set various file attributes. After this call fhp needs an fh_put.
*/
__be32
nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
int check_guard, time_t guardtime)
{
struct dentry *dentry;
struct inode *inode;
int accmode = NFSD_MAY_SATTR;
umode_t ftype = 0;
__be32 err;
int host_err;
bool get_write_count;
bool size_change = (iap->ia_valid & ATTR_SIZE);
if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE))
accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE;
if (iap->ia_valid & ATTR_SIZE)
ftype = S_IFREG;
/* Callers that do fh_verify should do the fh_want_write: */
get_write_count = !fhp->fh_dentry;
/* Get inode */
err = fh_verify(rqstp, fhp, ftype, accmode);
if (err)
return err;
if (get_write_count) {
host_err = fh_want_write(fhp);
if (host_err)
goto out;
}
dentry = fhp->fh_dentry;
inode = d_inode(dentry);
/* Ignore any mode updates on symlinks */
if (S_ISLNK(inode->i_mode))
iap->ia_valid &= ~ATTR_MODE;
if (!iap->ia_valid)
return 0;
nfsd_sanitize_attrs(inode, iap);
if (check_guard && guardtime != inode->i_ctime.tv_sec)
return nfserr_notsync;
/*
* The size case is special, it changes the file in addition to the
* attributes, and file systems don't expect it to be mixed with
* "random" attribute changes. We thus split out the size change
* into a separate call to ->setattr, and do the rest as a separate
* setattr call.
*/
if (size_change) {
err = nfsd_get_write_access(rqstp, fhp, iap);
if (err)
return err;
}
fh_lock(fhp);
if (size_change) {
/*
* RFC5661, Section 18.30.4:
* Changing the size of a file with SETATTR indirectly
* changes the time_modify and change attributes.
*
* (and similar for the older RFCs)
*/
struct iattr size_attr = {
.ia_valid = ATTR_SIZE | ATTR_CTIME | ATTR_MTIME,
.ia_size = iap->ia_size,
};
host_err = notify_change(dentry, &size_attr, NULL);
if (host_err)
goto out_unlock;
iap->ia_valid &= ~ATTR_SIZE;
/*
* Avoid the additional setattr call below if the only other
* attribute that the client sends is the mtime, as we update
* it as part of the size change above.
*/
if ((iap->ia_valid & ~ATTR_MTIME) == 0)
goto out_unlock;
}
iap->ia_valid |= ATTR_CTIME;
host_err = notify_change(dentry, iap, NULL);
out_unlock:
fh_unlock(fhp);
if (size_change)
put_write_access(inode);
out:
if (!host_err)
host_err = commit_metadata(fhp);
return nfserrno(host_err);
}
#if defined(CONFIG_NFSD_V4)
/*
* NFS junction information is stored in an extended attribute.
*/
#define NFSD_JUNCTION_XATTR_NAME XATTR_TRUSTED_PREFIX "junction.nfs"
/**
* nfsd4_is_junction - Test if an object could be an NFS junction
*
* @dentry: object to test
*
* Returns 1 if "dentry" appears to contain NFS junction information.
* Otherwise 0 is returned.
*/
int nfsd4_is_junction(struct dentry *dentry)
{
struct inode *inode = d_inode(dentry);
if (inode == NULL)
return 0;
if (inode->i_mode & S_IXUGO)
return 0;
if (!(inode->i_mode & S_ISVTX))
return 0;
if (vfs_getxattr(dentry, NFSD_JUNCTION_XATTR_NAME, NULL, 0) <= 0)
return 0;
return 1;
}
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
__be32 nfsd4_set_nfs4_label(struct svc_rqst *rqstp, struct svc_fh *fhp,
struct xdr_netobj *label)
{
__be32 error;
int host_error;
struct dentry *dentry;
error = fh_verify(rqstp, fhp, 0 /* S_IFREG */, NFSD_MAY_SATTR);
if (error)
return error;
dentry = fhp->fh_dentry;
inode_lock(d_inode(dentry));
host_error = security_inode_setsecctx(dentry, label->data, label->len);
inode_unlock(d_inode(dentry));
return nfserrno(host_error);
}
#else
__be32 nfsd4_set_nfs4_label(struct svc_rqst *rqstp, struct svc_fh *fhp,
struct xdr_netobj *label)
{
return nfserr_notsupp;
}
#endif
__be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
u64 dst_pos, u64 count)
{
return nfserrno(do_clone_file_range(src, src_pos, dst, dst_pos, count));
}
ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
u64 dst_pos, u64 count)
{
/*
* Limit copy to 4MB to prevent indefinitely blocking an nfsd
* thread and client rpc slot. The choice of 4MB is somewhat
* arbitrary. We might instead base this on r/wsize, or make it
* tunable, or use a time instead of a byte limit, or implement
* asynchronous copy. In theory a client could also recognize a
* limit like this and pipeline multiple COPY requests.
*/
count = min_t(u64, count, 1 << 22);
return vfs_copy_file_range(src, src_pos, dst, dst_pos, count, 0);
}
__be32 nfsd4_vfs_fallocate(struct svc_rqst *rqstp, struct svc_fh *fhp,
struct file *file, loff_t offset, loff_t len,
int flags)
{
int error;
if (!S_ISREG(file_inode(file)->i_mode))
return nfserr_inval;
error = vfs_fallocate(file, flags, offset, len);
if (!error)
error = commit_metadata(fhp);
return nfserrno(error);
}
#endif /* defined(CONFIG_NFSD_V4) */
#ifdef CONFIG_NFSD_V3
/*
* Check server access rights to a file system object
*/
struct accessmap {
u32 access;
int how;
};
static struct accessmap nfs3_regaccess[] = {
{ NFS3_ACCESS_READ, NFSD_MAY_READ },
{ NFS3_ACCESS_EXECUTE, NFSD_MAY_EXEC },
{ NFS3_ACCESS_MODIFY, NFSD_MAY_WRITE|NFSD_MAY_TRUNC },
{ NFS3_ACCESS_EXTEND, NFSD_MAY_WRITE },
{ 0, 0 }
};
static struct accessmap nfs3_diraccess[] = {
{ NFS3_ACCESS_READ, NFSD_MAY_READ },
{ NFS3_ACCESS_LOOKUP, NFSD_MAY_EXEC },
{ NFS3_ACCESS_MODIFY, NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC},
{ NFS3_ACCESS_EXTEND, NFSD_MAY_EXEC|NFSD_MAY_WRITE },
{ NFS3_ACCESS_DELETE, NFSD_MAY_REMOVE },
{ 0, 0 }
};
static struct accessmap nfs3_anyaccess[] = {
/* Some clients - Solaris 2.6 at least - make an access call
* to the server to check for access to things like /dev/null
* (which really, the server doesn't care about). So
* we provide simple access checking for them, looking
* mainly at mode bits, and we make sure to ignore read-only
* filesystem checks.
*/
{ NFS3_ACCESS_READ, NFSD_MAY_READ },
{ NFS3_ACCESS_EXECUTE, NFSD_MAY_EXEC },
{ NFS3_ACCESS_MODIFY, NFSD_MAY_WRITE|NFSD_MAY_LOCAL_ACCESS },
{ NFS3_ACCESS_EXTEND, NFSD_MAY_WRITE|NFSD_MAY_LOCAL_ACCESS },
{ 0, 0 }
};
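/*
* Illustrative sketch (not part of nfsd): decomposing an ACCESS query
* against one of the maps above. This computes only the "supported" mask;
* nfsd_access() below additionally folds the per-bit nfsd_permission()
* results into the granted mask.
*/
static inline u32 accessmap_supported_sketch(const struct accessmap *map, u32 query)
{
u32 sresult = 0;
for (; map->access; map++)
if (map->access & query)
sresult |= map->access;
return sresult;
}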
__be32
nfsd_access(struct svc_rqst *rqstp, struct svc_fh *fhp, u32 *access, u32 *supported)
{
struct accessmap *map;
struct svc_export *export;
struct dentry *dentry;
u32 query, result = 0, sresult = 0;
__be32 error;
error = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP);
if (error)
goto out;
export = fhp->fh_export;
dentry = fhp->fh_dentry;
if (d_is_reg(dentry))
map = nfs3_regaccess;
else if (d_is_dir(dentry))
map = nfs3_diraccess;
else
map = nfs3_anyaccess;
query = *access;
for (; map->access; map++) {
if (map->access & query) {
__be32 err2;
sresult |= map->access;
err2 = nfsd_permission(rqstp, export, dentry, map->how);
switch (err2) {
case nfs_ok:
result |= map->access;
break;
/* the following error codes just mean the access was not allowed,
* rather than that an error occurred */
case nfserr_rofs:
case nfserr_acces:
case nfserr_perm:
/* simply don't "or" in the access bit. */
break;
default:
error = err2;
goto out;
}
}
}
*access = result;
if (supported)
*supported = sresult;
out:
return error;
}
#endif /* CONFIG_NFSD_V3 */
static int nfsd_open_break_lease(struct inode *inode, int access)
{
unsigned int mode;
if (access & NFSD_MAY_NOT_BREAK_LEASE)
return 0;
mode = (access & NFSD_MAY_WRITE) ? O_WRONLY : O_RDONLY;
return break_lease(inode, mode | O_NONBLOCK);
}
/*
* Open an existing file or directory.
* The may_flags argument indicates the type of open (read/write/lock)
* and additional flags.
* N.B. After this call fhp needs an fh_put
*/
__be32
nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
int may_flags, struct file **filp)
{
struct path path;
struct inode *inode;
struct file *file;
int flags = O_RDONLY|O_LARGEFILE;
__be32 err;
int host_err = 0;
validate_process_creds();
/*
* If we get here, then the client has already done an "open",
* and (hopefully) checked permission - so allow OWNER_OVERRIDE
* in case a chmod has now revoked permission.
*
* Arguably we should also allow the owner override for
* directories, but we never have and it doesn't seem to have
* caused anyone a problem. If we were to change this, note
* also that our filldir callbacks would need a variant of
* lookup_one_len that doesn't check permissions.
*/
if (type == S_IFREG)
may_flags |= NFSD_MAY_OWNER_OVERRIDE;
err = fh_verify(rqstp, fhp, type, may_flags);
if (err)
goto out;
path.mnt = fhp->fh_export->ex_path.mnt;
path.dentry = fhp->fh_dentry;
inode = d_inode(path.dentry);
/* Disallow write access to files with the append-only bit set
* or any access when mandatory locking enabled
*/
err = nfserr_perm;
if (IS_APPEND(inode) && (may_flags & NFSD_MAY_WRITE))
goto out;
/*
* We must ignore files (but only files) which might have mandatory
* locks on them because there is no way to know if the accessor has
* the lock.
*/
if (S_ISREG((inode)->i_mode) && mandatory_lock(inode))
goto out;
if (!inode->i_fop)
goto out;
host_err = nfsd_open_break_lease(inode, may_flags);
if (host_err) /* NOMEM or WOULDBLOCK */
goto out_nfserr;
if (may_flags & NFSD_MAY_WRITE) {
if (may_flags & NFSD_MAY_READ)
flags = O_RDWR|O_LARGEFILE;
else
flags = O_WRONLY|O_LARGEFILE;
}
file = dentry_open(&path, flags, current_cred());
if (IS_ERR(file)) {
host_err = PTR_ERR(file);
goto out_nfserr;
}
host_err = ima_file_check(file, may_flags, 0);
if (host_err) {
fput(file);
goto out_nfserr;
}
if (may_flags & NFSD_MAY_64BIT_COOKIE)
file->f_mode |= FMODE_64BITHASH;
else
file->f_mode |= FMODE_32BITHASH;
*filp = file;
out_nfserr:
err = nfserrno(host_err);
out:
validate_process_creds();
return err;
}
struct raparms *
nfsd_init_raparms(struct file *file)
{
struct inode *inode = file_inode(file);
dev_t dev = inode->i_sb->s_dev;
ino_t ino = inode->i_ino;
struct raparms *ra, **rap, **frap = NULL;
int depth = 0;
unsigned int hash;
struct raparm_hbucket *rab;
hash = jhash_2words(dev, ino, 0xfeedbeef) & RAPARM_HASH_MASK;
rab = &raparm_hash[hash];
spin_lock(&rab->pb_lock);
for (rap = &rab->pb_head; (ra = *rap); rap = &ra->p_next) {
if (ra->p_ino == ino && ra->p_dev == dev)
goto found;
depth++;
if (ra->p_count == 0)
frap = rap;
}
depth = nfsdstats.ra_size;
if (!frap) {
spin_unlock(&rab->pb_lock);
return NULL;
}
rap = frap;
ra = *frap;
ra->p_dev = dev;
ra->p_ino = ino;
ra->p_set = 0;
ra->p_hindex = hash;
found:
if (rap != &rab->pb_head) {
*rap = ra->p_next;
ra->p_next = rab->pb_head;
rab->pb_head = ra;
}
ra->p_count++;
nfsdstats.ra_depth[depth*10/nfsdstats.ra_size]++;
spin_unlock(&rab->pb_lock);
if (ra->p_set)
file->f_ra = ra->p_ra;
return ra;
}
void nfsd_put_raparams(struct file *file, struct raparms *ra)
{
struct raparm_hbucket *rab = &raparm_hash[ra->p_hindex];
spin_lock(&rab->pb_lock);
ra->p_ra = file->f_ra;
ra->p_set = 1;
ra->p_count--;
spin_unlock(&rab->pb_lock);
}
/*
* Grab and keep cached pages associated with a file in the svc_rqst
* so that they can be passed to the network sendmsg/sendpage routines
* directly. They will be released after the sending has completed.
*/
static int
nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
struct splice_desc *sd)
{
struct svc_rqst *rqstp = sd->u.data;
struct page **pp = rqstp->rq_next_page;
struct page *page = buf->page;
size_t size;
size = sd->len;
if (rqstp->rq_res.page_len == 0) {
get_page(page);
put_page(*rqstp->rq_next_page);
*(rqstp->rq_next_page++) = page;
rqstp->rq_res.page_base = buf->offset;
rqstp->rq_res.page_len = size;
} else if (page != pp[-1]) {
get_page(page);
if (*rqstp->rq_next_page)
put_page(*rqstp->rq_next_page);
*(rqstp->rq_next_page++) = page;
rqstp->rq_res.page_len += size;
} else
rqstp->rq_res.page_len += size;
return size;
}
static int nfsd_direct_splice_actor(struct pipe_inode_info *pipe,
struct splice_desc *sd)
{
return __splice_from_pipe(pipe, sd, nfsd_splice_actor);
}
static __be32
nfsd_finish_read(struct file *file, unsigned long *count, int host_err)
{
if (host_err >= 0) {
nfsdstats.io_read += host_err;
*count = host_err;
fsnotify_access(file);
return 0;
} else
return nfserrno(host_err);
}
__be32 nfsd_splice_read(struct svc_rqst *rqstp,
struct file *file, loff_t offset, unsigned long *count)
{
struct splice_desc sd = {
.len = 0,
.total_len = *count,
.pos = offset,
.u.data = rqstp,
};
int host_err;
rqstp->rq_next_page = rqstp->rq_respages + 1;
host_err = splice_direct_to_actor(file, &sd, nfsd_direct_splice_actor);
return nfsd_finish_read(file, count, host_err);
}
__be32 nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen,
unsigned long *count)
{
mm_segment_t oldfs;
int host_err;
oldfs = get_fs();
set_fs(KERNEL_DS);
host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset, 0);
set_fs(oldfs);
return nfsd_finish_read(file, count, host_err);
}
static __be32
nfsd_vfs_read(struct svc_rqst *rqstp, struct file *file,
loff_t offset, struct kvec *vec, int vlen, unsigned long *count)
{
if (file->f_op->splice_read && test_bit(RQ_SPLICE_OK, &rqstp->rq_flags))
return nfsd_splice_read(rqstp, file, offset, count);
else
return nfsd_readv(file, offset, vec, vlen, count);
}
/*
* Gathered writes: If another process is currently writing to the file,
* there's a high chance this is another nfsd (triggered by a bulk write
* from a client's biod). Rather than syncing the file with each write
* request, we sleep for 10 msec.
*
* I don't know if this roughly approximates C. Juszak's idea of
* gathered writes, but it's a nice and simple solution (IMHO), and it
* seems to work:-)
*
* Note: we do this only in the NFSv2 case, since v3 and higher have a
* better tool (separate unstable writes and commits) for solving this
* problem.
*/
static int wait_for_concurrent_writes(struct file *file)
{
struct inode *inode = file_inode(file);
static ino_t last_ino;
static dev_t last_dev;
int err = 0;
if (atomic_read(&inode->i_writecount) > 1
|| (last_ino == inode->i_ino && last_dev == inode->i_sb->s_dev)) {
dprintk("nfsd: write defer %d\n", task_pid_nr(current));
msleep(10);
dprintk("nfsd: write resume %d\n", task_pid_nr(current));
}
if (inode->i_state & I_DIRTY) {
dprintk("nfsd: write sync %d\n", task_pid_nr(current));
err = vfs_fsync(file, 0);
}
last_ino = inode->i_ino;
last_dev = inode->i_sb->s_dev;
return err;
}
__be32
nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
loff_t offset, struct kvec *vec, int vlen,
unsigned long *cnt, int stable)
{
struct svc_export *exp;
mm_segment_t oldfs;
__be32 err = 0;
int host_err;
int use_wgather;
loff_t pos = offset;
unsigned int pflags = current->flags;
int flags = 0;
if (test_bit(RQ_LOCAL, &rqstp->rq_flags))
/*
* We want less throttling in balance_dirty_pages()
* and shrink_inactive_list() so that nfs to
* localhost doesn't cause nfsd to lock up due to all
* the client's dirty pages or its congested queue.
*/
current->flags |= PF_LESS_THROTTLE;
exp = fhp->fh_export;
use_wgather = (rqstp->rq_vers == 2) && EX_WGATHER(exp);
if (!EX_ISSYNC(exp))
stable = NFS_UNSTABLE;
if (stable && !use_wgather)
flags |= RWF_SYNC;
/* Write the data. */
oldfs = get_fs(); set_fs(KERNEL_DS);
host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos, flags);
set_fs(oldfs);
if (host_err < 0)
goto out_nfserr;
*cnt = host_err;
nfsdstats.io_write += host_err;
fsnotify_modify(file);
if (stable && use_wgather)
host_err = wait_for_concurrent_writes(file);
out_nfserr:
dprintk("nfsd: write complete host_err=%d\n", host_err);
if (host_err >= 0)
err = 0;
else
err = nfserrno(host_err);
if (test_bit(RQ_LOCAL, &rqstp->rq_flags))
current_restore_flags(pflags, PF_LESS_THROTTLE);
return err;
}
/*
* Read data from a file. *count must contain the requested read count
* on entry. On return, *count contains the number of bytes actually read.
* N.B. After this call fhp needs an fh_put
*/
__be32 nfsd_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
loff_t offset, struct kvec *vec, int vlen, unsigned long *count)
{
struct file *file;
struct raparms *ra;
__be32 err;
trace_read_start(rqstp, fhp, offset, vlen);
err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
if (err)
return err;
ra = nfsd_init_raparms(file);
trace_read_opened(rqstp, fhp, offset, vlen);
err = nfsd_vfs_read(rqstp, file, offset, vec, vlen, count);
trace_read_io_done(rqstp, fhp, offset, vlen);
if (ra)
nfsd_put_raparams(file, ra);
fput(file);
trace_read_done(rqstp, fhp, offset, vlen);
return err;
}
/*
* Write data to a file.
* The stable flag requests synchronous writes.
* N.B. After this call fhp needs an fh_put
*/
__be32
nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset,
struct kvec *vec, int vlen, unsigned long *cnt, int stable)
{
struct file *file = NULL;
__be32 err = 0;
trace_write_start(rqstp, fhp, offset, vlen);
err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_WRITE, &file);
if (err)
goto out;
trace_write_opened(rqstp, fhp, offset, vlen);
err = nfsd_vfs_write(rqstp, fhp, file, offset, vec, vlen, cnt, stable);
trace_write_io_done(rqstp, fhp, offset, vlen);
fput(file);
out:
trace_write_done(rqstp, fhp, offset, vlen);
return err;
}
#ifdef CONFIG_NFSD_V3
/*
* Commit all pending writes to stable storage.
*
* Note: we only guarantee that data that lies within the range specified
* by the 'offset' and 'count' parameters will be synced.
*
* Unfortunately we cannot lock the file to make sure we return full WCC
* data to the client, as locking happens lower down in the filesystem.
*/
__be32
nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
loff_t offset, unsigned long count)
{
struct file *file;
loff_t end = LLONG_MAX;
__be32 err = nfserr_inval;
if (offset < 0)
goto out;
if (count != 0) {
end = offset + (loff_t)count - 1;
if (end < offset)
goto out;
}
err = nfsd_open(rqstp, fhp, S_IFREG,
NFSD_MAY_WRITE|NFSD_MAY_NOT_BREAK_LEASE, &file);
if (err)
goto out;
if (EX_ISSYNC(fhp->fh_export)) {
int err2 = vfs_fsync_range(file, offset, end, 0);
if (err2 != -EINVAL)
err = nfserrno(err2);
else
err = nfserr_notsupp;
}
fput(file);
out:
return err;
}
#endif /* CONFIG_NFSD_V3 */
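/*
* Illustrative sketch (not part of nfsd): the commit range computation used
* by nfsd_commit() above. A count of 0 means "to the end of the file";
* otherwise the inclusive end offset is checked for signed overflow before
* being handed to vfs_fsync_range(). Returns false for an invalid range.
*/
static inline bool commit_range_sketch(loff_t offset, unsigned long count, loff_t *end)
{
*end = LLONG_MAX;
if (offset < 0)
return false;
if (count != 0) {
*end = offset + (loff_t)count - 1;
if (*end < offset)
return false;
}
return true;
}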
static __be32
nfsd_create_setattr(struct svc_rqst *rqstp, struct svc_fh *resfhp,
struct iattr *iap)
{
/*
* Mode has already been set earlier in create:
*/
iap->ia_valid &= ~ATTR_MODE;
/*
* Setting uid/gid works only for root. Irix appears to
* send along the gid on create when it tries to implement
* setgid directories via NFS:
*/
if (!uid_eq(current_fsuid(), GLOBAL_ROOT_UID))
iap->ia_valid &= ~(ATTR_UID|ATTR_GID);
if (iap->ia_valid)
return nfsd_setattr(rqstp, resfhp, iap, 0, (time_t)0);
/* Callers expect file metadata to be committed here */
return nfserrno(commit_metadata(resfhp));
}
/* HPUX clients sometimes create a file in mode 000 and set the size to 0.
* Setting the size to 0 may fail on some filesystems because the permission
* check requires WRITE permission, which a mode-000 file does not grant.
* We therefore ignore a resize to 0 on a just-created file, since its size
* is already 0.
*
* Call this only after vfs_create() has been called.
*/
static void
nfsd_check_ignore_resizing(struct iattr *iap)
{
if ((iap->ia_valid & ATTR_SIZE) && (iap->ia_size == 0))
iap->ia_valid &= ~ATTR_SIZE;
}
/* The parent directory should already be locked: */
__be32
nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp,
char *fname, int flen, struct iattr *iap,
int type, dev_t rdev, struct svc_fh *resfhp)
{
struct dentry *dentry, *dchild;
struct inode *dirp;
__be32 err;
__be32 err2;
int host_err;
dentry = fhp->fh_dentry;
dirp = d_inode(dentry);
dchild = dget(resfhp->fh_dentry);
if (!fhp->fh_locked) {
WARN_ONCE(1, "nfsd_create: parent %pd2 not locked!\n",
dentry);
err = nfserr_io;
goto out;
}
err = nfsd_permission(rqstp, fhp->fh_export, dentry, NFSD_MAY_CREATE);
if (err)
goto out;
if (!(iap->ia_valid & ATTR_MODE))
iap->ia_mode = 0;
iap->ia_mode = (iap->ia_mode & S_IALLUGO) | type;
err = 0;
host_err = 0;
switch (type) {
case S_IFREG:
host_err = vfs_create(dirp, dchild, iap->ia_mode, true);
if (!host_err)
nfsd_check_ignore_resizing(iap);
break;
case S_IFDIR:
host_err = vfs_mkdir(dirp, dchild, iap->ia_mode);
break;
case S_IFCHR:
case S_IFBLK:
case S_IFIFO:
case S_IFSOCK:
host_err = vfs_mknod(dirp, dchild, iap->ia_mode, rdev);
break;
default:
printk(KERN_WARNING "nfsd: bad file type %o in nfsd_create\n",
type);
host_err = -EINVAL;
}
if (host_err < 0)
goto out_nfserr;
err = nfsd_create_setattr(rqstp, resfhp, iap);
/*
* nfsd_create_setattr already committed the child. Transactional
* filesystems had a chance to commit changes for both parent and
* child simultaneously making the following commit_metadata a
* noop.
*/
err2 = nfserrno(commit_metadata(fhp));
if (err2)
err = err2;
/*
* Update the file handle to get the new inode info.
*/
if (!err)
err = fh_update(resfhp);
out:
dput(dchild);
return err;
out_nfserr:
err = nfserrno(host_err);
goto out;
}
/*
* Create a filesystem object (regular, directory, special).
* Note that the parent directory is left locked.
*
* N.B. Every call to nfsd_create needs an fh_put for _both_ fhp and resfhp
*/
__be32
nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
char *fname, int flen, struct iattr *iap,
int type, dev_t rdev, struct svc_fh *resfhp)
{
struct dentry *dentry, *dchild = NULL;
struct inode *dirp;
__be32 err;
int host_err;
if (isdotent(fname, flen))
return nfserr_exist;
err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_NOP);
if (err)
return err;
dentry = fhp->fh_dentry;
dirp = d_inode(dentry);
host_err = fh_want_write(fhp);
if (host_err)
return nfserrno(host_err);
fh_lock_nested(fhp, I_MUTEX_PARENT);
dchild = lookup_one_len(fname, dentry, flen);
host_err = PTR_ERR(dchild);
if (IS_ERR(dchild))
return nfserrno(host_err);
err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
/*
* We unconditionally drop our ref to dchild as fh_compose will have
* already grabbed its own ref for it.
*/
dput(dchild);
if (err)
return err;
return nfsd_create_locked(rqstp, fhp, fname, flen, iap, type,
rdev, resfhp);
}
#ifdef CONFIG_NFSD_V3
/*
* NFSv3 and NFSv4 version of nfsd_create
*/
__be32
do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
char *fname, int flen, struct iattr *iap,
struct svc_fh *resfhp, int createmode, u32 *verifier,
bool *truncp, bool *created)
{
struct dentry *dentry, *dchild = NULL;
struct inode *dirp;
__be32 err;
int host_err;
__u32 v_mtime=0, v_atime=0;
err = nfserr_perm;
if (!flen)
goto out;
err = nfserr_exist;
if (isdotent(fname, flen))
goto out;
if (!(iap->ia_valid & ATTR_MODE))
iap->ia_mode = 0;
err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_EXEC);
if (err)
goto out;
dentry = fhp->fh_dentry;
dirp = d_inode(dentry);
host_err = fh_want_write(fhp);
if (host_err)
goto out_nfserr;
fh_lock_nested(fhp, I_MUTEX_PARENT);
/*
* Compose the response file handle.
*/
dchild = lookup_one_len(fname, dentry, flen);
host_err = PTR_ERR(dchild);
if (IS_ERR(dchild))
goto out_nfserr;
/* If file doesn't exist, check for permissions to create one */
if (d_really_is_negative(dchild)) {
err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
if (err)
goto out;
}
err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
if (err)
goto out;
if (nfsd_create_is_exclusive(createmode)) {
/* solaris7 gets confused (bugid 4218508) if these have
* the high bit set, so just clear the high bits. If this is
* ever changed to use different attrs for storing the
* verifier, then do_open_lookup() will also need to be fixed
* accordingly.
*/
v_mtime = verifier[0]&0x7fffffff;
v_atime = verifier[1]&0x7fffffff;
}
if (d_really_is_positive(dchild)) {
err = 0;
switch (createmode) {
case NFS3_CREATE_UNCHECKED:
if (! d_is_reg(dchild))
goto out;
else if (truncp) {
/* in nfsv4, we need to treat this case a little
* differently. we don't want to truncate the
* file now; this would be wrong if the OPEN
* fails for some other reason. furthermore,
* if the size is nonzero, we should ignore it
* according to spec!
*/
*truncp = (iap->ia_valid & ATTR_SIZE) && !iap->ia_size;
}
else {
iap->ia_valid &= ATTR_SIZE;
goto set_attr;
}
break;
case NFS3_CREATE_EXCLUSIVE:
if ( d_inode(dchild)->i_mtime.tv_sec == v_mtime
&& d_inode(dchild)->i_atime.tv_sec == v_atime
&& d_inode(dchild)->i_size == 0 ) {
if (created)
*created = 1;
break;
}
case NFS4_CREATE_EXCLUSIVE4_1:
if ( d_inode(dchild)->i_mtime.tv_sec == v_mtime
&& d_inode(dchild)->i_atime.tv_sec == v_atime
&& d_inode(dchild)->i_size == 0 ) {
if (created)
*created = 1;
goto set_attr;
}
/* fallthru */
case NFS3_CREATE_GUARDED:
err = nfserr_exist;
}
fh_drop_write(fhp);
goto out;
}
host_err = vfs_create(dirp, dchild, iap->ia_mode, true);
if (host_err < 0) {
fh_drop_write(fhp);
goto out_nfserr;
}
if (created)
*created = 1;
nfsd_check_ignore_resizing(iap);
if (nfsd_create_is_exclusive(createmode)) {
/* Cram the verifier into atime/mtime */
iap->ia_valid = ATTR_MTIME|ATTR_ATIME
| ATTR_MTIME_SET|ATTR_ATIME_SET;
/* XXX someone who knows this better please fix it for nsec */
iap->ia_mtime.tv_sec = v_mtime;
iap->ia_atime.tv_sec = v_atime;
iap->ia_mtime.tv_nsec = 0;
iap->ia_atime.tv_nsec = 0;
}
set_attr:
err = nfsd_create_setattr(rqstp, resfhp, iap);
/*
* nfsd_create_setattr already committed the child
* (and possibly also the parent).
*/
if (!err)
err = nfserrno(commit_metadata(fhp));
/*
* Update the filehandle to get the new inode info.
*/
if (!err)
err = fh_update(resfhp);
out:
fh_unlock(fhp);
if (dchild && !IS_ERR(dchild))
dput(dchild);
fh_drop_write(fhp);
return err;
out_nfserr:
err = nfserrno(host_err);
goto out;
}
#endif /* CONFIG_NFSD_V3 */
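/*
* Illustrative sketch (not part of nfsd): splitting an EXCLUSIVE create
* verifier across the atime/mtime seconds fields, as do_nfsd_create() does
* above. The high bit of each half is cleared because some clients treat
* the timestamp fields as signed (see the solaris7 note there).
*/
static inline void exclusive_verifier_sketch(const u32 *verifier, u32 *v_mtime, u32 *v_atime)
{
*v_mtime = verifier[0] & 0x7fffffff;
*v_atime = verifier[1] & 0x7fffffff;
}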
/*
* Read a symlink. On entry, *lenp must contain the maximum path length that
* fits into the buffer. On return, it contains the true length.
* N.B. After this call fhp needs an fh_put
*/
__be32
nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
{
mm_segment_t oldfs;
__be32 err;
int host_err;
struct path path;
err = fh_verify(rqstp, fhp, S_IFLNK, NFSD_MAY_NOP);
if (err)
goto out;
path.mnt = fhp->fh_export->ex_path.mnt;
path.dentry = fhp->fh_dentry;
err = nfserr_inval;
if (!d_is_symlink(path.dentry))
goto out;
touch_atime(&path);
/* N.B. Why does this call need a get_fs()??
* Remove the set_fs and watch the fireworks:-) --okir
*/
oldfs = get_fs(); set_fs(KERNEL_DS);
host_err = vfs_readlink(path.dentry, (char __user *)buf, *lenp);
set_fs(oldfs);
if (host_err < 0)
goto out_nfserr;
*lenp = host_err;
err = 0;
out:
return err;
out_nfserr:
err = nfserrno(host_err);
goto out;
}
/*
* Create a symlink and look up its inode
* N.B. After this call _both_ fhp and resfhp need an fh_put
*/
__be32
nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
char *fname, int flen,
char *path,
struct svc_fh *resfhp)
{
struct dentry *dentry, *dnew;
__be32 err, cerr;
int host_err;
err = nfserr_noent;
if (!flen || path[0] == '\0')
goto out;
err = nfserr_exist;
if (isdotent(fname, flen))
goto out;
err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
if (err)
goto out;
host_err = fh_want_write(fhp);
if (host_err)
goto out_nfserr;
fh_lock(fhp);
dentry = fhp->fh_dentry;
dnew = lookup_one_len(fname, dentry, flen);
host_err = PTR_ERR(dnew);
if (IS_ERR(dnew))
goto out_nfserr;
host_err = vfs_symlink(d_inode(dentry), dnew, path);
err = nfserrno(host_err);
if (!err)
err = nfserrno(commit_metadata(fhp));
fh_unlock(fhp);
fh_drop_write(fhp);
cerr = fh_compose(resfhp, fhp->fh_export, dnew, fhp);
dput(dnew);
if (err == 0)
err = cerr;
out:
return err;
out_nfserr:
err = nfserrno(host_err);
goto out;
}
/*
* Create a hardlink
* N.B. After this call _both_ ffhp and tfhp need an fh_put
*/
__be32
nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
char *name, int len, struct svc_fh *tfhp)
{
struct dentry *ddir, *dnew, *dold;
struct inode *dirp;
__be32 err;
int host_err;
err = fh_verify(rqstp, ffhp, S_IFDIR, NFSD_MAY_CREATE);
if (err)
goto out;
err = fh_verify(rqstp, tfhp, 0, NFSD_MAY_NOP);
if (err)
goto out;
err = nfserr_isdir;
if (d_is_dir(tfhp->fh_dentry))
goto out;
err = nfserr_perm;
if (!len)
goto out;
err = nfserr_exist;
if (isdotent(name, len))
goto out;
host_err = fh_want_write(tfhp);
if (host_err) {
err = nfserrno(host_err);
goto out;
}
fh_lock_nested(ffhp, I_MUTEX_PARENT);
ddir = ffhp->fh_dentry;
dirp = d_inode(ddir);
dnew = lookup_one_len(name, ddir, len);
host_err = PTR_ERR(dnew);
if (IS_ERR(dnew))
goto out_nfserr;
dold = tfhp->fh_dentry;
err = nfserr_noent;
if (d_really_is_negative(dold))
goto out_dput;
host_err = vfs_link(dold, dirp, dnew, NULL);
if (!host_err) {
err = nfserrno(commit_metadata(ffhp));
if (!err)
err = nfserrno(commit_metadata(tfhp));
} else {
if (host_err == -EXDEV && rqstp->rq_vers == 2)
err = nfserr_acces;
else
err = nfserrno(host_err);
}
out_dput:
dput(dnew);
out_unlock:
fh_unlock(ffhp);
fh_drop_write(tfhp);
out:
return err;
out_nfserr:
err = nfserrno(host_err);
goto out_unlock;
}
/*
* Rename a file
* N.B. After this call _both_ ffhp and tfhp need an fh_put
*/
__be32
nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
struct svc_fh *tfhp, char *tname, int tlen)
{
struct dentry *fdentry, *tdentry, *odentry, *ndentry, *trap;
struct inode *fdir, *tdir;
__be32 err;
int host_err;
err = fh_verify(rqstp, ffhp, S_IFDIR, NFSD_MAY_REMOVE);
if (err)
goto out;
err = fh_verify(rqstp, tfhp, S_IFDIR, NFSD_MAY_CREATE);
if (err)
goto out;
fdentry = ffhp->fh_dentry;
fdir = d_inode(fdentry);
tdentry = tfhp->fh_dentry;
tdir = d_inode(tdentry);
err = nfserr_perm;
if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen))
goto out;
host_err = fh_want_write(ffhp);
if (host_err) {
err = nfserrno(host_err);
goto out;
}
/* cannot use fh_lock as we need deadlock protective ordering
* so do it by hand */
trap = lock_rename(tdentry, fdentry);
ffhp->fh_locked = tfhp->fh_locked = true;
fill_pre_wcc(ffhp);
fill_pre_wcc(tfhp);
odentry = lookup_one_len(fname, fdentry, flen);
host_err = PTR_ERR(odentry);
if (IS_ERR(odentry))
goto out_nfserr;
host_err = -ENOENT;
if (d_really_is_negative(odentry))
goto out_dput_old;
host_err = -EINVAL;
if (odentry == trap)
goto out_dput_old;
ndentry = lookup_one_len(tname, tdentry, tlen);
host_err = PTR_ERR(ndentry);
if (IS_ERR(ndentry))
goto out_dput_old;
host_err = -ENOTEMPTY;
if (ndentry == trap)
goto out_dput_new;
host_err = -EXDEV;
if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
goto out_dput_new;
if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry)
goto out_dput_new;
host_err = vfs_rename(fdir, odentry, tdir, ndentry, NULL, 0);
if (!host_err) {
host_err = commit_metadata(tfhp);
if (!host_err)
host_err = commit_metadata(ffhp);
}
out_dput_new:
dput(ndentry);
out_dput_old:
dput(odentry);
out_nfserr:
err = nfserrno(host_err);
/*
* We cannot rely on fh_unlock on the two filehandles,
* as that would do the wrong thing if the two directories
* were the same, so again we do it by hand.
*/
fill_post_wcc(ffhp);
fill_post_wcc(tfhp);
unlock_rename(tdentry, fdentry);
ffhp->fh_locked = tfhp->fh_locked = false;
fh_drop_write(ffhp);
out:
return err;
}
/*
* Unlink a file or directory
* N.B. After this call fhp needs an fh_put
*/
__be32
nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
char *fname, int flen)
{
struct dentry *dentry, *rdentry;
struct inode *dirp;
__be32 err;
int host_err;
err = nfserr_acces;
if (!flen || isdotent(fname, flen))
goto out;
err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_REMOVE);
if (err)
goto out;
host_err = fh_want_write(fhp);
if (host_err)
goto out_nfserr;
fh_lock_nested(fhp, I_MUTEX_PARENT);
dentry = fhp->fh_dentry;
dirp = d_inode(dentry);
rdentry = lookup_one_len(fname, dentry, flen);
host_err = PTR_ERR(rdentry);
if (IS_ERR(rdentry))
goto out_nfserr;
if (d_really_is_negative(rdentry)) {
dput(rdentry);
err = nfserr_noent;
goto out;
}
if (!type)
type = d_inode(rdentry)->i_mode & S_IFMT;
if (type != S_IFDIR)
host_err = vfs_unlink(dirp, rdentry, NULL);
else
host_err = vfs_rmdir(dirp, rdentry);
if (!host_err)
host_err = commit_metadata(fhp);
dput(rdentry);
out_nfserr:
err = nfserrno(host_err);
out:
return err;
}
/*
* We do this buffering because we must not call back into the file
* system's ->lookup() method from the filldir callback. That may well
* deadlock a number of file systems.
*
* This is based heavily on the implementation of same in XFS.
*/
struct buffered_dirent {
u64 ino;
loff_t offset;
int namlen;
unsigned int d_type;
char name[];
};
struct readdir_data {
struct dir_context ctx;
char *dirent;
size_t used;
int full;
};
static int nfsd_buffered_filldir(struct dir_context *ctx, const char *name,
int namlen, loff_t offset, u64 ino,
unsigned int d_type)
{
struct readdir_data *buf =
container_of(ctx, struct readdir_data, ctx);
struct buffered_dirent *de = (void *)(buf->dirent + buf->used);
unsigned int reclen;
reclen = ALIGN(sizeof(struct buffered_dirent) + namlen, sizeof(u64));
if (buf->used + reclen > PAGE_SIZE) {
buf->full = 1;
return -EINVAL;
}
de->namlen = namlen;
de->offset = offset;
de->ino = ino;
de->d_type = d_type;
memcpy(de->name, name, namlen);
buf->used += reclen;
return 0;
}
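/*
* Illustrative sketch (not part of nfsd): the per-entry record length used
* by the filldir callback above. Each buffered_dirent is padded out to a
* u64 boundary so that the next record's fixed-size header stays naturally
* aligned within the page buffer.
*/
static inline unsigned int buffered_dirent_reclen_sketch(int namlen)
{
return ALIGN(sizeof(struct buffered_dirent) + namlen, sizeof(u64));
}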
static __be32 nfsd_buffered_readdir(struct file *file, nfsd_filldir_t func,
struct readdir_cd *cdp, loff_t *offsetp)
{
struct buffered_dirent *de;
int host_err;
int size;
loff_t offset;
struct readdir_data buf = {
.ctx.actor = nfsd_buffered_filldir,
.dirent = (void *)__get_free_page(GFP_KERNEL)
};
if (!buf.dirent)
return nfserrno(-ENOMEM);
offset = *offsetp;
while (1) {
unsigned int reclen;
cdp->err = nfserr_eof; /* will be cleared on successful read */
buf.used = 0;
buf.full = 0;
host_err = iterate_dir(file, &buf.ctx);
if (buf.full)
host_err = 0;
if (host_err < 0)
break;
size = buf.used;
if (!size)
break;
de = (struct buffered_dirent *)buf.dirent;
while (size > 0) {
offset = de->offset;
if (func(cdp, de->name, de->namlen, de->offset,
de->ino, de->d_type))
break;
if (cdp->err != nfs_ok)
break;
reclen = ALIGN(sizeof(*de) + de->namlen,
sizeof(u64));
size -= reclen;
de = (struct buffered_dirent *)((char *)de + reclen);
}
if (size > 0) /* We bailed out early */
break;
offset = vfs_llseek(file, 0, SEEK_CUR);
}
free_page((unsigned long)(buf.dirent));
if (host_err)
return nfserrno(host_err);
*offsetp = offset;
return cdp->err;
}
/*
* Read entries from a directory.
* We ignore the NFSv3/4 verifier for now.
*/
__be32
nfsd_readdir(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t *offsetp,
struct readdir_cd *cdp, nfsd_filldir_t func)
{
__be32 err;
struct file *file;
loff_t offset = *offsetp;
int may_flags = NFSD_MAY_READ;
/* NFSv2 only supports 32 bit cookies */
if (rqstp->rq_vers > 2)
may_flags |= NFSD_MAY_64BIT_COOKIE;
err = nfsd_open(rqstp, fhp, S_IFDIR, may_flags, &file);
if (err)
goto out;
offset = vfs_llseek(file, offset, SEEK_SET);
if (offset < 0) {
err = nfserrno((int)offset);
goto out_close;
}
err = nfsd_buffered_readdir(file, func, cdp, offsetp);
if (err == nfserr_eof || err == nfserr_toosmall)
err = nfs_ok; /* can still be found in ->err */
out_close:
fput(file);
out:
return err;
}
/*
* Get file system stats
* N.B. After this call fhp needs an fh_put
*/
__be32
nfsd_statfs(struct svc_rqst *rqstp, struct svc_fh *fhp, struct kstatfs *stat, int access)
{
__be32 err;
err = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP | access);
if (!err) {
struct path path = {
.mnt = fhp->fh_export->ex_path.mnt,
.dentry = fhp->fh_dentry,
};
if (vfs_statfs(&path, stat))
err = nfserr_io;
}
return err;
}
static int exp_rdonly(struct svc_rqst *rqstp, struct svc_export *exp)
{
return nfsexp_flags(rqstp, exp) & NFSEXP_READONLY;
}
/*
* Check for a user's access permissions to this inode.
*/
__be32
nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
struct dentry *dentry, int acc)
{
struct inode *inode = d_inode(dentry);
int err;
if ((acc & NFSD_MAY_MASK) == NFSD_MAY_NOP)
return 0;
#if 0
dprintk("nfsd: permission 0x%x%s%s%s%s%s%s%s mode 0%o%s%s%s\n",
acc,
(acc & NFSD_MAY_READ)? " read" : "",
(acc & NFSD_MAY_WRITE)? " write" : "",
(acc & NFSD_MAY_EXEC)? " exec" : "",
(acc & NFSD_MAY_SATTR)? " sattr" : "",
(acc & NFSD_MAY_TRUNC)? " trunc" : "",
(acc & NFSD_MAY_LOCK)? " lock" : "",
(acc & NFSD_MAY_OWNER_OVERRIDE)? " owneroverride" : "",
inode->i_mode,
IS_IMMUTABLE(inode)? " immut" : "",
IS_APPEND(inode)? " append" : "",
__mnt_is_readonly(exp->ex_path.mnt)? " ro" : "");
dprintk(" owner %d/%d user %d/%d\n",
inode->i_uid, inode->i_gid, current_fsuid(), current_fsgid());
#endif
/* Normally we reject any write/sattr etc access on a read-only file
* system. But if it is IRIX doing a check on write access for a
* device special file, we ignore rofs.
*/
if (!(acc & NFSD_MAY_LOCAL_ACCESS))
if (acc & (NFSD_MAY_WRITE | NFSD_MAY_SATTR | NFSD_MAY_TRUNC)) {
if (exp_rdonly(rqstp, exp) ||
__mnt_is_readonly(exp->ex_path.mnt))
return nfserr_rofs;
if (/* (acc & NFSD_MAY_WRITE) && */ IS_IMMUTABLE(inode))
return nfserr_perm;
}
if ((acc & NFSD_MAY_TRUNC) && IS_APPEND(inode))
return nfserr_perm;
if (acc & NFSD_MAY_LOCK) {
/* If we cannot rely on authentication in NLM requests,
* just allow locks, otherwise require read permission, or
* ownership
*/
if (exp->ex_flags & NFSEXP_NOAUTHNLM)
return 0;
else
acc = NFSD_MAY_READ | NFSD_MAY_OWNER_OVERRIDE;
}
/*
* The file owner always gets access permission for accesses that
* would normally be checked at open time. This is to make
* file access work even when the client has done a fchmod(fd, 0).
*
* However, `cp foo bar' should fail nevertheless when bar is
* readonly. A sensible way to do this might be to reject all
* attempts to truncate a read-only file, because a creat() call
* always implies file truncation.
* ... but this isn't really fair. A process may reasonably call
* ftruncate on an open file descriptor on a file with perm 000.
* We must trust the client to do permission checking - using "ACCESS"
* with NFSv3.
*/
if ((acc & NFSD_MAY_OWNER_OVERRIDE) &&
uid_eq(inode->i_uid, current_fsuid()))
return 0;
/* This assumes NFSD_MAY_{READ,WRITE,EXEC} == MAY_{READ,WRITE,EXEC} */
err = inode_permission(inode, acc & (MAY_READ|MAY_WRITE|MAY_EXEC));
/* Allow read access to binaries even when mode 111 */
if (err == -EACCES && S_ISREG(inode->i_mode) &&
(acc == (NFSD_MAY_READ | NFSD_MAY_OWNER_OVERRIDE) ||
acc == (NFSD_MAY_READ | NFSD_MAY_READ_IF_EXEC)))
err = inode_permission(inode, MAY_EXEC);
return err? nfserrno(err) : 0;
}
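/*
* Illustrative sketch (not part of nfsd): nfsd_permission() above relies on
* NFSD_MAY_{READ,WRITE,EXEC} having the same values as the VFS
* MAY_{READ,WRITE,EXEC} bits. A compile-time check of that assumption could
* look like this.
*/
static inline void nfsd_may_bits_check_sketch(void)
{
BUILD_BUG_ON(NFSD_MAY_READ != MAY_READ);
BUILD_BUG_ON(NFSD_MAY_WRITE != MAY_WRITE);
BUILD_BUG_ON(NFSD_MAY_EXEC != MAY_EXEC);
}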
void
nfsd_racache_shutdown(void)
{
struct raparms *raparm, *last_raparm;
unsigned int i;
dprintk("nfsd: freeing readahead buffers.\n");
for (i = 0; i < RAPARM_HASH_SIZE; i++) {
raparm = raparm_hash[i].pb_head;
while(raparm) {
last_raparm = raparm;
raparm = raparm->p_next;
kfree(last_raparm);
}
raparm_hash[i].pb_head = NULL;
}
}
/*
* Initialize readahead param cache
*/
int
nfsd_racache_init(int cache_size)
{
int i;
int j = 0;
int nperbucket;
struct raparms **raparm = NULL;
if (raparm_hash[0].pb_head)
return 0;
nperbucket = DIV_ROUND_UP(cache_size, RAPARM_HASH_SIZE);
nperbucket = max(2, nperbucket);
cache_size = nperbucket * RAPARM_HASH_SIZE;
dprintk("nfsd: allocating %d readahead buffers.\n", cache_size);
for (i = 0; i < RAPARM_HASH_SIZE; i++) {
spin_lock_init(&raparm_hash[i].pb_lock);
raparm = &raparm_hash[i].pb_head;
for (j = 0; j < nperbucket; j++) {
*raparm = kzalloc(sizeof(struct raparms), GFP_KERNEL);
if (!*raparm)
goto out_nomem;
raparm = &(*raparm)->p_next;
}
*raparm = NULL;
}
nfsdstats.ra_size = cache_size;
return 0;
out_nomem:
dprintk("nfsd: kmalloc failed, freeing readahead buffers\n");
nfsd_racache_shutdown();
return -ENOMEM;
}
| ./CrossVul/dataset_final_sorted/CWE-404/c/good_3351_8 |
crossvul-cpp_data_good_3346_0 | /*
* USB ZyXEL omni.net LCD PLUS driver
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
*
* Please report both successes and troubles to the author at omninet@kroah.com
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#define DRIVER_AUTHOR "Alessandro Zummo"
#define DRIVER_DESC "USB ZyXEL omni.net LCD PLUS Driver"
#define ZYXEL_VENDOR_ID 0x0586
#define ZYXEL_OMNINET_ID 0x1000
/* This one seems to be a re-branded ZyXEL device */
#define BT_IGNITIONPRO_ID 0x2000
/* function prototypes */
static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port);
static void omninet_process_read_urb(struct urb *urb);
static void omninet_write_bulk_callback(struct urb *urb);
static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
static int omninet_write_room(struct tty_struct *tty);
static void omninet_disconnect(struct usb_serial *serial);
static int omninet_attach(struct usb_serial *serial);
static int omninet_port_probe(struct usb_serial_port *port);
static int omninet_port_remove(struct usb_serial_port *port);
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNINET_ID) },
{ USB_DEVICE(ZYXEL_VENDOR_ID, BT_IGNITIONPRO_ID) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_serial_driver zyxel_omninet_device = {
.driver = {
.owner = THIS_MODULE,
.name = "omninet",
},
.description = "ZyXEL - omni.net lcd plus usb",
.id_table = id_table,
.num_ports = 1,
.attach = omninet_attach,
.port_probe = omninet_port_probe,
.port_remove = omninet_port_remove,
.open = omninet_open,
.write = omninet_write,
.write_room = omninet_write_room,
.write_bulk_callback = omninet_write_bulk_callback,
.process_read_urb = omninet_process_read_urb,
.disconnect = omninet_disconnect,
};
static struct usb_serial_driver * const serial_drivers[] = {
&zyxel_omninet_device, NULL
};
/*
* The protocol.
*
* The omni.net always exchanges 64 bytes of data with the host. The first
* four bytes are the control header.
*
* oh_seq is a sequence number. Don't know if/how it's used.
* oh_len is the length of the data bytes in the packet.
* oh_xxx Bit-mapped, related to handshaking and status info.
* I normally set it to 0x03 in transmitted frames.
* 7: Active when the TA is in a CONNECTed state.
* 6: unknown
* 5: handshaking, unknown
* 4: handshaking, unknown
* 3: unknown, usually 0
* 2: unknown, usually 0
* 1: handshaking, unknown, usually set to 1 in transmitted frames
* 0: handshaking, unknown, usually set to 1 in transmitted frames
* oh_pad Probably a pad byte.
*
* After the header you will find data bytes if oh_len was greater than zero.
*/
struct omninet_header {
__u8 oh_seq;
__u8 oh_len;
__u8 oh_xxx;
__u8 oh_pad;
};
struct omninet_data {
__u8 od_outseq; /* Sequence number for bulk_out URBs */
};
static int omninet_attach(struct usb_serial *serial)
{
/* The second bulk-out endpoint is used for writing. */
if (serial->num_bulk_out < 2) {
dev_err(&serial->interface->dev, "missing endpoints\n");
return -ENODEV;
}
return 0;
}
static int omninet_port_probe(struct usb_serial_port *port)
{
struct omninet_data *od;
od = kzalloc(sizeof(*od), GFP_KERNEL);
if (!od)
return -ENOMEM;
usb_set_serial_port_data(port, od);
return 0;
}
static int omninet_port_remove(struct usb_serial_port *port)
{
struct omninet_data *od;
od = usb_get_serial_port_data(port);
kfree(od);
return 0;
}
static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port)
{
return usb_serial_generic_open(tty, port);
}
#define OMNINET_HEADERLEN 4
#define OMNINET_BULKOUTSIZE 64
#define OMNINET_PAYLOADSIZE (OMNINET_BULKOUTSIZE - OMNINET_HEADERLEN)
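/*
* Illustrative sketch (not part of the driver): laying out one outgoing
* 64-byte frame as described in the protocol comment above. omninet_write()
* below performs the same steps directly on the write URB's transfer buffer.
*/
static inline void omninet_fill_frame_sketch(__u8 *frame, __u8 seq, const __u8 *data, __u8 len)
{
struct omninet_header *hdr = (struct omninet_header *)frame;
if (len > OMNINET_PAYLOADSIZE)
len = OMNINET_PAYLOADSIZE;
hdr->oh_seq = seq;
hdr->oh_len = len;
hdr->oh_xxx = 0x03; /* the usual handshaking bits, per the comment */
hdr->oh_pad = 0x00;
memcpy(frame + OMNINET_HEADERLEN, data, len);
}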
static void omninet_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
const struct omninet_header *hdr = urb->transfer_buffer;
const unsigned char *data;
size_t data_len;
if (urb->actual_length <= OMNINET_HEADERLEN || !hdr->oh_len)
return;
data = (char *)urb->transfer_buffer + OMNINET_HEADERLEN;
data_len = min_t(size_t, urb->actual_length - OMNINET_HEADERLEN,
hdr->oh_len);
tty_insert_flip_string(&port->port, data, data_len);
tty_flip_buffer_push(&port->port);
}
static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count)
{
struct usb_serial *serial = port->serial;
struct usb_serial_port *wport = serial->port[1];
struct omninet_data *od = usb_get_serial_port_data(port);
struct omninet_header *header = (struct omninet_header *)
wport->write_urb->transfer_buffer;
int result;
if (count == 0) {
dev_dbg(&port->dev, "%s - write request of 0 bytes\n", __func__);
return 0;
}
if (!test_and_clear_bit(0, &port->write_urbs_free)) {
dev_dbg(&port->dev, "%s - already writing\n", __func__);
return 0;
}
count = (count > OMNINET_PAYLOADSIZE) ? OMNINET_PAYLOADSIZE : count;
memcpy(wport->write_urb->transfer_buffer + OMNINET_HEADERLEN,
buf, count);
usb_serial_debug_data(&port->dev, __func__, count,
wport->write_urb->transfer_buffer);
header->oh_seq = od->od_outseq++;
header->oh_len = count;
header->oh_xxx = 0x03;
header->oh_pad = 0x00;
/* send the data out the bulk port, always 64 bytes */
wport->write_urb->transfer_buffer_length = OMNINET_BULKOUTSIZE;
result = usb_submit_urb(wport->write_urb, GFP_ATOMIC);
if (result) {
set_bit(0, &wport->write_urbs_free);
dev_err_console(port,
"%s - failed submitting write urb, error %d\n",
__func__, result);
} else
result = count;
return result;
}
static int omninet_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = port->serial;
struct usb_serial_port *wport = serial->port[1];
int room = 0; /* Default: no room */
if (test_bit(0, &wport->write_urbs_free))
room = wport->bulk_out_size - OMNINET_HEADERLEN;
dev_dbg(&port->dev, "%s - returns %d\n", __func__, room);
return room;
}
static void omninet_write_bulk_callback(struct urb *urb)
{
/* struct omninet_header *header = (struct omninet_header *)
urb->transfer_buffer; */
struct usb_serial_port *port = urb->context;
int status = urb->status;
set_bit(0, &port->write_urbs_free);
if (status) {
dev_dbg(&port->dev, "%s - nonzero write bulk status received: %d\n",
__func__, status);
return;
}
usb_serial_port_softint(port);
}
static void omninet_disconnect(struct usb_serial *serial)
{
struct usb_serial_port *wport = serial->port[1];
usb_kill_urb(wport->write_urb);
}
module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| ./CrossVul/dataset_final_sorted/CWE-404/c/good_3346_0 |
crossvul-cpp_data_good_3267_1 | /* Manage a process's keyrings
*
* Copyright (C) 2004-2005, 2008 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
#include <linux/keyctl.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/security.h>
#include <linux/user_namespace.h>
#include <linux/uaccess.h>
#include "internal.h"
/* Session keyring create vs join semaphore */
static DEFINE_MUTEX(key_session_mutex);
/* User keyring creation semaphore */
static DEFINE_MUTEX(key_user_keyring_mutex);
/* The root user's tracking struct */
struct key_user root_key_user = {
.usage = ATOMIC_INIT(3),
.cons_lock = __MUTEX_INITIALIZER(root_key_user.cons_lock),
.lock = __SPIN_LOCK_UNLOCKED(root_key_user.lock),
.nkeys = ATOMIC_INIT(2),
.nikeys = ATOMIC_INIT(2),
.uid = GLOBAL_ROOT_UID,
};
/*
* Install the user and user session keyrings for the current process's UID.
*/
int install_user_keyrings(void)
{
struct user_struct *user;
const struct cred *cred;
struct key *uid_keyring, *session_keyring;
key_perm_t user_keyring_perm;
char buf[20];
int ret;
uid_t uid;
user_keyring_perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL;
cred = current_cred();
user = cred->user;
uid = from_kuid(cred->user_ns, user->uid);
kenter("%p{%u}", user, uid);
if (user->uid_keyring && user->session_keyring) {
kleave(" = 0 [exist]");
return 0;
}
mutex_lock(&key_user_keyring_mutex);
ret = 0;
if (!user->uid_keyring) {
/* get the UID-specific keyring
* - there may be one in existence already as it may have been
* pinned by a session, but the user_struct pointing to it
* may have been destroyed by setuid */
sprintf(buf, "_uid.%u", uid);
uid_keyring = find_keyring_by_name(buf, true);
if (IS_ERR(uid_keyring)) {
uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID,
cred, user_keyring_perm,
KEY_ALLOC_IN_QUOTA,
NULL, NULL);
if (IS_ERR(uid_keyring)) {
ret = PTR_ERR(uid_keyring);
goto error;
}
}
/* get a default session keyring (which might also exist
* already) */
sprintf(buf, "_uid_ses.%u", uid);
session_keyring = find_keyring_by_name(buf, true);
if (IS_ERR(session_keyring)) {
session_keyring =
keyring_alloc(buf, user->uid, INVALID_GID,
cred, user_keyring_perm,
KEY_ALLOC_IN_QUOTA,
NULL, NULL);
if (IS_ERR(session_keyring)) {
ret = PTR_ERR(session_keyring);
goto error_release;
}
/* we install a link from the user session keyring to
* the user keyring */
ret = key_link(session_keyring, uid_keyring);
if (ret < 0)
goto error_release_both;
}
/* install the keyrings */
user->uid_keyring = uid_keyring;
user->session_keyring = session_keyring;
}
mutex_unlock(&key_user_keyring_mutex);
kleave(" = 0");
return 0;
error_release_both:
key_put(session_keyring);
error_release:
key_put(uid_keyring);
error:
mutex_unlock(&key_user_keyring_mutex);
kleave(" = %d", ret);
return ret;
}
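/*
* Illustrative sketch (not part of the keys code): the per-UID keyring names
* composed by install_user_keyrings() above. The 20-byte buffer it uses is
* enough for "_uid_ses." plus a 32-bit uid in decimal plus the terminating
* NUL.
*/
static inline void uid_keyring_names_sketch(uid_t uid, char uid_name[20], char ses_name[20])
{
sprintf(uid_name, "_uid.%u", uid);
sprintf(ses_name, "_uid_ses.%u", uid);
}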
/*
* Install a thread keyring to the given credentials struct if it didn't have
* one already. This is allowed to overrun the quota.
*
* Return: 0 if a thread keyring is now present; -errno on failure.
*/
int install_thread_keyring_to_cred(struct cred *new)
{
struct key *keyring;
if (new->thread_keyring)
return 0;
keyring = keyring_alloc("_tid", new->uid, new->gid, new,
KEY_POS_ALL | KEY_USR_VIEW,
KEY_ALLOC_QUOTA_OVERRUN,
NULL, NULL);
if (IS_ERR(keyring))
return PTR_ERR(keyring);
new->thread_keyring = keyring;
return 0;
}
/*
* Install a thread keyring to the current task if it didn't have one already.
*
* Return: 0 if a thread keyring is now present; -errno on failure.
*/
static int install_thread_keyring(void)
{
struct cred *new;
int ret;
new = prepare_creds();
if (!new)
return -ENOMEM;
ret = install_thread_keyring_to_cred(new);
if (ret < 0) {
abort_creds(new);
return ret;
}
return commit_creds(new);
}
/*
* Install a process keyring to the given credentials struct if it didn't have
* one already. This is allowed to overrun the quota.
*
* Return: 0 if a process keyring is now present; -errno on failure.
*/
int install_process_keyring_to_cred(struct cred *new)
{
struct key *keyring;
if (new->process_keyring)
return 0;
keyring = keyring_alloc("_pid", new->uid, new->gid, new,
KEY_POS_ALL | KEY_USR_VIEW,
KEY_ALLOC_QUOTA_OVERRUN,
NULL, NULL);
if (IS_ERR(keyring))
return PTR_ERR(keyring);
new->process_keyring = keyring;
return 0;
}
/*
* Install a process keyring to the current task if it didn't have one already.
*
* Return: 0 if a process keyring is now present; -errno on failure.
*/
static int install_process_keyring(void)
{
struct cred *new;
int ret;
new = prepare_creds();
if (!new)
return -ENOMEM;
ret = install_process_keyring_to_cred(new);
if (ret < 0) {
abort_creds(new);
return ret;
}
return commit_creds(new);
}
/*
* Install the given keyring as the session keyring of the given credentials
* struct, replacing the existing one if any. If the given keyring is NULL,
* then install a new anonymous session keyring.
*
* Return: 0 on success; -errno on failure.
*/
int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
{
unsigned long flags;
struct key *old;
might_sleep();
/* create an empty session keyring */
if (!keyring) {
flags = KEY_ALLOC_QUOTA_OVERRUN;
if (cred->session_keyring)
flags = KEY_ALLOC_IN_QUOTA;
keyring = keyring_alloc("_ses", cred->uid, cred->gid, cred,
KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
flags, NULL, NULL);
if (IS_ERR(keyring))
return PTR_ERR(keyring);
} else {
__key_get(keyring);
}
/* install the keyring */
old = cred->session_keyring;
rcu_assign_pointer(cred->session_keyring, keyring);
if (old)
key_put(old);
return 0;
}
/*
* Install the given keyring as the session keyring of the current task,
* replacing the existing one if any. If the given keyring is NULL, then
* install a new anonymous session keyring.
*
* Return: 0 on success; -errno on failure.
*/
static int install_session_keyring(struct key *keyring)
{
struct cred *new;
int ret;
new = prepare_creds();
if (!new)
return -ENOMEM;
ret = install_session_keyring_to_cred(new, keyring);
if (ret < 0) {
abort_creds(new);
return ret;
}
return commit_creds(new);
}
/*
* Handle the fsuid changing.
*/
void key_fsuid_changed(struct task_struct *tsk)
{
/* update the ownership of the thread keyring */
BUG_ON(!tsk->cred);
if (tsk->cred->thread_keyring) {
down_write(&tsk->cred->thread_keyring->sem);
tsk->cred->thread_keyring->uid = tsk->cred->fsuid;
up_write(&tsk->cred->thread_keyring->sem);
}
}
/*
* Handle the fsgid changing.
*/
void key_fsgid_changed(struct task_struct *tsk)
{
/* update the ownership of the thread keyring */
BUG_ON(!tsk->cred);
if (tsk->cred->thread_keyring) {
down_write(&tsk->cred->thread_keyring->sem);
tsk->cred->thread_keyring->gid = tsk->cred->fsgid;
up_write(&tsk->cred->thread_keyring->sem);
}
}
/*
* Search the process keyrings attached to the supplied cred for the first
* matching key.
*
* The search criteria are the type and the match function. The description is
* given to the match function as a parameter, but doesn't otherwise influence
* the search. Typically the match function will compare the description
* parameter to the key's description.
*
* This can only search keyrings that grant Search permission to the supplied
* credentials. Keyrings linked to searched keyrings will also be searched if
* they grant Search permission too. Keys can only be found if they grant
* Search permission to the credentials.
*
* Returns a pointer to the key with the key usage count incremented if
* successful, -EAGAIN if we didn't find any matching key or -ENOKEY if we only
* matched negative keys.
*
* In the case of a successful return, the possession attribute is set on the
* returned key reference.
*/
key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx)
{
key_ref_t key_ref, ret, err;
/* we want to return -EAGAIN or -ENOKEY if any of the keyrings were
* searchable, but we failed to find a key or we found a negative key;
* otherwise we want to return a representative error (probably -EACCES) if
* none of the keyrings were searchable
*
* in terms of priority: success > -ENOKEY > -EAGAIN > other error
*/
key_ref = NULL;
ret = NULL;
err = ERR_PTR(-EAGAIN);
/* search the thread keyring first */
if (ctx->cred->thread_keyring) {
key_ref = keyring_search_aux(
make_key_ref(ctx->cred->thread_keyring, 1), ctx);
if (!IS_ERR(key_ref))
goto found;
switch (PTR_ERR(key_ref)) {
case -EAGAIN: /* no key */
case -ENOKEY: /* negative key */
ret = key_ref;
break;
default:
err = key_ref;
break;
}
}
/* search the process keyring second */
if (ctx->cred->process_keyring) {
key_ref = keyring_search_aux(
make_key_ref(ctx->cred->process_keyring, 1), ctx);
if (!IS_ERR(key_ref))
goto found;
switch (PTR_ERR(key_ref)) {
case -EAGAIN: /* no key */
if (ret)
break;
case -ENOKEY: /* negative key */
ret = key_ref;
break;
default:
err = key_ref;
break;
}
}
/* search the session keyring */
if (ctx->cred->session_keyring) {
rcu_read_lock();
key_ref = keyring_search_aux(
make_key_ref(rcu_dereference(ctx->cred->session_keyring), 1),
ctx);
rcu_read_unlock();
if (!IS_ERR(key_ref))
goto found;
switch (PTR_ERR(key_ref)) {
case -EAGAIN: /* no key */
if (ret)
break;
case -ENOKEY: /* negative key */
ret = key_ref;
break;
default:
err = key_ref;
break;
}
}
/* or search the user-session keyring */
else if (ctx->cred->user->session_keyring) {
key_ref = keyring_search_aux(
make_key_ref(ctx->cred->user->session_keyring, 1),
ctx);
if (!IS_ERR(key_ref))
goto found;
switch (PTR_ERR(key_ref)) {
case -EAGAIN: /* no key */
if (ret)
break;
case -ENOKEY: /* negative key */
ret = key_ref;
break;
default:
err = key_ref;
break;
}
}
/* no key - decide on the error we're going to go for */
key_ref = ret ? ret : err;
found:
return key_ref;
}
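/*
 * Illustrative sketch (annotation, not part of the original file): the
 * error-priority rule above -- success > -ENOKEY > -EAGAIN > other error --
 * can be distilled into one helper that merges a single search result into
 * the running (ret, err) pair.  This hypothetical function is for
 * exposition only and is not called anywhere in this file.
 */
static inline void merge_search_error(key_ref_t key_ref,
				      key_ref_t *ret, key_ref_t *err)
{
	switch (PTR_ERR(key_ref)) {
	case -EAGAIN: /* no key: never overwrite a recorded -ENOKEY */
		if (*ret)
			break;
		/* fall through */
	case -ENOKEY: /* negative key */
		*ret = key_ref;
		break;
	default:
		*err = key_ref;
		break;
	}
}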
/*
* Search the process keyrings attached to the supplied cred for the first
* matching key in the manner of search_my_process_keyrings(), but also search
* the keys attached to the assumed authorisation key using its credentials if
* one is available.
*
* Return same as search_my_process_keyrings().
*/
key_ref_t search_process_keyrings(struct keyring_search_context *ctx)
{
struct request_key_auth *rka;
key_ref_t key_ref, ret = ERR_PTR(-EACCES), err;
might_sleep();
key_ref = search_my_process_keyrings(ctx);
if (!IS_ERR(key_ref))
goto found;
err = key_ref;
/* if this process has an instantiation authorisation key, then we also
* search the keyrings of the process mentioned there
* - we don't permit access to request_key auth keys via this method
*/
if (ctx->cred->request_key_auth &&
ctx->cred == current_cred() &&
ctx->index_key.type != &key_type_request_key_auth
) {
const struct cred *cred = ctx->cred;
/* defend against the auth key being revoked */
down_read(&cred->request_key_auth->sem);
if (key_validate(ctx->cred->request_key_auth) == 0) {
rka = ctx->cred->request_key_auth->payload.data[0];
ctx->cred = rka->cred;
key_ref = search_process_keyrings(ctx);
ctx->cred = cred;
up_read(&cred->request_key_auth->sem);
if (!IS_ERR(key_ref))
goto found;
ret = key_ref;
} else {
up_read(&cred->request_key_auth->sem);
}
}
/* no key - decide on the error we're going to go for */
if (err == ERR_PTR(-ENOKEY) || ret == ERR_PTR(-ENOKEY))
key_ref = ERR_PTR(-ENOKEY);
else if (err == ERR_PTR(-EACCES))
key_ref = ret;
else
key_ref = err;
found:
return key_ref;
}
/*
* See if the key we're looking at is the target key.
*/
bool lookup_user_key_possessed(const struct key *key,
const struct key_match_data *match_data)
{
return key == match_data->raw_data;
}
/*
* Look up a key ID given us by userspace with a given permissions mask to get
* the key it refers to.
*
* Flags can be passed to request that special keyrings be created if referred
* to directly, to permit partially constructed keys to be found and to skip
* validity and permission checks on the found key.
*
* Returns a pointer to the key with an incremented usage count if successful;
* -EINVAL if the key ID is invalid; -ENOKEY if the key ID does not correspond
* to a key or the best found key was a negative key; -EKEYREVOKED or
* -EKEYEXPIRED if the best found key was revoked or expired; -EACCES if the
* found key doesn't grant the requested permit or the LSM denied access to it;
* or -ENOMEM if a special keyring couldn't be created.
*
* In the case of a successful return, the possession attribute is set on the
* returned key reference.
*/
key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags,
key_perm_t perm)
{
struct keyring_search_context ctx = {
.match_data.cmp = lookup_user_key_possessed,
.match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
.flags = KEYRING_SEARCH_NO_STATE_CHECK,
};
struct request_key_auth *rka;
struct key *key;
key_ref_t key_ref, skey_ref;
int ret;
try_again:
ctx.cred = get_current_cred();
key_ref = ERR_PTR(-ENOKEY);
switch (id) {
case KEY_SPEC_THREAD_KEYRING:
if (!ctx.cred->thread_keyring) {
if (!(lflags & KEY_LOOKUP_CREATE))
goto error;
ret = install_thread_keyring();
if (ret < 0) {
key_ref = ERR_PTR(ret);
goto error;
}
goto reget_creds;
}
key = ctx.cred->thread_keyring;
__key_get(key);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_PROCESS_KEYRING:
if (!ctx.cred->process_keyring) {
if (!(lflags & KEY_LOOKUP_CREATE))
goto error;
ret = install_process_keyring();
if (ret < 0) {
key_ref = ERR_PTR(ret);
goto error;
}
goto reget_creds;
}
key = ctx.cred->process_keyring;
__key_get(key);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_SESSION_KEYRING:
if (!ctx.cred->session_keyring) {
/* always install a session keyring upon access if one
* doesn't exist yet */
ret = install_user_keyrings();
if (ret < 0)
goto error;
if (lflags & KEY_LOOKUP_CREATE)
ret = join_session_keyring(NULL);
else
ret = install_session_keyring(
ctx.cred->user->session_keyring);
if (ret < 0)
goto error;
goto reget_creds;
} else if (ctx.cred->session_keyring ==
ctx.cred->user->session_keyring &&
lflags & KEY_LOOKUP_CREATE) {
ret = join_session_keyring(NULL);
if (ret < 0)
goto error;
goto reget_creds;
}
rcu_read_lock();
key = rcu_dereference(ctx.cred->session_keyring);
__key_get(key);
rcu_read_unlock();
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_USER_KEYRING:
if (!ctx.cred->user->uid_keyring) {
ret = install_user_keyrings();
if (ret < 0)
goto error;
}
key = ctx.cred->user->uid_keyring;
__key_get(key);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_USER_SESSION_KEYRING:
if (!ctx.cred->user->session_keyring) {
ret = install_user_keyrings();
if (ret < 0)
goto error;
}
key = ctx.cred->user->session_keyring;
__key_get(key);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_GROUP_KEYRING:
/* group keyrings are not yet supported */
key_ref = ERR_PTR(-EINVAL);
goto error;
case KEY_SPEC_REQKEY_AUTH_KEY:
key = ctx.cred->request_key_auth;
if (!key)
goto error;
__key_get(key);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_REQUESTOR_KEYRING:
if (!ctx.cred->request_key_auth)
goto error;
down_read(&ctx.cred->request_key_auth->sem);
if (test_bit(KEY_FLAG_REVOKED,
&ctx.cred->request_key_auth->flags)) {
key_ref = ERR_PTR(-EKEYREVOKED);
key = NULL;
} else {
rka = ctx.cred->request_key_auth->payload.data[0];
key = rka->dest_keyring;
__key_get(key);
}
up_read(&ctx.cred->request_key_auth->sem);
if (!key)
goto error;
key_ref = make_key_ref(key, 1);
break;
default:
key_ref = ERR_PTR(-EINVAL);
if (id < 1)
goto error;
key = key_lookup(id);
if (IS_ERR(key)) {
key_ref = ERR_CAST(key);
goto error;
}
key_ref = make_key_ref(key, 0);
/* check to see if we possess the key */
ctx.index_key.type = key->type;
ctx.index_key.description = key->description;
ctx.index_key.desc_len = strlen(key->description);
ctx.match_data.raw_data = key;
kdebug("check possessed");
skey_ref = search_process_keyrings(&ctx);
kdebug("possessed=%p", skey_ref);
if (!IS_ERR(skey_ref)) {
key_put(key);
key_ref = skey_ref;
}
break;
}
/* unlink does not use the nominated key in any way, so can skip all
* the permission checks as it is only concerned with the keyring */
if (lflags & KEY_LOOKUP_FOR_UNLINK) {
ret = 0;
goto error;
}
if (!(lflags & KEY_LOOKUP_PARTIAL)) {
ret = wait_for_key_construction(key, true);
switch (ret) {
case -ERESTARTSYS:
goto invalid_key;
default:
if (perm)
goto invalid_key;
case 0:
break;
}
} else if (perm) {
ret = key_validate(key);
if (ret < 0)
goto invalid_key;
}
ret = -EIO;
if (!(lflags & KEY_LOOKUP_PARTIAL) &&
!test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
goto invalid_key;
/* check the permissions */
ret = key_task_permission(key_ref, ctx.cred, perm);
if (ret < 0)
goto invalid_key;
key->last_used_at = current_kernel_time().tv_sec;
error:
put_cred(ctx.cred);
return key_ref;
invalid_key:
key_ref_put(key_ref);
key_ref = ERR_PTR(ret);
goto error;
/* if we attempted to install a keyring, then it may have caused new
* creds to be installed */
reget_creds:
put_cred(ctx.cred);
goto try_again;
}
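/*
 * Illustrative sketch (annotation, not part of the original file): a
 * typical caller pattern for lookup_user_key().  This hypothetical
 * keyctl-style handler resolves a userspace key ID, asking for special
 * keyrings to be created on first reference, and drops the reference when
 * done.  KEY_NEED_VIEW is assumed to be the View-permission constant
 * available in this kernel version.
 */
static long example_describe_key(key_serial_t id)
{
	key_ref_t key_ref;

	key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_NEED_VIEW);
	if (IS_ERR(key_ref))
		return PTR_ERR(key_ref); /* -ENOKEY, -EACCES, ... */

	/* ... use key_ref_to_ptr(key_ref) here ... */

	key_ref_put(key_ref);
	return 0;
}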
/*
* Join the named keyring as the session keyring if possible else attempt to
* create a new one of that name and join that.
*
* If the name is NULL, an empty anonymous keyring will be installed as the
* session keyring.
*
* Named session keyrings are joined with a semaphore held to prevent the
* keyrings from going away whilst the attempt is made to join them and also
* to prevent a race in creating compatible session keyrings.
*/
long join_session_keyring(const char *name)
{
const struct cred *old;
struct cred *new;
struct key *keyring;
long ret, serial;
new = prepare_creds();
if (!new)
return -ENOMEM;
old = current_cred();
/* if no name is provided, install an anonymous keyring */
if (!name) {
ret = install_session_keyring_to_cred(new, NULL);
if (ret < 0)
goto error;
serial = new->session_keyring->serial;
ret = commit_creds(new);
if (ret == 0)
ret = serial;
goto okay;
}
/* allow the user to join or create a named keyring */
mutex_lock(&key_session_mutex);
/* look for an existing keyring of this name */
keyring = find_keyring_by_name(name, false);
if (PTR_ERR(keyring) == -ENOKEY) {
/* not found - try and create a new one */
keyring = keyring_alloc(
name, old->uid, old->gid, old,
KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ | KEY_USR_LINK,
KEY_ALLOC_IN_QUOTA, NULL, NULL);
if (IS_ERR(keyring)) {
ret = PTR_ERR(keyring);
goto error2;
}
} else if (IS_ERR(keyring)) {
ret = PTR_ERR(keyring);
goto error2;
} else if (keyring == new->session_keyring) {
key_put(keyring);
ret = 0;
goto error2;
}
/* we've got a keyring - now to install it */
ret = install_session_keyring_to_cred(new, keyring);
if (ret < 0)
goto error2;
commit_creds(new);
mutex_unlock(&key_session_mutex);
ret = keyring->serial;
key_put(keyring);
okay:
return ret;
error2:
mutex_unlock(&key_session_mutex);
error:
abort_creds(new);
return ret;
}
/*
* Replace a process's session keyring on behalf of one of its children when
* the target process is about to resume userspace execution.
*/
void key_change_session_keyring(struct callback_head *twork)
{
const struct cred *old = current_cred();
struct cred *new = container_of(twork, struct cred, rcu);
if (unlikely(current->flags & PF_EXITING)) {
put_cred(new);
return;
}
new->uid = old->uid;
new->euid = old->euid;
new->suid = old->suid;
new->fsuid = old->fsuid;
new->gid = old->gid;
new->egid = old->egid;
new->sgid = old->sgid;
new->fsgid = old->fsgid;
new->user = get_uid(old->user);
new->user_ns = get_user_ns(old->user_ns);
new->group_info = get_group_info(old->group_info);
new->securebits = old->securebits;
new->cap_inheritable = old->cap_inheritable;
new->cap_permitted = old->cap_permitted;
new->cap_effective = old->cap_effective;
new->cap_ambient = old->cap_ambient;
new->cap_bset = old->cap_bset;
new->jit_keyring = old->jit_keyring;
new->thread_keyring = key_get(old->thread_keyring);
new->process_keyring = key_get(old->process_keyring);
security_transfer_creds(new, old);
commit_creds(new);
}
/*
* Make sure that root's user and user-session keyrings exist.
*/
static int __init init_root_keyring(void)
{
return install_user_keyrings();
}
late_initcall(init_root_keyring);
| ./CrossVul/dataset_final_sorted/CWE-404/c/good_3267_1 |
crossvul-cpp_data_bad_3346_0 | /*
* USB ZyXEL omni.net LCD PLUS driver
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
*
* Please report both successes and troubles to the author at omninet@kroah.com
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#define DRIVER_AUTHOR "Alessandro Zummo"
#define DRIVER_DESC "USB ZyXEL omni.net LCD PLUS Driver"
#define ZYXEL_VENDOR_ID 0x0586
#define ZYXEL_OMNINET_ID 0x1000
/* This one seems to be a re-branded ZyXEL device */
#define BT_IGNITIONPRO_ID 0x2000
/* function prototypes */
static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port);
static void omninet_process_read_urb(struct urb *urb);
static void omninet_write_bulk_callback(struct urb *urb);
static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
static int omninet_write_room(struct tty_struct *tty);
static void omninet_disconnect(struct usb_serial *serial);
static int omninet_attach(struct usb_serial *serial);
static int omninet_port_probe(struct usb_serial_port *port);
static int omninet_port_remove(struct usb_serial_port *port);
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNINET_ID) },
{ USB_DEVICE(ZYXEL_VENDOR_ID, BT_IGNITIONPRO_ID) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_serial_driver zyxel_omninet_device = {
.driver = {
.owner = THIS_MODULE,
.name = "omninet",
},
.description = "ZyXEL - omni.net lcd plus usb",
.id_table = id_table,
.num_ports = 1,
.attach = omninet_attach,
.port_probe = omninet_port_probe,
.port_remove = omninet_port_remove,
.open = omninet_open,
.write = omninet_write,
.write_room = omninet_write_room,
.write_bulk_callback = omninet_write_bulk_callback,
.process_read_urb = omninet_process_read_urb,
.disconnect = omninet_disconnect,
};
static struct usb_serial_driver * const serial_drivers[] = {
&zyxel_omninet_device, NULL
};
/*
* The protocol.
*
* The omni.net always exchanges 64 bytes of data with the host. The first
* four bytes are the control header.
*
* oh_seq is a sequence number. Don't know if/how it's used.
* oh_len is the length of the data bytes in the packet.
* oh_xxx Bit-mapped, related to handshaking and status info.
* I normally set it to 0x03 in transmitted frames.
* 7: Active when the TA is in a CONNECTed state.
* 6: unknown
* 5: handshaking, unknown
* 4: handshaking, unknown
* 3: unknown, usually 0
* 2: unknown, usually 0
* 1: handshaking, unknown, usually set to 1 in transmitted frames
* 0: handshaking, unknown, usually set to 1 in transmitted frames
* oh_pad Probably a pad byte.
*
* After the header you will find data bytes if oh_len was greater than zero.
*/
struct omninet_header {
__u8 oh_seq;
__u8 oh_len;
__u8 oh_xxx;
__u8 oh_pad;
};
struct omninet_data {
__u8 od_outseq; /* Sequence number for bulk_out URBs */
};
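/*
 * Illustrative sketch (annotation, not part of the original driver): how a
 * 64-byte bulk-out frame is laid out -- the 4-byte header above followed by
 * up to 60 payload bytes, with any unused tail left as padding.  This
 * hypothetical helper exists for exposition only and is not used by the
 * driver.
 */
static inline int omninet_fill_frame(__u8 *frame, __u8 seq,
				     const __u8 *data, __u8 len)
{
	struct omninet_header *hdr = (struct omninet_header *)frame;

	if (len > 64 - sizeof(*hdr)) /* payload must fit in one frame */
		return -EINVAL;

	hdr->oh_seq = seq;
	hdr->oh_len = len;
	hdr->oh_xxx = 0x03; /* usual handshaking bits, per the comment above */
	hdr->oh_pad = 0x00;
	memcpy(frame + sizeof(*hdr), data, len);

	return 0;
}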
static int omninet_attach(struct usb_serial *serial)
{
/* The second bulk-out endpoint is used for writing. */
if (serial->num_bulk_out < 2) {
dev_err(&serial->interface->dev, "missing endpoints\n");
return -ENODEV;
}
return 0;
}
static int omninet_port_probe(struct usb_serial_port *port)
{
struct omninet_data *od;
od = kzalloc(sizeof(*od), GFP_KERNEL);
if (!od)
return -ENOMEM;
usb_set_serial_port_data(port, od);
return 0;
}
static int omninet_port_remove(struct usb_serial_port *port)
{
struct omninet_data *od;
od = usb_get_serial_port_data(port);
kfree(od);
return 0;
}
static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct usb_serial_port *wport;
wport = serial->port[1];
tty_port_tty_set(&wport->port, tty);
return usb_serial_generic_open(tty, port);
}
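/*
 * Note (annotation, not in the original source): open() above takes a tty
 * reference on the second port via tty_port_tty_set() that is never dropped
 * again.  This unbalanced acquisition appears to be the CWE-404 (improper
 * resource release) flaw this "bad" dataset entry captures; the fixed
 * driver opens with usb_serial_generic_open() alone.
 */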
#define OMNINET_HEADERLEN 4
#define OMNINET_BULKOUTSIZE 64
#define OMNINET_PAYLOADSIZE (OMNINET_BULKOUTSIZE - OMNINET_HEADERLEN)
static void omninet_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
const struct omninet_header *hdr = urb->transfer_buffer;
const unsigned char *data;
size_t data_len;
if (urb->actual_length <= OMNINET_HEADERLEN || !hdr->oh_len)
return;
data = (char *)urb->transfer_buffer + OMNINET_HEADERLEN;
data_len = min_t(size_t, urb->actual_length - OMNINET_HEADERLEN,
hdr->oh_len);
tty_insert_flip_string(&port->port, data, data_len);
tty_flip_buffer_push(&port->port);
}
static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count)
{
struct usb_serial *serial = port->serial;
struct usb_serial_port *wport = serial->port[1];
struct omninet_data *od = usb_get_serial_port_data(port);
struct omninet_header *header = (struct omninet_header *)
wport->write_urb->transfer_buffer;
int result;
if (count == 0) {
dev_dbg(&port->dev, "%s - write request of 0 bytes\n", __func__);
return 0;
}
if (!test_and_clear_bit(0, &port->write_urbs_free)) {
dev_dbg(&port->dev, "%s - already writing\n", __func__);
return 0;
}
count = (count > OMNINET_PAYLOADSIZE) ? OMNINET_PAYLOADSIZE : count;
memcpy(wport->write_urb->transfer_buffer + OMNINET_HEADERLEN,
buf, count);
usb_serial_debug_data(&port->dev, __func__, count,
wport->write_urb->transfer_buffer);
header->oh_seq = od->od_outseq++;
header->oh_len = count;
header->oh_xxx = 0x03;
header->oh_pad = 0x00;
/* send the data out the bulk port, always 64 bytes */
wport->write_urb->transfer_buffer_length = OMNINET_BULKOUTSIZE;
result = usb_submit_urb(wport->write_urb, GFP_ATOMIC);
if (result) {
set_bit(0, &wport->write_urbs_free);
dev_err_console(port,
"%s - failed submitting write urb, error %d\n",
__func__, result);
} else
result = count;
return result;
}
static int omninet_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = port->serial;
struct usb_serial_port *wport = serial->port[1];
int room = 0; /* Default: no room */
if (test_bit(0, &wport->write_urbs_free))
room = wport->bulk_out_size - OMNINET_HEADERLEN;
dev_dbg(&port->dev, "%s - returns %d\n", __func__, room);
return room;
}
static void omninet_write_bulk_callback(struct urb *urb)
{
/* struct omninet_header *header = (struct omninet_header *)
urb->transfer_buffer; */
struct usb_serial_port *port = urb->context;
int status = urb->status;
set_bit(0, &port->write_urbs_free);
if (status) {
dev_dbg(&port->dev, "%s - nonzero write bulk status received: %d\n",
__func__, status);
return;
}
usb_serial_port_softint(port);
}
static void omninet_disconnect(struct usb_serial *serial)
{
struct usb_serial_port *wport = serial->port[1];
usb_kill_urb(wport->write_urb);
}
module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| ./CrossVul/dataset_final_sorted/CWE-404/c/bad_3346_0 |
crossvul-cpp_data_good_3351_0 | /*
* linux/fs/lockd/svc.c
*
* This is the central lockd service.
*
* FIXME: Separate the lockd NFS server functionality from the lockd NFS
* client functionality. Oh why didn't Sun create two separate
* services in the first place?
*
* Authors: Olaf Kirch (okir@monad.swb.de)
*
* Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/moduleparam.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/uio.h>
#include <linux/smp.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/inetdevice.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/svc_xprt.h>
#include <net/ip.h>
#include <net/addrconf.h>
#include <net/ipv6.h>
#include <linux/lockd/lockd.h>
#include <linux/nfs.h>
#include "netns.h"
#include "procfs.h"
#define NLMDBG_FACILITY NLMDBG_SVC
#define LOCKD_BUFSIZE (1024 + NLMSVC_XDRSIZE)
#define ALLOWED_SIGS (sigmask(SIGKILL))
static struct svc_program nlmsvc_program;
const struct nlmsvc_binding *nlmsvc_ops;
EXPORT_SYMBOL_GPL(nlmsvc_ops);
static DEFINE_MUTEX(nlmsvc_mutex);
static unsigned int nlmsvc_users;
static struct task_struct *nlmsvc_task;
static struct svc_rqst *nlmsvc_rqst;
unsigned long nlmsvc_timeout;
unsigned int lockd_net_id;
/*
* These can be set at insmod time (useful for NFS as root filesystem),
* and also changed through the sysctl interface. -- Jamie Lokier, Aug 2003
*/
static unsigned long nlm_grace_period;
static unsigned long nlm_timeout = LOCKD_DFLT_TIMEO;
static int nlm_udpport, nlm_tcpport;
/* RLIM_NOFILE defaults to 1024. That seems like a reasonable default here. */
static unsigned int nlm_max_connections = 1024;
/*
* Constants needed for the sysctl interface.
*/
static const unsigned long nlm_grace_period_min = 0;
static const unsigned long nlm_grace_period_max = 240;
static const unsigned long nlm_timeout_min = 3;
static const unsigned long nlm_timeout_max = 20;
static const int nlm_port_min = 0, nlm_port_max = 65535;
#ifdef CONFIG_SYSCTL
static struct ctl_table_header * nlm_sysctl_table;
#endif
static unsigned long get_lockd_grace_period(void)
{
/* Note: nlm_timeout should always be nonzero */
if (nlm_grace_period)
return roundup(nlm_grace_period, nlm_timeout) * HZ;
else
return nlm_timeout * 5 * HZ;
}
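/*
 * Worked example (annotation, not in the original source): with the module
 * defaults nlm_timeout = 10 (LOCKD_DFLT_TIMEO) and nlm_grace_period = 0,
 * the function returns 10 * 5 * HZ, i.e. a 50 second grace period.  If an
 * administrator sets nlm_grace_period = 45, it is first rounded up to a
 * multiple of nlm_timeout: roundup(45, 10) = 50, giving 50 * HZ jiffies.
 */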
static void grace_ender(struct work_struct *grace)
{
struct delayed_work *dwork = to_delayed_work(grace);
struct lockd_net *ln = container_of(dwork, struct lockd_net,
grace_period_end);
locks_end_grace(&ln->lockd_manager);
}
static void set_grace_period(struct net *net)
{
unsigned long grace_period = get_lockd_grace_period();
struct lockd_net *ln = net_generic(net, lockd_net_id);
locks_start_grace(net, &ln->lockd_manager);
cancel_delayed_work_sync(&ln->grace_period_end);
schedule_delayed_work(&ln->grace_period_end, grace_period);
}
static void restart_grace(void)
{
if (nlmsvc_ops) {
struct net *net = &init_net;
struct lockd_net *ln = net_generic(net, lockd_net_id);
cancel_delayed_work_sync(&ln->grace_period_end);
locks_end_grace(&ln->lockd_manager);
nlmsvc_invalidate_all();
set_grace_period(net);
}
}
/*
* This is the lockd kernel thread
*/
static int
lockd(void *vrqstp)
{
int err = 0;
struct svc_rqst *rqstp = vrqstp;
struct net *net = &init_net;
struct lockd_net *ln = net_generic(net, lockd_net_id);
/* try_to_freeze() is called from svc_recv() */
set_freezable();
/* Allow SIGKILL to tell lockd to drop all of its locks */
allow_signal(SIGKILL);
dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");
/*
* The main request loop. We don't terminate until the last
* NFS mount or NFS daemon has gone away.
*/
while (!kthread_should_stop()) {
long timeout = MAX_SCHEDULE_TIMEOUT;
RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
/* update sv_maxconn if it has changed */
rqstp->rq_server->sv_maxconn = nlm_max_connections;
if (signalled()) {
flush_signals(current);
restart_grace();
continue;
}
timeout = nlmsvc_retry_blocked();
/*
* Find a socket with data available and call its
* recvfrom routine.
*/
err = svc_recv(rqstp, timeout);
if (err == -EAGAIN || err == -EINTR)
continue;
dprintk("lockd: request from %s\n",
svc_print_addr(rqstp, buf, sizeof(buf)));
svc_process(rqstp);
}
flush_signals(current);
if (nlmsvc_ops)
nlmsvc_invalidate_all();
nlm_shutdown_hosts();
cancel_delayed_work_sync(&ln->grace_period_end);
locks_end_grace(&ln->lockd_manager);
return 0;
}
static int create_lockd_listener(struct svc_serv *serv, const char *name,
struct net *net, const int family,
const unsigned short port)
{
struct svc_xprt *xprt;
xprt = svc_find_xprt(serv, name, net, family, 0);
if (xprt == NULL)
return svc_create_xprt(serv, name, net, family, port,
SVC_SOCK_DEFAULTS);
svc_xprt_put(xprt);
return 0;
}
static int create_lockd_family(struct svc_serv *serv, struct net *net,
const int family)
{
int err;
err = create_lockd_listener(serv, "udp", net, family, nlm_udpport);
if (err < 0)
return err;
return create_lockd_listener(serv, "tcp", net, family, nlm_tcpport);
}
/*
* Ensure there are active UDP and TCP listeners for lockd.
*
* Even if we have only TCP NFS mounts and/or TCP NFSDs, some
* local services (such as rpc.statd) still require UDP, and
* some NFS servers do not yet support NLM over TCP.
*
* Returns zero if all listeners are available; otherwise a
* negative errno value is returned.
*/
static int make_socks(struct svc_serv *serv, struct net *net)
{
static int warned;
int err;
err = create_lockd_family(serv, net, PF_INET);
if (err < 0)
goto out_err;
err = create_lockd_family(serv, net, PF_INET6);
if (err < 0 && err != -EAFNOSUPPORT)
goto out_err;
warned = 0;
return 0;
out_err:
if (warned++ == 0)
printk(KERN_WARNING
"lockd_up: makesock failed, error=%d\n", err);
svc_shutdown_net(serv, net);
return err;
}
static int lockd_up_net(struct svc_serv *serv, struct net *net)
{
struct lockd_net *ln = net_generic(net, lockd_net_id);
int error;
if (ln->nlmsvc_users++)
return 0;
error = svc_bind(serv, net);
if (error)
goto err_bind;
error = make_socks(serv, net);
if (error < 0)
goto err_bind;
set_grace_period(net);
dprintk("lockd_up_net: per-net data created; net=%p\n", net);
return 0;
err_bind:
ln->nlmsvc_users--;
return error;
}
static void lockd_down_net(struct svc_serv *serv, struct net *net)
{
struct lockd_net *ln = net_generic(net, lockd_net_id);
if (ln->nlmsvc_users) {
if (--ln->nlmsvc_users == 0) {
nlm_shutdown_hosts_net(net);
svc_shutdown_net(serv, net);
dprintk("lockd_down_net: per-net data destroyed; net=%p\n", net);
}
} else {
printk(KERN_ERR "lockd_down_net: no users! task=%p, net=%p\n",
nlmsvc_task, net);
BUG();
}
}
static int lockd_inetaddr_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
struct sockaddr_in sin;
if (event != NETDEV_DOWN)
goto out;
if (nlmsvc_rqst) {
dprintk("lockd_inetaddr_event: removed %pI4\n",
&ifa->ifa_local);
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = ifa->ifa_local;
svc_age_temp_xprts_now(nlmsvc_rqst->rq_server,
(struct sockaddr *)&sin);
}
out:
return NOTIFY_DONE;
}
static struct notifier_block lockd_inetaddr_notifier = {
.notifier_call = lockd_inetaddr_event,
};
#if IS_ENABLED(CONFIG_IPV6)
static int lockd_inet6addr_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
struct sockaddr_in6 sin6;
if (event != NETDEV_DOWN)
goto out;
if (nlmsvc_rqst) {
dprintk("lockd_inet6addr_event: removed %pI6\n", &ifa->addr);
sin6.sin6_family = AF_INET6;
sin6.sin6_addr = ifa->addr;
if (ipv6_addr_type(&sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
sin6.sin6_scope_id = ifa->idev->dev->ifindex;
svc_age_temp_xprts_now(nlmsvc_rqst->rq_server,
(struct sockaddr *)&sin6);
}
out:
return NOTIFY_DONE;
}
static struct notifier_block lockd_inet6addr_notifier = {
.notifier_call = lockd_inet6addr_event,
};
#endif
static void lockd_unregister_notifiers(void)
{
unregister_inetaddr_notifier(&lockd_inetaddr_notifier);
#if IS_ENABLED(CONFIG_IPV6)
unregister_inet6addr_notifier(&lockd_inet6addr_notifier);
#endif
}
static void lockd_svc_exit_thread(void)
{
lockd_unregister_notifiers();
svc_exit_thread(nlmsvc_rqst);
}
static int lockd_start_svc(struct svc_serv *serv)
{
int error;
if (nlmsvc_rqst)
return 0;
/*
* Create the kernel thread and wait for it to start.
*/
nlmsvc_rqst = svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE);
if (IS_ERR(nlmsvc_rqst)) {
error = PTR_ERR(nlmsvc_rqst);
printk(KERN_WARNING
"lockd_up: svc_rqst allocation failed, error=%d\n",
error);
goto out_rqst;
}
svc_sock_update_bufs(serv);
serv->sv_maxconn = nlm_max_connections;
nlmsvc_task = kthread_create(lockd, nlmsvc_rqst, "%s", serv->sv_name);
if (IS_ERR(nlmsvc_task)) {
error = PTR_ERR(nlmsvc_task);
printk(KERN_WARNING
"lockd_up: kthread_run failed, error=%d\n", error);
goto out_task;
}
nlmsvc_rqst->rq_task = nlmsvc_task;
wake_up_process(nlmsvc_task);
dprintk("lockd_up: service started\n");
return 0;
out_task:
lockd_svc_exit_thread();
nlmsvc_task = NULL;
out_rqst:
nlmsvc_rqst = NULL;
return error;
}
static struct svc_serv_ops lockd_sv_ops = {
.svo_shutdown = svc_rpcb_cleanup,
.svo_enqueue_xprt = svc_xprt_do_enqueue,
};
static struct svc_serv *lockd_create_svc(void)
{
struct svc_serv *serv;
/*
* Check whether we're already up and running.
*/
if (nlmsvc_rqst) {
/*
* Note: increase service usage, because later in case of error
* svc_destroy() will be called.
*/
svc_get(nlmsvc_rqst->rq_server);
return nlmsvc_rqst->rq_server;
}
/*
* Sanity check: if there's no pid,
* we should be the first user ...
*/
if (nlmsvc_users)
printk(KERN_WARNING
"lockd_up: no pid, %d users??\n", nlmsvc_users);
if (!nlm_timeout)
nlm_timeout = LOCKD_DFLT_TIMEO;
nlmsvc_timeout = nlm_timeout * HZ;
serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, &lockd_sv_ops);
if (!serv) {
printk(KERN_WARNING "lockd_up: create service failed\n");
return ERR_PTR(-ENOMEM);
}
register_inetaddr_notifier(&lockd_inetaddr_notifier);
#if IS_ENABLED(CONFIG_IPV6)
register_inet6addr_notifier(&lockd_inet6addr_notifier);
#endif
dprintk("lockd_up: service created\n");
return serv;
}
/*
* Bring up the lockd process if it's not already up.
*/
int lockd_up(struct net *net)
{
struct svc_serv *serv;
int error;
mutex_lock(&nlmsvc_mutex);
serv = lockd_create_svc();
if (IS_ERR(serv)) {
error = PTR_ERR(serv);
goto err_create;
}
error = lockd_up_net(serv, net);
if (error < 0)
goto err_net;
error = lockd_start_svc(serv);
if (error < 0)
goto err_start;
nlmsvc_users++;
/*
* Note: svc_serv structures have an initial use count of 1,
* so we exit through here on both success and failure.
*/
err_put:
svc_destroy(serv);
err_create:
mutex_unlock(&nlmsvc_mutex);
return error;
err_start:
lockd_down_net(serv, net);
err_net:
lockd_unregister_notifiers();
goto err_put;
}
EXPORT_SYMBOL_GPL(lockd_up);
/*
* Decrement the user count and bring down lockd if we're the last.
*/
void
lockd_down(struct net *net)
{
mutex_lock(&nlmsvc_mutex);
lockd_down_net(nlmsvc_rqst->rq_server, net);
if (nlmsvc_users) {
if (--nlmsvc_users)
goto out;
} else {
printk(KERN_ERR "lockd_down: no users! task=%p\n",
nlmsvc_task);
BUG();
}
if (!nlmsvc_task) {
printk(KERN_ERR "lockd_down: no lockd running.\n");
BUG();
}
kthread_stop(nlmsvc_task);
dprintk("lockd_down: service stopped\n");
lockd_svc_exit_thread();
dprintk("lockd_down: service destroyed\n");
nlmsvc_task = NULL;
nlmsvc_rqst = NULL;
out:
mutex_unlock(&nlmsvc_mutex);
}
EXPORT_SYMBOL_GPL(lockd_down);
#ifdef CONFIG_SYSCTL
/*
* Sysctl parameters (same as module parameters, different interface).
*/
static struct ctl_table nlm_sysctls[] = {
{
.procname = "nlm_grace_period",
.data = &nlm_grace_period,
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
.extra1 = (unsigned long *) &nlm_grace_period_min,
.extra2 = (unsigned long *) &nlm_grace_period_max,
},
{
.procname = "nlm_timeout",
.data = &nlm_timeout,
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
.extra1 = (unsigned long *) &nlm_timeout_min,
.extra2 = (unsigned long *) &nlm_timeout_max,
},
{
.procname = "nlm_udpport",
.data = &nlm_udpport,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = (int *) &nlm_port_min,
.extra2 = (int *) &nlm_port_max,
},
{
.procname = "nlm_tcpport",
.data = &nlm_tcpport,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = (int *) &nlm_port_min,
.extra2 = (int *) &nlm_port_max,
},
{
.procname = "nsm_use_hostnames",
.data = &nsm_use_hostnames,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "nsm_local_state",
.data = &nsm_local_state,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{ }
};
static struct ctl_table nlm_sysctl_dir[] = {
{
.procname = "nfs",
.mode = 0555,
.child = nlm_sysctls,
},
{ }
};
static struct ctl_table nlm_sysctl_root[] = {
{
.procname = "fs",
.mode = 0555,
.child = nlm_sysctl_dir,
},
{ }
};
#endif /* CONFIG_SYSCTL */
/*
* Module (and sysfs) parameters.
*/
#define param_set_min_max(name, type, which_strtol, min, max) \
static int param_set_##name(const char *val, struct kernel_param *kp) \
{ \
char *endp; \
__typeof__(type) num = which_strtol(val, &endp, 0); \
if (endp == val || *endp || num < (min) || num > (max)) \
return -EINVAL; \
*((type *) kp->arg) = num; \
return 0; \
}
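/*
 * Illustrative expansion (annotation, not in the original source): the
 * invocation param_set_min_max(port, int, simple_strtol, 0, 65535) further
 * below generates, roughly:
 *
 *	static int param_set_port(const char *val, struct kernel_param *kp)
 *	{
 *		char *endp;
 *		int num = simple_strtol(val, &endp, 0);
 *		if (endp == val || *endp || num < 0 || num > 65535)
 *			return -EINVAL;
 *		*((int *) kp->arg) = num;
 *		return 0;
 *	}
 */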
static inline int is_callback(u32 proc)
{
return proc == NLMPROC_GRANTED
|| proc == NLMPROC_GRANTED_MSG
|| proc == NLMPROC_TEST_RES
|| proc == NLMPROC_LOCK_RES
|| proc == NLMPROC_CANCEL_RES
|| proc == NLMPROC_UNLOCK_RES
|| proc == NLMPROC_NSM_NOTIFY;
}
static int lockd_authenticate(struct svc_rqst *rqstp)
{
rqstp->rq_client = NULL;
switch (rqstp->rq_authop->flavour) {
case RPC_AUTH_NULL:
case RPC_AUTH_UNIX:
if (rqstp->rq_proc == 0)
return SVC_OK;
if (is_callback(rqstp->rq_proc)) {
/* Leave it to individual procedures to
* call nlmsvc_lookup_host(rqstp)
*/
return SVC_OK;
}
return svc_set_client(rqstp);
}
return SVC_DENIED;
}
param_set_min_max(port, int, simple_strtol, 0, 65535)
param_set_min_max(grace_period, unsigned long, simple_strtoul,
nlm_grace_period_min, nlm_grace_period_max)
param_set_min_max(timeout, unsigned long, simple_strtoul,
nlm_timeout_min, nlm_timeout_max)
MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
MODULE_DESCRIPTION("NFS file locking service version " LOCKD_VERSION ".");
MODULE_LICENSE("GPL");
module_param_call(nlm_grace_period, param_set_grace_period, param_get_ulong,
&nlm_grace_period, 0644);
module_param_call(nlm_timeout, param_set_timeout, param_get_ulong,
&nlm_timeout, 0644);
module_param_call(nlm_udpport, param_set_port, param_get_int,
&nlm_udpport, 0644);
module_param_call(nlm_tcpport, param_set_port, param_get_int,
&nlm_tcpport, 0644);
module_param(nsm_use_hostnames, bool, 0644);
module_param(nlm_max_connections, uint, 0644);
static int lockd_init_net(struct net *net)
{
struct lockd_net *ln = net_generic(net, lockd_net_id);
INIT_DELAYED_WORK(&ln->grace_period_end, grace_ender);
INIT_LIST_HEAD(&ln->lockd_manager.list);
ln->lockd_manager.block_opens = false;
INIT_LIST_HEAD(&ln->nsm_handles);
return 0;
}
static void lockd_exit_net(struct net *net)
{
}
static struct pernet_operations lockd_net_ops = {
.init = lockd_init_net,
.exit = lockd_exit_net,
.id = &lockd_net_id,
.size = sizeof(struct lockd_net),
};
/*
* Initialising and terminating the module.
*/
static int __init init_nlm(void)
{
int err;
#ifdef CONFIG_SYSCTL
err = -ENOMEM;
nlm_sysctl_table = register_sysctl_table(nlm_sysctl_root);
if (nlm_sysctl_table == NULL)
goto err_sysctl;
#endif
err = register_pernet_subsys(&lockd_net_ops);
if (err)
goto err_pernet;
err = lockd_create_procfs();
if (err)
goto err_procfs;
return 0;
err_procfs:
unregister_pernet_subsys(&lockd_net_ops);
err_pernet:
#ifdef CONFIG_SYSCTL
unregister_sysctl_table(nlm_sysctl_table);
err_sysctl:
#endif
return err;
}
static void __exit exit_nlm(void)
{
/* FIXME: delete all NLM clients */
nlm_shutdown_hosts();
lockd_remove_procfs();
unregister_pernet_subsys(&lockd_net_ops);
#ifdef CONFIG_SYSCTL
unregister_sysctl_table(nlm_sysctl_table);
#endif
}
module_init(init_nlm);
module_exit(exit_nlm);
/*
* Define NLM program and procedures
*/
static struct svc_version nlmsvc_version1 = {
.vs_vers = 1,
.vs_nproc = 17,
.vs_proc = nlmsvc_procedures,
.vs_xdrsize = NLMSVC_XDRSIZE,
};
static struct svc_version nlmsvc_version3 = {
.vs_vers = 3,
.vs_nproc = 24,
.vs_proc = nlmsvc_procedures,
.vs_xdrsize = NLMSVC_XDRSIZE,
};
#ifdef CONFIG_LOCKD_V4
static struct svc_version nlmsvc_version4 = {
.vs_vers = 4,
.vs_nproc = 24,
.vs_proc = nlmsvc_procedures4,
.vs_xdrsize = NLMSVC_XDRSIZE,
};
#endif
static struct svc_version * nlmsvc_version[] = {
[1] = &nlmsvc_version1,
[3] = &nlmsvc_version3,
#ifdef CONFIG_LOCKD_V4
[4] = &nlmsvc_version4,
#endif
};
static struct svc_stat nlmsvc_stats;
#define NLM_NRVERS ARRAY_SIZE(nlmsvc_version)
static struct svc_program nlmsvc_program = {
.pg_prog = NLM_PROGRAM, /* program number */
.pg_nvers = NLM_NRVERS, /* number of entries in nlmsvc_version */
.pg_vers = nlmsvc_version, /* version table */
.pg_name = "lockd", /* service name */
.pg_class = "nfsd", /* share authentication with nfsd */
.pg_stats = &nlmsvc_stats, /* stats table */
.pg_authenticate = &lockd_authenticate /* export authentication */
};
| ./CrossVul/dataset_final_sorted/CWE-404/c/good_3351_0 |
crossvul-cpp_data_good_3351_4 | /*
* Server-side procedures for NFSv4.
*
* Copyright (c) 2002 The Regents of the University of Michigan.
* All rights reserved.
*
* Kendrick Smith <kmsmith@umich.edu>
* Andy Adamson <andros@umich.edu>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/file.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include "idmap.h"
#include "cache.h"
#include "xdr4.h"
#include "vfs.h"
#include "current_stateid.h"
#include "netns.h"
#include "acl.h"
#include "pnfs.h"
#include "trace.h"
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
#include <linux/security.h>
static inline void
nfsd4_security_inode_setsecctx(struct svc_fh *resfh, struct xdr_netobj *label, u32 *bmval)
{
struct inode *inode = d_inode(resfh->fh_dentry);
int status;
inode_lock(inode);
status = security_inode_setsecctx(resfh->fh_dentry,
label->data, label->len);
inode_unlock(inode);
if (status)
/*
* XXX: We should really fail the whole open, but we may
* already have created a new file, so it may be too
* late. For now this seems the least of evils:
*/
bmval[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
return;
}
#else
static inline void
nfsd4_security_inode_setsecctx(struct svc_fh *resfh, struct xdr_netobj *label, u32 *bmval)
{ }
#endif
#define NFSDDBG_FACILITY NFSDDBG_PROC
static u32 nfsd_attrmask[] = {
NFSD_WRITEABLE_ATTRS_WORD0,
NFSD_WRITEABLE_ATTRS_WORD1,
NFSD_WRITEABLE_ATTRS_WORD2
};
static u32 nfsd41_ex_attrmask[] = {
NFSD_SUPPATTR_EXCLCREAT_WORD0,
NFSD_SUPPATTR_EXCLCREAT_WORD1,
NFSD_SUPPATTR_EXCLCREAT_WORD2
};
static __be32
check_attr_support(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
u32 *bmval, u32 *writable)
{
struct dentry *dentry = cstate->current_fh.fh_dentry;
struct svc_export *exp = cstate->current_fh.fh_export;
if (!nfsd_attrs_supported(cstate->minorversion, bmval))
return nfserr_attrnotsupp;
if ((bmval[0] & FATTR4_WORD0_ACL) && !IS_POSIXACL(d_inode(dentry)))
return nfserr_attrnotsupp;
if ((bmval[2] & FATTR4_WORD2_SECURITY_LABEL) &&
!(exp->ex_flags & NFSEXP_SECURITY_LABEL))
return nfserr_attrnotsupp;
if (writable && !bmval_is_subset(bmval, writable))
return nfserr_inval;
if (writable && (bmval[2] & FATTR4_WORD2_MODE_UMASK) &&
(bmval[1] & FATTR4_WORD1_MODE))
return nfserr_inval;
return nfs_ok;
}
static __be32
nfsd4_check_open_attributes(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
{
__be32 status = nfs_ok;
if (open->op_create == NFS4_OPEN_CREATE) {
if (open->op_createmode == NFS4_CREATE_UNCHECKED
|| open->op_createmode == NFS4_CREATE_GUARDED)
status = check_attr_support(rqstp, cstate,
open->op_bmval, nfsd_attrmask);
else if (open->op_createmode == NFS4_CREATE_EXCLUSIVE4_1)
status = check_attr_support(rqstp, cstate,
open->op_bmval, nfsd41_ex_attrmask);
}
return status;
}
static int
is_create_with_attrs(struct nfsd4_open *open)
{
return open->op_create == NFS4_OPEN_CREATE
&& (open->op_createmode == NFS4_CREATE_UNCHECKED
|| open->op_createmode == NFS4_CREATE_GUARDED
|| open->op_createmode == NFS4_CREATE_EXCLUSIVE4_1);
}
/*
* if error occurs when setting the acl, just clear the acl bit
* in the returned attr bitmap.
*/
static void
do_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
struct nfs4_acl *acl, u32 *bmval)
{
__be32 status;
status = nfsd4_set_nfs4_acl(rqstp, fhp, acl);
if (status)
/*
* We should probably fail the whole open at this point,
* but we've already created the file, so it's too late;
* So this seems the least of evils:
*/
bmval[0] &= ~FATTR4_WORD0_ACL;
}
static inline void
fh_dup2(struct svc_fh *dst, struct svc_fh *src)
{
fh_put(dst);
dget(src->fh_dentry);
if (src->fh_export)
exp_get(src->fh_export);
*dst = *src;
}
static __be32
do_open_permission(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open, int accmode)
{
__be32 status;
if (open->op_truncate &&
!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
return nfserr_inval;
accmode |= NFSD_MAY_READ_IF_EXEC;
if (open->op_share_access & NFS4_SHARE_ACCESS_READ)
accmode |= NFSD_MAY_READ;
if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
accmode |= (NFSD_MAY_WRITE | NFSD_MAY_TRUNC);
if (open->op_share_deny & NFS4_SHARE_DENY_READ)
accmode |= NFSD_MAY_WRITE;
status = fh_verify(rqstp, current_fh, S_IFREG, accmode);
return status;
}
static __be32 nfsd_check_obj_isreg(struct svc_fh *fh)
{
umode_t mode = d_inode(fh->fh_dentry)->i_mode;
if (S_ISREG(mode))
return nfs_ok;
if (S_ISDIR(mode))
return nfserr_isdir;
/*
* Using err_symlink as our catch-all case may look odd; but
* there's no other obvious error for this case in 4.0, and we
* happen to know that it will cause the linux v4 client to do
* the right thing on attempts to open something other than a
* regular file.
*/
return nfserr_symlink;
}
static void nfsd4_set_open_owner_reply_cache(struct nfsd4_compound_state *cstate, struct nfsd4_open *open, struct svc_fh *resfh)
{
if (nfsd4_has_session(cstate))
return;
fh_copy_shallow(&open->op_openowner->oo_owner.so_replay.rp_openfh,
&resfh->fh_handle);
}
static __be32
do_open_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_open *open, struct svc_fh **resfh)
{
struct svc_fh *current_fh = &cstate->current_fh;
int accmode;
__be32 status;
*resfh = kmalloc(sizeof(struct svc_fh), GFP_KERNEL);
if (!*resfh)
return nfserr_jukebox;
fh_init(*resfh, NFS4_FHSIZE);
open->op_truncate = 0;
if (open->op_create) {
/* FIXME: check session persistence and pnfs flags.
* The nfsv4.1 spec requires the following semantics:
*
* Persistent | pNFS | Server REQUIRED | Client Allowed
* Reply Cache | server | |
* -------------+--------+-----------------+--------------------
* no | no | EXCLUSIVE4_1 | EXCLUSIVE4_1
* | | | (SHOULD)
* | | and EXCLUSIVE4 | or EXCLUSIVE4
* | | | (SHOULD NOT)
* no | yes | EXCLUSIVE4_1 | EXCLUSIVE4_1
* yes | no | GUARDED4 | GUARDED4
* yes | yes | GUARDED4 | GUARDED4
*/
/*
* Note: create modes (UNCHECKED,GUARDED...) are the same
* in NFSv4 as in v3 except EXCLUSIVE4_1.
*/
status = do_nfsd_create(rqstp, current_fh, open->op_fname.data,
open->op_fname.len, &open->op_iattr,
*resfh, open->op_createmode,
(u32 *)open->op_verf.data,
&open->op_truncate, &open->op_created);
if (!status && open->op_label.len)
nfsd4_security_inode_setsecctx(*resfh, &open->op_label, open->op_bmval);
/*
* Following rfc 3530 14.2.16, and rfc 5661 18.16.4
* use the returned bitmask to indicate which attributes
* we used to store the verifier:
*/
if (nfsd_create_is_exclusive(open->op_createmode) && status == 0)
open->op_bmval[1] |= (FATTR4_WORD1_TIME_ACCESS |
FATTR4_WORD1_TIME_MODIFY);
} else
/*
* Note this may exit with the parent still locked.
* We will hold the lock until nfsd4_open's final
* lookup, to prevent renames or unlinks until we've had
* a chance to acquire a delegation if appropriate.
*/
status = nfsd_lookup(rqstp, current_fh,
open->op_fname.data, open->op_fname.len, *resfh);
if (status)
goto out;
status = nfsd_check_obj_isreg(*resfh);
if (status)
goto out;
if (is_create_with_attrs(open) && open->op_acl != NULL)
do_set_nfs4_acl(rqstp, *resfh, open->op_acl, open->op_bmval);
nfsd4_set_open_owner_reply_cache(cstate, open, *resfh);
accmode = NFSD_MAY_NOP;
if (open->op_created ||
open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR)
accmode |= NFSD_MAY_OWNER_OVERRIDE;
status = do_open_permission(rqstp, *resfh, open, accmode);
set_change_info(&open->op_cinfo, current_fh);
out:
return status;
}
static __be32
do_open_fhandle(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
{
struct svc_fh *current_fh = &cstate->current_fh;
__be32 status;
int accmode = 0;
/* We don't know the target directory, and therefore cannot
* set the change info
*/
memset(&open->op_cinfo, 0, sizeof(struct nfsd4_change_info));
nfsd4_set_open_owner_reply_cache(cstate, open, current_fh);
open->op_truncate = (open->op_iattr.ia_valid & ATTR_SIZE) &&
(open->op_iattr.ia_size == 0);
/*
* In the delegation case, the client is telling us about an
* open that it *already* performed locally, some time ago. We
* should let it succeed now if possible.
*
* In the case of a CLAIM_FH open, on the other hand, the client
* may be counting on us to enforce permissions (the Linux 4.1
* client uses this for normal opens, for example).
*/
if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH)
accmode = NFSD_MAY_OWNER_OVERRIDE;
status = do_open_permission(rqstp, current_fh, open, accmode);
return status;
}
static void
copy_clientid(clientid_t *clid, struct nfsd4_session *session)
{
struct nfsd4_sessionid *sid =
(struct nfsd4_sessionid *)session->se_sessionid.data;
clid->cl_boot = sid->clientid.cl_boot;
clid->cl_id = sid->clientid.cl_id;
}
static __be32
nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_open *open)
{
__be32 status;
struct svc_fh *resfh = NULL;
struct net *net = SVC_NET(rqstp);
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
dprintk("NFSD: nfsd4_open filename %.*s op_openowner %p\n",
(int)open->op_fname.len, open->op_fname.data,
open->op_openowner);
/* This check required by spec. */
if (open->op_create && open->op_claim_type != NFS4_OPEN_CLAIM_NULL)
return nfserr_inval;
open->op_created = 0;
/*
* RFC5661 18.51.3
* Before RECLAIM_COMPLETE done, server should deny new lock
*/
if (nfsd4_has_session(cstate) &&
!test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
&cstate->session->se_client->cl_flags) &&
open->op_claim_type != NFS4_OPEN_CLAIM_PREVIOUS)
return nfserr_grace;
if (nfsd4_has_session(cstate))
copy_clientid(&open->op_clientid, cstate->session);
/* check seqid for replay. set nfs4_owner */
status = nfsd4_process_open1(cstate, open, nn);
if (status == nfserr_replay_me) {
struct nfs4_replay *rp = &open->op_openowner->oo_owner.so_replay;
fh_put(&cstate->current_fh);
fh_copy_shallow(&cstate->current_fh.fh_handle,
&rp->rp_openfh);
status = fh_verify(rqstp, &cstate->current_fh, 0, NFSD_MAY_NOP);
if (status)
dprintk("nfsd4_open: replay failed"
" restoring previous filehandle\n");
else
status = nfserr_replay_me;
}
if (status)
goto out;
if (open->op_xdr_error) {
status = open->op_xdr_error;
goto out;
}
status = nfsd4_check_open_attributes(rqstp, cstate, open);
if (status)
goto out;
/* Openowner is now set, so sequence id will get bumped. Now we need
* these checks before we do any creates: */
status = nfserr_grace;
if (opens_in_grace(net) && open->op_claim_type != NFS4_OPEN_CLAIM_PREVIOUS)
goto out;
status = nfserr_no_grace;
if (!opens_in_grace(net) && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
goto out;
switch (open->op_claim_type) {
case NFS4_OPEN_CLAIM_DELEGATE_CUR:
case NFS4_OPEN_CLAIM_NULL:
status = do_open_lookup(rqstp, cstate, open, &resfh);
if (status)
goto out;
break;
case NFS4_OPEN_CLAIM_PREVIOUS:
status = nfs4_check_open_reclaim(&open->op_clientid,
cstate, nn);
if (status)
goto out;
open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
case NFS4_OPEN_CLAIM_FH:
case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
status = do_open_fhandle(rqstp, cstate, open);
if (status)
goto out;
resfh = &cstate->current_fh;
break;
case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
case NFS4_OPEN_CLAIM_DELEGATE_PREV:
dprintk("NFSD: unsupported OPEN claim type %d\n",
open->op_claim_type);
status = nfserr_notsupp;
goto out;
default:
dprintk("NFSD: Invalid OPEN claim type %d\n",
open->op_claim_type);
status = nfserr_inval;
goto out;
}
/*
* nfsd4_process_open2() does the actual opening of the file. If
* successful, it (1) truncates the file if open->op_truncate was
* set, (2) sets open->op_stateid, (3) sets open->op_delegation.
*/
status = nfsd4_process_open2(rqstp, resfh, open);
WARN(status && open->op_created,
"nfsd4_process_open2 failed to open newly-created file! status=%u\n",
be32_to_cpu(status));
out:
if (resfh && resfh != &cstate->current_fh) {
fh_dup2(&cstate->current_fh, resfh);
fh_put(resfh);
kfree(resfh);
}
nfsd4_cleanup_open_state(cstate, open);
nfsd4_bump_seqid(cstate, status);
return status;
}
/*
* OPEN is the only seqid-mutating operation whose decoding can fail
* with a seqid-mutating error (specifically, decoding of user names in
* the attributes). Therefore we have to do some processing to look up
* the stateowner so that we can bump the seqid.
*/
static __be32 nfsd4_open_omfg(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_op *op)
{
struct nfsd4_open *open = (struct nfsd4_open *)&op->u;
if (!seqid_mutating_err(ntohl(op->status)))
return op->status;
if (nfsd4_has_session(cstate))
return op->status;
open->op_xdr_error = op->status;
return nfsd4_open(rqstp, cstate, open);
}
/*
* filehandle-manipulating ops.
*/
static __be32
nfsd4_getfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct svc_fh **getfh)
{
if (!cstate->current_fh.fh_dentry)
return nfserr_nofilehandle;
*getfh = &cstate->current_fh;
return nfs_ok;
}
static __be32
nfsd4_putfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_putfh *putfh)
{
fh_put(&cstate->current_fh);
cstate->current_fh.fh_handle.fh_size = putfh->pf_fhlen;
memcpy(&cstate->current_fh.fh_handle.fh_base, putfh->pf_fhval,
putfh->pf_fhlen);
return fh_verify(rqstp, &cstate->current_fh, 0, NFSD_MAY_BYPASS_GSS);
}
static __be32
nfsd4_putrootfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
void *arg)
{
__be32 status;
fh_put(&cstate->current_fh);
status = exp_pseudoroot(rqstp, &cstate->current_fh);
return status;
}
static __be32
nfsd4_restorefh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
void *arg)
{
if (!cstate->save_fh.fh_dentry)
return nfserr_restorefh;
fh_dup2(&cstate->current_fh, &cstate->save_fh);
if (HAS_STATE_ID(cstate, SAVED_STATE_ID_FLAG)) {
memcpy(&cstate->current_stateid, &cstate->save_stateid, sizeof(stateid_t));
SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
}
return nfs_ok;
}
static __be32
nfsd4_savefh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
void *arg)
{
if (!cstate->current_fh.fh_dentry)
return nfserr_nofilehandle;
fh_dup2(&cstate->save_fh, &cstate->current_fh);
if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG)) {
memcpy(&cstate->save_stateid, &cstate->current_stateid, sizeof(stateid_t));
SET_STATE_ID(cstate, SAVED_STATE_ID_FLAG);
}
return nfs_ok;
}
/*
* misc nfsv4 ops
*/
static __be32
nfsd4_access(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_access *access)
{
if (access->ac_req_access & ~NFS3_ACCESS_FULL)
return nfserr_inval;
access->ac_resp_access = access->ac_req_access;
return nfsd_access(rqstp, &cstate->current_fh, &access->ac_resp_access,
&access->ac_supported);
}
static void gen_boot_verifier(nfs4_verifier *verifier, struct net *net)
{
__be32 verf[2];
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
/*
* This is opaque to the client, so there is no need to byte-swap. Use
* __force to keep sparse happy.
*/
verf[0] = (__force __be32)nn->nfssvc_boot.tv_sec;
verf[1] = (__force __be32)nn->nfssvc_boot.tv_usec;
memcpy(verifier->data, verf, sizeof(verifier->data));
}
static __be32
nfsd4_commit(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_commit *commit)
{
gen_boot_verifier(&commit->co_verf, SVC_NET(rqstp));
return nfsd_commit(rqstp, &cstate->current_fh, commit->co_offset,
commit->co_count);
}
static __be32
nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_create *create)
{
struct svc_fh resfh;
__be32 status;
dev_t rdev;
fh_init(&resfh, NFS4_FHSIZE);
status = fh_verify(rqstp, &cstate->current_fh, S_IFDIR, NFSD_MAY_NOP);
if (status)
return status;
status = check_attr_support(rqstp, cstate, create->cr_bmval,
nfsd_attrmask);
if (status)
return status;
switch (create->cr_type) {
case NF4LNK:
status = nfsd_symlink(rqstp, &cstate->current_fh,
create->cr_name, create->cr_namelen,
create->cr_data, &resfh);
break;
case NF4BLK:
rdev = MKDEV(create->cr_specdata1, create->cr_specdata2);
if (MAJOR(rdev) != create->cr_specdata1 ||
MINOR(rdev) != create->cr_specdata2)
return nfserr_inval;
status = nfsd_create(rqstp, &cstate->current_fh,
create->cr_name, create->cr_namelen,
&create->cr_iattr, S_IFBLK, rdev, &resfh);
break;
case NF4CHR:
rdev = MKDEV(create->cr_specdata1, create->cr_specdata2);
if (MAJOR(rdev) != create->cr_specdata1 ||
MINOR(rdev) != create->cr_specdata2)
return nfserr_inval;
status = nfsd_create(rqstp, &cstate->current_fh,
create->cr_name, create->cr_namelen,
&create->cr_iattr, S_IFCHR, rdev, &resfh);
break;
case NF4SOCK:
status = nfsd_create(rqstp, &cstate->current_fh,
create->cr_name, create->cr_namelen,
&create->cr_iattr, S_IFSOCK, 0, &resfh);
break;
case NF4FIFO:
status = nfsd_create(rqstp, &cstate->current_fh,
create->cr_name, create->cr_namelen,
&create->cr_iattr, S_IFIFO, 0, &resfh);
break;
case NF4DIR:
create->cr_iattr.ia_valid &= ~ATTR_SIZE;
status = nfsd_create(rqstp, &cstate->current_fh,
create->cr_name, create->cr_namelen,
&create->cr_iattr, S_IFDIR, 0, &resfh);
break;
default:
status = nfserr_badtype;
}
if (status)
goto out;
if (create->cr_label.len)
nfsd4_security_inode_setsecctx(&resfh, &create->cr_label, create->cr_bmval);
if (create->cr_acl != NULL)
do_set_nfs4_acl(rqstp, &resfh, create->cr_acl,
create->cr_bmval);
fh_unlock(&cstate->current_fh);
set_change_info(&create->cr_cinfo, &cstate->current_fh);
fh_dup2(&cstate->current_fh, &resfh);
out:
fh_put(&resfh);
return status;
}
static __be32
nfsd4_getattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_getattr *getattr)
{
__be32 status;
status = fh_verify(rqstp, &cstate->current_fh, 0, NFSD_MAY_NOP);
if (status)
return status;
if (getattr->ga_bmval[1] & NFSD_WRITEONLY_ATTRS_WORD1)
return nfserr_inval;
getattr->ga_bmval[0] &= nfsd_suppattrs[cstate->minorversion][0];
getattr->ga_bmval[1] &= nfsd_suppattrs[cstate->minorversion][1];
getattr->ga_bmval[2] &= nfsd_suppattrs[cstate->minorversion][2];
getattr->ga_fhp = &cstate->current_fh;
return nfs_ok;
}
static __be32
nfsd4_link(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_link *link)
{
__be32 status = nfserr_nofilehandle;
if (!cstate->save_fh.fh_dentry)
return status;
status = nfsd_link(rqstp, &cstate->current_fh,
link->li_name, link->li_namelen, &cstate->save_fh);
if (!status)
set_change_info(&link->li_cinfo, &cstate->current_fh);
return status;
}
static __be32 nfsd4_do_lookupp(struct svc_rqst *rqstp, struct svc_fh *fh)
{
struct svc_fh tmp_fh;
__be32 ret;
fh_init(&tmp_fh, NFS4_FHSIZE);
ret = exp_pseudoroot(rqstp, &tmp_fh);
if (ret)
return ret;
if (tmp_fh.fh_dentry == fh->fh_dentry) {
fh_put(&tmp_fh);
return nfserr_noent;
}
fh_put(&tmp_fh);
return nfsd_lookup(rqstp, fh, "..", 2, fh);
}
static __be32
nfsd4_lookupp(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
void *arg)
{
return nfsd4_do_lookupp(rqstp, &cstate->current_fh);
}
static __be32
nfsd4_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_lookup *lookup)
{
return nfsd_lookup(rqstp, &cstate->current_fh,
lookup->lo_name, lookup->lo_len,
&cstate->current_fh);
}
static __be32
nfsd4_read(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_read *read)
{
__be32 status;
read->rd_filp = NULL;
if (read->rd_offset >= OFFSET_MAX)
return nfserr_inval;
/*
* If we do a zero copy read, then a client will see read data
* that reflects the state of the file *after* performing the
* following compound.
*
* To ensure proper ordering, we therefore turn off zero copy if
* the client wants us to do more in this compound:
*/
if (!nfsd4_last_compound_op(rqstp))
clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
/* check stateid */
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
&read->rd_stateid, RD_STATE,
&read->rd_filp, &read->rd_tmp_file);
if (status) {
dprintk("NFSD: nfsd4_read: couldn't process stateid!\n");
goto out;
}
status = nfs_ok;
out:
read->rd_rqstp = rqstp;
read->rd_fhp = &cstate->current_fh;
return status;
}
static __be32
nfsd4_readdir(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_readdir *readdir)
{
u64 cookie = readdir->rd_cookie;
static const nfs4_verifier zeroverf;
/* no need to check permission - this will be done in nfsd_readdir() */
if (readdir->rd_bmval[1] & NFSD_WRITEONLY_ATTRS_WORD1)
return nfserr_inval;
readdir->rd_bmval[0] &= nfsd_suppattrs[cstate->minorversion][0];
readdir->rd_bmval[1] &= nfsd_suppattrs[cstate->minorversion][1];
readdir->rd_bmval[2] &= nfsd_suppattrs[cstate->minorversion][2];
if ((cookie == 1) || (cookie == 2) ||
(cookie == 0 && memcmp(readdir->rd_verf.data, zeroverf.data, NFS4_VERIFIER_SIZE)))
return nfserr_bad_cookie;
readdir->rd_rqstp = rqstp;
readdir->rd_fhp = &cstate->current_fh;
return nfs_ok;
}
static __be32
nfsd4_readlink(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_readlink *readlink)
{
readlink->rl_rqstp = rqstp;
readlink->rl_fhp = &cstate->current_fh;
return nfs_ok;
}
static __be32
nfsd4_remove(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_remove *remove)
{
__be32 status;
if (opens_in_grace(SVC_NET(rqstp)))
return nfserr_grace;
status = nfsd_unlink(rqstp, &cstate->current_fh, 0,
remove->rm_name, remove->rm_namelen);
if (!status) {
fh_unlock(&cstate->current_fh);
set_change_info(&remove->rm_cinfo, &cstate->current_fh);
}
return status;
}
static __be32
nfsd4_rename(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_rename *rename)
{
__be32 status = nfserr_nofilehandle;
if (!cstate->save_fh.fh_dentry)
return status;
if (opens_in_grace(SVC_NET(rqstp)) &&
!(cstate->save_fh.fh_export->ex_flags & NFSEXP_NOSUBTREECHECK))
return nfserr_grace;
status = nfsd_rename(rqstp, &cstate->save_fh, rename->rn_sname,
rename->rn_snamelen, &cstate->current_fh,
rename->rn_tname, rename->rn_tnamelen);
if (status)
return status;
set_change_info(&rename->rn_sinfo, &cstate->current_fh);
set_change_info(&rename->rn_tinfo, &cstate->save_fh);
return nfs_ok;
}
static __be32
nfsd4_secinfo(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_secinfo *secinfo)
{
struct svc_export *exp;
struct dentry *dentry;
__be32 err;
err = fh_verify(rqstp, &cstate->current_fh, S_IFDIR, NFSD_MAY_EXEC);
if (err)
return err;
err = nfsd_lookup_dentry(rqstp, &cstate->current_fh,
secinfo->si_name, secinfo->si_namelen,
&exp, &dentry);
if (err)
return err;
fh_unlock(&cstate->current_fh);
if (d_really_is_negative(dentry)) {
exp_put(exp);
err = nfserr_noent;
} else
secinfo->si_exp = exp;
dput(dentry);
if (cstate->minorversion)
/* See rfc 5661 section 2.6.3.1.1.8 */
fh_put(&cstate->current_fh);
return err;
}
static __be32
nfsd4_secinfo_no_name(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_secinfo_no_name *sin)
{
__be32 err;
switch (sin->sin_style) {
case NFS4_SECINFO_STYLE4_CURRENT_FH:
break;
case NFS4_SECINFO_STYLE4_PARENT:
err = nfsd4_do_lookupp(rqstp, &cstate->current_fh);
if (err)
return err;
break;
default:
return nfserr_inval;
}
sin->sin_exp = exp_get(cstate->current_fh.fh_export);
fh_put(&cstate->current_fh);
return nfs_ok;
}
static __be32
nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_setattr *setattr)
{
__be32 status = nfs_ok;
int err;
if (setattr->sa_iattr.ia_valid & ATTR_SIZE) {
status = nfs4_preprocess_stateid_op(rqstp, cstate,
&cstate->current_fh, &setattr->sa_stateid,
WR_STATE, NULL, NULL);
if (status) {
dprintk("NFSD: nfsd4_setattr: couldn't process stateid!\n");
return status;
}
}
err = fh_want_write(&cstate->current_fh);
if (err)
return nfserrno(err);
status = check_attr_support(rqstp, cstate, setattr->sa_bmval,
nfsd_attrmask);
if (status)
goto out;
if (setattr->sa_acl != NULL)
status = nfsd4_set_nfs4_acl(rqstp, &cstate->current_fh,
setattr->sa_acl);
if (status)
goto out;
if (setattr->sa_label.len)
status = nfsd4_set_nfs4_label(rqstp, &cstate->current_fh,
&setattr->sa_label);
if (status)
goto out;
status = nfsd_setattr(rqstp, &cstate->current_fh, &setattr->sa_iattr,
0, (time_t)0);
out:
fh_drop_write(&cstate->current_fh);
return status;
}
static int fill_in_write_vector(struct kvec *vec, struct nfsd4_write *write)
{
int i = 1;
int buflen = write->wr_buflen;
vec[0].iov_base = write->wr_head.iov_base;
vec[0].iov_len = min_t(int, buflen, write->wr_head.iov_len);
buflen -= vec[0].iov_len;
while (buflen) {
vec[i].iov_base = page_address(write->wr_pagelist[i - 1]);
vec[i].iov_len = min_t(int, PAGE_SIZE, buflen);
buflen -= vec[i].iov_len;
i++;
}
return i;
}
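/*
 * Worked example: with a 500-byte wr_head, PAGE_SIZE == 4096 and
 * wr_buflen == 9000, the loop above yields vec[0] = 500 bytes from
 * wr_head, vec[1] = 4096 bytes from wr_pagelist[0], vec[2] = 4096 bytes
 * from wr_pagelist[1] and vec[3] = 308 bytes from wr_pagelist[2],
 * returning nvecs == 4.
 */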
static __be32
nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_write *write)
{
stateid_t *stateid = &write->wr_stateid;
struct file *filp = NULL;
__be32 status = nfs_ok;
unsigned long cnt;
int nvecs;
if (write->wr_offset >= OFFSET_MAX)
return nfserr_inval;
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
stateid, WR_STATE, &filp, NULL);
if (status) {
dprintk("NFSD: nfsd4_write: couldn't process stateid!\n");
return status;
}
cnt = write->wr_buflen;
write->wr_how_written = write->wr_stable_how;
gen_boot_verifier(&write->wr_verifier, SVC_NET(rqstp));
nvecs = fill_in_write_vector(rqstp->rq_vec, write);
WARN_ON_ONCE(nvecs > ARRAY_SIZE(rqstp->rq_vec));
status = nfsd_vfs_write(rqstp, &cstate->current_fh, filp,
write->wr_offset, rqstp->rq_vec, nvecs, &cnt,
write->wr_how_written);
fput(filp);
write->wr_bytes_written = cnt;
return status;
}
static __be32
nfsd4_verify_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
stateid_t *src_stateid, struct file **src,
stateid_t *dst_stateid, struct file **dst)
{
__be32 status;
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->save_fh,
src_stateid, RD_STATE, src, NULL);
if (status) {
dprintk("NFSD: %s: couldn't process src stateid!\n", __func__);
goto out;
}
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
dst_stateid, WR_STATE, dst, NULL);
if (status) {
dprintk("NFSD: %s: couldn't process dst stateid!\n", __func__);
goto out_put_src;
}
/* fix up for NFS-specific error code */
if (!S_ISREG(file_inode(*src)->i_mode) ||
!S_ISREG(file_inode(*dst)->i_mode)) {
status = nfserr_wrong_type;
goto out_put_dst;
}
out:
return status;
out_put_dst:
fput(*dst);
out_put_src:
fput(*src);
goto out;
}
static __be32
nfsd4_clone(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_clone *clone)
{
struct file *src, *dst;
__be32 status;
status = nfsd4_verify_copy(rqstp, cstate, &clone->cl_src_stateid, &src,
&clone->cl_dst_stateid, &dst);
if (status)
goto out;
status = nfsd4_clone_file_range(src, clone->cl_src_pos,
dst, clone->cl_dst_pos, clone->cl_count);
fput(dst);
fput(src);
out:
return status;
}
static __be32
nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_copy *copy)
{
struct file *src, *dst;
__be32 status;
ssize_t bytes;
status = nfsd4_verify_copy(rqstp, cstate, &copy->cp_src_stateid, &src,
&copy->cp_dst_stateid, &dst);
if (status)
goto out;
bytes = nfsd_copy_file_range(src, copy->cp_src_pos,
dst, copy->cp_dst_pos, copy->cp_count);
if (bytes < 0)
status = nfserrno(bytes);
else {
copy->cp_res.wr_bytes_written = bytes;
copy->cp_res.wr_stable_how = NFS_UNSTABLE;
copy->cp_consecutive = 1;
copy->cp_synchronous = 1;
gen_boot_verifier(&copy->cp_res.wr_verifier, SVC_NET(rqstp));
status = nfs_ok;
}
fput(src);
fput(dst);
out:
return status;
}
static __be32
nfsd4_fallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_fallocate *fallocate, int flags)
{
__be32 status = nfserr_notsupp;
struct file *file;
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
&fallocate->falloc_stateid,
WR_STATE, &file, NULL);
if (status != nfs_ok) {
dprintk("NFSD: nfsd4_fallocate: couldn't process stateid!\n");
return status;
}
status = nfsd4_vfs_fallocate(rqstp, &cstate->current_fh, file,
fallocate->falloc_offset,
fallocate->falloc_length,
flags);
fput(file);
return status;
}
static __be32
nfsd4_allocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_fallocate *fallocate)
{
return nfsd4_fallocate(rqstp, cstate, fallocate, 0);
}
static __be32
nfsd4_deallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_fallocate *fallocate)
{
return nfsd4_fallocate(rqstp, cstate, fallocate,
FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE);
}
static __be32
nfsd4_seek(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_seek *seek)
{
int whence;
__be32 status;
struct file *file;
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
&seek->seek_stateid,
RD_STATE, &file, NULL);
if (status) {
dprintk("NFSD: nfsd4_seek: couldn't process stateid!\n");
return status;
}
switch (seek->seek_whence) {
case NFS4_CONTENT_DATA:
whence = SEEK_DATA;
break;
case NFS4_CONTENT_HOLE:
whence = SEEK_HOLE;
break;
default:
status = nfserr_union_notsupp;
goto out;
}
/*
* Note: This call does change file->f_pos, but nothing in NFSD
* should ever use file->f_pos.
*/
seek->seek_pos = vfs_llseek(file, seek->seek_offset, whence);
if (seek->seek_pos < 0)
status = nfserrno(seek->seek_pos);
else if (seek->seek_pos >= i_size_read(file_inode(file)))
seek->seek_eof = true;
out:
fput(file);
return status;
}
/* This routine never returns NFS_OK! If there are no other errors, it
* will return NFSERR_SAME or NFSERR_NOT_SAME depending on whether the
* attributes matched. VERIFY is implemented by mapping NFSERR_SAME
* to NFS_OK after the call; NVERIFY by mapping NFSERR_NOT_SAME to NFS_OK.
*/
static __be32
_nfsd4_verify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_verify *verify)
{
__be32 *buf, *p;
int count;
__be32 status;
status = fh_verify(rqstp, &cstate->current_fh, 0, NFSD_MAY_NOP);
if (status)
return status;
status = check_attr_support(rqstp, cstate, verify->ve_bmval, NULL);
if (status)
return status;
if ((verify->ve_bmval[0] & FATTR4_WORD0_RDATTR_ERROR)
|| (verify->ve_bmval[1] & NFSD_WRITEONLY_ATTRS_WORD1))
return nfserr_inval;
if (verify->ve_attrlen & 3)
return nfserr_inval;
/* count in words:
* bitmap_len(1) + bitmap(2) + attr_len(1) = 4
*/
count = 4 + (verify->ve_attrlen >> 2);
buf = kmalloc(count << 2, GFP_KERNEL);
if (!buf)
return nfserr_jukebox;
p = buf;
status = nfsd4_encode_fattr_to_buf(&p, count, &cstate->current_fh,
cstate->current_fh.fh_export,
cstate->current_fh.fh_dentry,
verify->ve_bmval,
rqstp, 0);
/*
* If nfsd4_encode_fattr() ran out of space, assume that's because
* the attributes are longer (hence different) than those given:
*/
if (status == nfserr_resource)
status = nfserr_not_same;
if (status)
goto out_kfree;
/* skip bitmap */
p = buf + 1 + ntohl(buf[0]);
status = nfserr_not_same;
if (ntohl(*p++) != verify->ve_attrlen)
goto out_kfree;
if (!memcmp(p, verify->ve_attrval, verify->ve_attrlen))
status = nfserr_same;
out_kfree:
kfree(buf);
return status;
}
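/*
 * Layout of the scratch buffer built in _nfsd4_verify() above, in
 * 32-bit words:
 *
 *   buf[0]                bitmap word count (at most 3)
 *   buf[1 .. buf[0]]      bitmap words
 *   buf[1 + buf[0]]       attribute data length in bytes
 *   remainder             XDR-encoded attribute data
 *
 * which is why "p = buf + 1 + ntohl(buf[0])" lands on the length word
 * just before the byte-for-byte comparison with ve_attrval.
 */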
static __be32
nfsd4_nverify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_verify *verify)
{
__be32 status;
status = _nfsd4_verify(rqstp, cstate, verify);
return status == nfserr_not_same ? nfs_ok : status;
}
static __be32
nfsd4_verify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_verify *verify)
{
__be32 status;
status = _nfsd4_verify(rqstp, cstate, verify);
return status == nfserr_same ? nfs_ok : status;
}
#ifdef CONFIG_NFSD_PNFS
static const struct nfsd4_layout_ops *
nfsd4_layout_verify(struct svc_export *exp, unsigned int layout_type)
{
if (!exp->ex_layout_types) {
dprintk("%s: export does not support pNFS\n", __func__);
return NULL;
}
if (layout_type >= LAYOUT_TYPE_MAX ||
!(exp->ex_layout_types & (1 << layout_type))) {
dprintk("%s: layout type %d not supported\n",
__func__, layout_type);
return NULL;
}
return nfsd4_layout_ops[layout_type];
}
static __be32
nfsd4_getdeviceinfo(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
struct nfsd4_getdeviceinfo *gdp)
{
const struct nfsd4_layout_ops *ops;
struct nfsd4_deviceid_map *map;
struct svc_export *exp;
__be32 nfserr;
dprintk("%s: layout_type %u dev_id [0x%llx:0x%x] maxcnt %u\n",
__func__,
gdp->gd_layout_type,
gdp->gd_devid.fsid_idx, gdp->gd_devid.generation,
gdp->gd_maxcount);
map = nfsd4_find_devid_map(gdp->gd_devid.fsid_idx);
if (!map) {
dprintk("%s: couldn't find device ID to export mapping!\n",
__func__);
return nfserr_noent;
}
exp = rqst_exp_find(rqstp, map->fsid_type, map->fsid);
if (IS_ERR(exp)) {
dprintk("%s: could not find device id\n", __func__);
return nfserr_noent;
}
nfserr = nfserr_layoutunavailable;
ops = nfsd4_layout_verify(exp, gdp->gd_layout_type);
if (!ops)
goto out;
nfserr = nfs_ok;
if (gdp->gd_maxcount != 0) {
nfserr = ops->proc_getdeviceinfo(exp->ex_path.mnt->mnt_sb,
rqstp, cstate->session->se_client, gdp);
}
gdp->gd_notify_types &= ops->notify_types;
out:
exp_put(exp);
return nfserr;
}
static __be32
nfsd4_layoutget(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
struct nfsd4_layoutget *lgp)
{
struct svc_fh *current_fh = &cstate->current_fh;
const struct nfsd4_layout_ops *ops;
struct nfs4_layout_stateid *ls;
__be32 nfserr;
int accmode;
switch (lgp->lg_seg.iomode) {
case IOMODE_READ:
accmode = NFSD_MAY_READ;
break;
case IOMODE_RW:
accmode = NFSD_MAY_READ | NFSD_MAY_WRITE;
break;
default:
dprintk("%s: invalid iomode %d\n",
__func__, lgp->lg_seg.iomode);
nfserr = nfserr_badiomode;
goto out;
}
nfserr = fh_verify(rqstp, current_fh, 0, accmode);
if (nfserr)
goto out;
nfserr = nfserr_layoutunavailable;
ops = nfsd4_layout_verify(current_fh->fh_export, lgp->lg_layout_type);
if (!ops)
goto out;
/*
* Verify minlength and range as per RFC5661:
* o If loga_length is less than loga_minlength,
* the metadata server MUST return NFS4ERR_INVAL.
* o If the sum of loga_offset and loga_minlength exceeds
* NFS4_UINT64_MAX, and loga_minlength is not
* NFS4_UINT64_MAX, the error NFS4ERR_INVAL MUST result.
* o If the sum of loga_offset and loga_length exceeds
* NFS4_UINT64_MAX, and loga_length is not NFS4_UINT64_MAX,
* the error NFS4ERR_INVAL MUST result.
*/
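/*
 * Worked example: loga_offset = NFS4_UINT64_MAX - 7 with
 * loga_minlength = 16 (and minlength != NFS4_UINT64_MAX) overflows,
 * i.e. lg_minlength > NFS4_MAX_UINT64 - lg_seg.offset == 7, so the
 * check below fails the request with NFS4ERR_INVAL.
 */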
nfserr = nfserr_inval;
if (lgp->lg_seg.length < lgp->lg_minlength ||
(lgp->lg_minlength != NFS4_MAX_UINT64 &&
lgp->lg_minlength > NFS4_MAX_UINT64 - lgp->lg_seg.offset) ||
(lgp->lg_seg.length != NFS4_MAX_UINT64 &&
lgp->lg_seg.length > NFS4_MAX_UINT64 - lgp->lg_seg.offset))
goto out;
if (lgp->lg_seg.length == 0)
goto out;
nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lgp->lg_sid,
true, lgp->lg_layout_type, &ls);
if (nfserr) {
trace_layout_get_lookup_fail(&lgp->lg_sid);
goto out;
}
nfserr = nfserr_recallconflict;
if (atomic_read(&ls->ls_stid.sc_file->fi_lo_recalls))
goto out_put_stid;
nfserr = ops->proc_layoutget(d_inode(current_fh->fh_dentry),
current_fh, lgp);
if (nfserr)
goto out_put_stid;
nfserr = nfsd4_insert_layout(lgp, ls);
out_put_stid:
mutex_unlock(&ls->ls_mutex);
nfs4_put_stid(&ls->ls_stid);
out:
return nfserr;
}
static __be32
nfsd4_layoutcommit(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
struct nfsd4_layoutcommit *lcp)
{
const struct nfsd4_layout_seg *seg = &lcp->lc_seg;
struct svc_fh *current_fh = &cstate->current_fh;
const struct nfsd4_layout_ops *ops;
loff_t new_size = lcp->lc_last_wr + 1;
struct inode *inode;
struct nfs4_layout_stateid *ls;
__be32 nfserr;
nfserr = fh_verify(rqstp, current_fh, 0, NFSD_MAY_WRITE);
if (nfserr)
goto out;
nfserr = nfserr_layoutunavailable;
ops = nfsd4_layout_verify(current_fh->fh_export, lcp->lc_layout_type);
if (!ops)
goto out;
inode = d_inode(current_fh->fh_dentry);
nfserr = nfserr_inval;
if (new_size <= seg->offset) {
dprintk("pnfsd: last write before layout segment\n");
goto out;
}
if (new_size > seg->offset + seg->length) {
dprintk("pnfsd: last write beyond layout segment\n");
goto out;
}
if (!lcp->lc_newoffset && new_size > i_size_read(inode)) {
dprintk("pnfsd: layoutcommit beyond EOF\n");
goto out;
}
nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lcp->lc_sid,
false, lcp->lc_layout_type,
&ls);
if (nfserr) {
trace_layout_commit_lookup_fail(&lcp->lc_sid);
/* fixup error code as per RFC5661 */
if (nfserr == nfserr_bad_stateid)
nfserr = nfserr_badlayout;
goto out;
}
/* LAYOUTCOMMIT does not require any serialization */
mutex_unlock(&ls->ls_mutex);
if (new_size > i_size_read(inode)) {
lcp->lc_size_chg = 1;
lcp->lc_newsize = new_size;
} else {
lcp->lc_size_chg = 0;
}
nfserr = ops->proc_layoutcommit(inode, lcp);
nfs4_put_stid(&ls->ls_stid);
out:
return nfserr;
}
static __be32
nfsd4_layoutreturn(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
struct nfsd4_layoutreturn *lrp)
{
struct svc_fh *current_fh = &cstate->current_fh;
__be32 nfserr;
nfserr = fh_verify(rqstp, current_fh, 0, NFSD_MAY_NOP);
if (nfserr)
goto out;
nfserr = nfserr_layoutunavailable;
if (!nfsd4_layout_verify(current_fh->fh_export, lrp->lr_layout_type))
goto out;
switch (lrp->lr_seg.iomode) {
case IOMODE_READ:
case IOMODE_RW:
case IOMODE_ANY:
break;
default:
dprintk("%s: invalid iomode %d\n", __func__,
lrp->lr_seg.iomode);
nfserr = nfserr_inval;
goto out;
}
switch (lrp->lr_return_type) {
case RETURN_FILE:
nfserr = nfsd4_return_file_layouts(rqstp, cstate, lrp);
break;
case RETURN_FSID:
case RETURN_ALL:
nfserr = nfsd4_return_client_layouts(rqstp, cstate, lrp);
break;
default:
dprintk("%s: invalid return_type %d\n", __func__,
lrp->lr_return_type);
nfserr = nfserr_inval;
break;
}
out:
return nfserr;
}
#endif /* CONFIG_NFSD_PNFS */
/*
* NULL call.
*/
static __be32
nfsd4_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
{
return nfs_ok;
}
static inline void nfsd4_increment_op_stats(u32 opnum)
{
if (opnum >= FIRST_NFS4_OP && opnum <= LAST_NFS4_OP)
nfsdstats.nfs4_opcount[opnum]++;
}
typedef __be32(*nfsd4op_func)(struct svc_rqst *, struct nfsd4_compound_state *,
void *);
typedef u32(*nfsd4op_rsize)(struct svc_rqst *, struct nfsd4_op *op);
typedef void(*stateid_setter)(struct nfsd4_compound_state *, void *);
typedef void(*stateid_getter)(struct nfsd4_compound_state *, void *);
enum nfsd4_op_flags {
ALLOWED_WITHOUT_FH = 1 << 0, /* No current filehandle required */
ALLOWED_ON_ABSENT_FS = 1 << 1, /* ops processed on absent fs */
ALLOWED_AS_FIRST_OP = 1 << 2, /* ops required first in compound */
/* For rfc 5661 section 2.6.3.1.1: */
OP_HANDLES_WRONGSEC = 1 << 3,
OP_IS_PUTFH_LIKE = 1 << 4,
/*
* These are the ops whose result size we estimate before
* encoding, to avoid performing an op then not being able to
* respond or cache a response. This includes writes and setattrs
* as well as the operations usually called "nonidempotent":
*/
OP_MODIFIES_SOMETHING = 1 << 5,
/*
* Cache compounds containing these ops in the xid-based drc:
* We use the DRC for compounds containing non-idempotent
* operations, *except* those that are 4.1-specific (since
* sessions provide their own EOS), and except for stateful
* operations other than setclientid and setclientid_confirm
* (since sequence numbers provide EOS for open, lock, etc in
* the v4.0 case).
*/
OP_CACHEME = 1 << 6,
/*
* These are ops which clear current state id.
*/
OP_CLEAR_STATEID = 1 << 7,
};
struct nfsd4_operation {
nfsd4op_func op_func;
u32 op_flags;
char *op_name;
/* Try to get response size before operation */
nfsd4op_rsize op_rsize_bop;
stateid_getter op_get_currentstateid;
stateid_setter op_set_currentstateid;
};
static struct nfsd4_operation nfsd4_ops[];
static const char *nfsd4_op_name(unsigned opnum);
/*
* Enforce NFSv4.1 COMPOUND ordering rules:
*
* Also note, enforced elsewhere:
* - SEQUENCE other than as first op results in
* NFS4ERR_SEQUENCE_POS. (Enforced in nfsd4_sequence().)
* - BIND_CONN_TO_SESSION must be the only op in its compound.
* (Enforced in nfsd4_bind_conn_to_session().)
* - DESTROY_SESSION must be the final operation in a compound, if
* sessionid's in SEQUENCE and DESTROY_SESSION are the same.
* (Enforced in nfsd4_destroy_session().)
*/
static __be32 nfs41_check_op_ordering(struct nfsd4_compoundargs *args)
{
struct nfsd4_op *op = &args->ops[0];
/* These ordering requirements don't apply to NFSv4.0: */
if (args->minorversion == 0)
return nfs_ok;
/* This is weird, but OK, not our problem: */
if (args->opcnt == 0)
return nfs_ok;
if (op->status == nfserr_op_illegal)
return nfs_ok;
if (!(nfsd4_ops[op->opnum].op_flags & ALLOWED_AS_FIRST_OP))
return nfserr_op_not_in_session;
if (op->opnum == OP_SEQUENCE)
return nfs_ok;
if (args->opcnt != 1)
return nfserr_not_only_op;
return nfs_ok;
}
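/*
 * Examples of the checks above: a v4.1 compound starting with PUTFH
 * fails with NFS4ERR_OP_NOT_IN_SESSION (PUTFH lacks
 * ALLOWED_AS_FIRST_OP), while one starting with EXCHANGE_ID passes the
 * first check but, not being SEQUENCE, must be the compound's only op
 * or the client gets NFS4ERR_NOT_ONLY_OP.
 */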
static inline struct nfsd4_operation *OPDESC(struct nfsd4_op *op)
{
return &nfsd4_ops[op->opnum];
}
bool nfsd4_cache_this_op(struct nfsd4_op *op)
{
if (op->opnum == OP_ILLEGAL)
return false;
return OPDESC(op)->op_flags & OP_CACHEME;
}
static bool need_wrongsec_check(struct svc_rqst *rqstp)
{
struct nfsd4_compoundres *resp = rqstp->rq_resp;
struct nfsd4_compoundargs *argp = rqstp->rq_argp;
struct nfsd4_op *this = &argp->ops[resp->opcnt - 1];
struct nfsd4_op *next = &argp->ops[resp->opcnt];
struct nfsd4_operation *thisd;
struct nfsd4_operation *nextd;
thisd = OPDESC(this);
/*
* Most ops check wrongsec on their own; only the putfh-like ops
* have special rules.
*/
if (!(thisd->op_flags & OP_IS_PUTFH_LIKE))
return false;
/*
* rfc 5661 2.6.3.1.1.6: don't bother erroring out a
* put-filehandle operation if we're not going to use the
* result:
*/
if (argp->opcnt == resp->opcnt)
return false;
if (next->opnum == OP_ILLEGAL)
return false;
nextd = OPDESC(next);
/*
* Rest of 2.6.3.1.1: certain operations will return WRONGSEC
* errors themselves as necessary; others should check for them
* now:
*/
return !(nextd->op_flags & OP_HANDLES_WRONGSEC);
}
static void svcxdr_init_encode(struct svc_rqst *rqstp,
struct nfsd4_compoundres *resp)
{
struct xdr_stream *xdr = &resp->xdr;
struct xdr_buf *buf = &rqstp->rq_res;
struct kvec *head = buf->head;
xdr->buf = buf;
xdr->iov = head;
xdr->p = head->iov_base + head->iov_len;
xdr->end = head->iov_base + PAGE_SIZE - rqstp->rq_auth_slack;
/* Tail and page_len should be zero at this point: */
buf->len = buf->head[0].iov_len;
xdr->scratch.iov_len = 0;
xdr->page_ptr = buf->pages - 1;
buf->buflen = PAGE_SIZE * (1 + rqstp->rq_page_end - buf->pages)
- rqstp->rq_auth_slack;
}
/*
* COMPOUND call.
*/
static __be32
nfsd4_proc_compound(struct svc_rqst *rqstp,
struct nfsd4_compoundargs *args,
struct nfsd4_compoundres *resp)
{
struct nfsd4_op *op;
struct nfsd4_operation *opdesc;
struct nfsd4_compound_state *cstate = &resp->cstate;
struct svc_fh *current_fh = &cstate->current_fh;
struct svc_fh *save_fh = &cstate->save_fh;
__be32 status;
svcxdr_init_encode(rqstp, resp);
resp->tagp = resp->xdr.p;
/* reserve space for: taglen, tag, and opcnt */
xdr_reserve_space(&resp->xdr, 8 + args->taglen);
resp->taglen = args->taglen;
resp->tag = args->tag;
resp->rqstp = rqstp;
cstate->minorversion = args->minorversion;
fh_init(current_fh, NFS4_FHSIZE);
fh_init(save_fh, NFS4_FHSIZE);
/*
* Don't use the deferral mechanism for NFSv4; compounds make it
* too hard to avoid non-idempotency problems.
*/
clear_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
/*
* According to RFC3010, this takes precedence over all other errors.
*/
status = nfserr_minor_vers_mismatch;
if (nfsd_minorversion(args->minorversion, NFSD_TEST) <= 0)
goto out;
status = nfs41_check_op_ordering(args);
if (status) {
op = &args->ops[0];
op->status = status;
goto encode_op;
}
while (!status && resp->opcnt < args->opcnt) {
op = &args->ops[resp->opcnt++];
dprintk("nfsv4 compound op #%d/%d: %d (%s)\n",
resp->opcnt, args->opcnt, op->opnum,
nfsd4_op_name(op->opnum));
/*
* The XDR decode routines may have pre-set op->status;
* for example, if there is a miscellaneous XDR error
* it will be set to nfserr_bad_xdr.
*/
if (op->status) {
if (op->opnum == OP_OPEN)
op->status = nfsd4_open_omfg(rqstp, cstate, op);
goto encode_op;
}
opdesc = OPDESC(op);
if (!current_fh->fh_dentry) {
if (!(opdesc->op_flags & ALLOWED_WITHOUT_FH)) {
op->status = nfserr_nofilehandle;
goto encode_op;
}
} else if (current_fh->fh_export->ex_fslocs.migrated &&
!(opdesc->op_flags & ALLOWED_ON_ABSENT_FS)) {
op->status = nfserr_moved;
goto encode_op;
}
fh_clear_wcc(current_fh);
/* If op is non-idempotent */
if (opdesc->op_flags & OP_MODIFIES_SOMETHING) {
/*
* Don't execute this op if we couldn't encode a
* successful reply:
*/
u32 plen = opdesc->op_rsize_bop(rqstp, op);
/*
* Plus if there's another operation, make sure
* we'll have space to at least encode an error:
*/
if (resp->opcnt < args->opcnt)
plen += COMPOUND_ERR_SLACK_SPACE;
op->status = nfsd4_check_resp_size(resp, plen);
}
if (op->status)
goto encode_op;
if (opdesc->op_get_currentstateid)
opdesc->op_get_currentstateid(cstate, &op->u);
op->status = opdesc->op_func(rqstp, cstate, &op->u);
if (!op->status) {
if (opdesc->op_set_currentstateid)
opdesc->op_set_currentstateid(cstate, &op->u);
if (opdesc->op_flags & OP_CLEAR_STATEID)
clear_current_stateid(cstate);
if (need_wrongsec_check(rqstp))
op->status = check_nfsd_access(current_fh->fh_export, rqstp);
}
encode_op:
/* Only from SEQUENCE */
if (cstate->status == nfserr_replay_cache) {
dprintk("%s NFS4.1 replay from cache\n", __func__);
status = op->status;
goto out;
}
if (op->status == nfserr_replay_me) {
op->replay = &cstate->replay_owner->so_replay;
nfsd4_encode_replay(&resp->xdr, op);
status = op->status = op->replay->rp_status;
} else {
nfsd4_encode_operation(resp, op);
status = op->status;
}
dprintk("nfsv4 compound op %p opcnt %d #%d: %d: status %d\n",
args->ops, args->opcnt, resp->opcnt, op->opnum,
be32_to_cpu(status));
nfsd4_cstate_clear_replay(cstate);
nfsd4_increment_op_stats(op->opnum);
}
cstate->status = status;
fh_put(current_fh);
fh_put(save_fh);
BUG_ON(cstate->replay_owner);
out:
/* Reset deferral mechanism for RPC deferrals */
set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
dprintk("nfsv4 compound returned %d\n", ntohl(status));
return status;
}
#define op_encode_hdr_size (2)
#define op_encode_stateid_maxsz (XDR_QUADLEN(NFS4_STATEID_SIZE))
#define op_encode_verifier_maxsz (XDR_QUADLEN(NFS4_VERIFIER_SIZE))
#define op_encode_change_info_maxsz (5)
#define nfs4_fattr_bitmap_maxsz (4)
/* We'll fall back on returning no lockowner if run out of space: */
#define op_encode_lockowner_maxsz (0)
#define op_encode_lock_denied_maxsz (8 + op_encode_lockowner_maxsz)
#define nfs4_owner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ))
#define op_encode_ace_maxsz (3 + nfs4_owner_maxsz)
#define op_encode_delegation_maxsz (1 + op_encode_stateid_maxsz + 1 + \
op_encode_ace_maxsz)
#define op_encode_channel_attrs_maxsz (6 + 1 + 1)
static inline u32 nfsd4_only_status_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size) * sizeof(__be32);
}
static inline u32 nfsd4_status_stateid_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_stateid_maxsz) * sizeof(__be32);
}
static inline u32 nfsd4_access_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
/* ac_supported, ac_resp_access */
return (op_encode_hdr_size + 2) * sizeof(__be32);
}
static inline u32 nfsd4_commit_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_verifier_maxsz) * sizeof(__be32);
}
static inline u32 nfsd4_create_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_change_info_maxsz
+ nfs4_fattr_bitmap_maxsz) * sizeof(__be32);
}
/*
* Note since this is an idempotent operation we won't insist on failing
* the op prematurely if the estimate is too large. We may turn off splice
* reads unnecessarily.
*/
static inline u32 nfsd4_getattr_rsize(struct svc_rqst *rqstp,
struct nfsd4_op *op)
{
u32 *bmap = op->u.getattr.ga_bmval;
u32 bmap0 = bmap[0], bmap1 = bmap[1], bmap2 = bmap[2];
u32 ret = 0;
if (bmap0 & FATTR4_WORD0_ACL)
return svc_max_payload(rqstp);
if (bmap0 & FATTR4_WORD0_FS_LOCATIONS)
return svc_max_payload(rqstp);
if (bmap1 & FATTR4_WORD1_OWNER) {
ret += IDMAP_NAMESZ + 4;
bmap1 &= ~FATTR4_WORD1_OWNER;
}
if (bmap1 & FATTR4_WORD1_OWNER_GROUP) {
ret += IDMAP_NAMESZ + 4;
bmap1 &= ~FATTR4_WORD1_OWNER_GROUP;
}
if (bmap0 & FATTR4_WORD0_FILEHANDLE) {
ret += NFS4_FHSIZE + 4;
bmap0 &= ~FATTR4_WORD0_FILEHANDLE;
}
if (bmap2 & FATTR4_WORD2_SECURITY_LABEL) {
ret += NFS4_MAXLABELLEN + 12;
bmap2 &= ~FATTR4_WORD2_SECURITY_LABEL;
}
/*
* Largest of remaining attributes are 16 bytes (e.g.,
* supported_attributes)
*/
ret += 16 * (hweight32(bmap0) + hweight32(bmap1) + hweight32(bmap2));
/* bitmask, length */
ret += 20;
return ret;
}
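/*
 * Worked example: a GETATTR requesting only FATTR4_WORD0_FILEHANDLE and
 * FATTR4_WORD1_OWNER is estimated at (NFS4_FHSIZE + 4) +
 * (IDMAP_NAMESZ + 4) + 20 bytes; both bits were cleared above, so the
 * 16-bytes-per-remaining-attribute term contributes nothing.
 */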
static inline u32 nfsd4_getfh_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + 1) * sizeof(__be32) + NFS4_FHSIZE;
}
static inline u32 nfsd4_link_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_change_info_maxsz)
* sizeof(__be32);
}
static inline u32 nfsd4_lock_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_lock_denied_maxsz)
* sizeof(__be32);
}
static inline u32 nfsd4_open_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_stateid_maxsz
+ op_encode_change_info_maxsz + 1
+ nfs4_fattr_bitmap_maxsz
+ op_encode_delegation_maxsz) * sizeof(__be32);
}
static inline u32 nfsd4_read_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
u32 maxcount = 0, rlen = 0;
maxcount = svc_max_payload(rqstp);
rlen = min(op->u.read.rd_length, maxcount);
return (op_encode_hdr_size + 2 + XDR_QUADLEN(rlen)) * sizeof(__be32);
}
static inline u32 nfsd4_readdir_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
u32 maxcount = 0, rlen = 0;
maxcount = svc_max_payload(rqstp);
rlen = min(op->u.readdir.rd_maxcount, maxcount);
return (op_encode_hdr_size + op_encode_verifier_maxsz +
XDR_QUADLEN(rlen)) * sizeof(__be32);
}
static inline u32 nfsd4_readlink_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + 1) * sizeof(__be32) + PAGE_SIZE;
}
static inline u32 nfsd4_remove_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_change_info_maxsz)
* sizeof(__be32);
}
static inline u32 nfsd4_rename_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_change_info_maxsz
+ op_encode_change_info_maxsz) * sizeof(__be32);
}
static inline u32 nfsd4_sequence_rsize(struct svc_rqst *rqstp,
struct nfsd4_op *op)
{
return (op_encode_hdr_size
+ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 5) * sizeof(__be32);
}
static inline u32 nfsd4_test_stateid_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + 1 + op->u.test_stateid.ts_num_ids)
* sizeof(__be32);
}
static inline u32 nfsd4_setattr_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + nfs4_fattr_bitmap_maxsz) * sizeof(__be32);
}
static inline u32 nfsd4_secinfo_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + RPC_AUTH_MAXFLAVOR *
(4 + XDR_QUADLEN(GSS_OID_MAX_LEN))) * sizeof(__be32);
}
static inline u32 nfsd4_setclientid_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + 2 + XDR_QUADLEN(NFS4_VERIFIER_SIZE)) *
sizeof(__be32);
}
static inline u32 nfsd4_write_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + 2 + op_encode_verifier_maxsz) * sizeof(__be32);
}
static inline u32 nfsd4_exchange_id_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + 2 + 1 + /* eir_clientid, eir_sequenceid */\
1 + 1 + /* eir_flags, spr_how */\
4 + /* spo_must_enforce & _allow with bitmap */\
2 + /*eir_server_owner.so_minor_id */\
/* eir_server_owner.so_major_id<> */\
XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 +\
/* eir_server_scope<> */\
XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 +\
1 + /* eir_server_impl_id array length */\
0 /* ignored eir_server_impl_id contents */) * sizeof(__be32);
}
static inline u32 nfsd4_bind_conn_to_session_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + \
XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + /* bctsr_sessid */\
2 /* bctsr_dir, use_conn_in_rdma_mode */) * sizeof(__be32);
}
static inline u32 nfsd4_create_session_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + \
XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + /* sessionid */\
2 + /* csr_sequence, csr_flags */\
op_encode_channel_attrs_maxsz + \
op_encode_channel_attrs_maxsz) * sizeof(__be32);
}
static inline u32 nfsd4_copy_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size +
1 /* wr_callback */ +
op_encode_stateid_maxsz /* wr_callback */ +
2 /* wr_count */ +
1 /* wr_committed */ +
op_encode_verifier_maxsz +
1 /* cr_consecutive */ +
1 /* cr_synchronous */) * sizeof(__be32);
}
#ifdef CONFIG_NFSD_PNFS
static inline u32 nfsd4_getdeviceinfo_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
u32 maxcount = 0, rlen = 0;
maxcount = svc_max_payload(rqstp);
rlen = min(op->u.getdeviceinfo.gd_maxcount, maxcount);
return (op_encode_hdr_size +
1 /* gd_layout_type*/ +
XDR_QUADLEN(rlen) +
2 /* gd_notify_types */) * sizeof(__be32);
}
/*
* At this stage we don't really know what layout driver will handle the request,
* so we need to define an arbitrary upper bound here.
*/
#define MAX_LAYOUT_SIZE 128
static inline u32 nfsd4_layoutget_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size +
1 /* logr_return_on_close */ +
op_encode_stateid_maxsz +
1 /* nr of layouts */ +
MAX_LAYOUT_SIZE) * sizeof(__be32);
}
static inline u32 nfsd4_layoutcommit_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size +
1 /* locr_newsize */ +
2 /* ns_size */) * sizeof(__be32);
}
static inline u32 nfsd4_layoutreturn_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size +
1 /* lrs_stateid */ +
op_encode_stateid_maxsz) * sizeof(__be32);
}
#endif /* CONFIG_NFSD_PNFS */
static inline u32 nfsd4_seek_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + 3) * sizeof(__be32);
}
static struct nfsd4_operation nfsd4_ops[] = {
[OP_ACCESS] = {
.op_func = (nfsd4op_func)nfsd4_access,
.op_name = "OP_ACCESS",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_access_rsize,
},
[OP_CLOSE] = {
.op_func = (nfsd4op_func)nfsd4_close,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_CLOSE",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize,
.op_get_currentstateid = (stateid_getter)nfsd4_get_closestateid,
.op_set_currentstateid = (stateid_setter)nfsd4_set_closestateid,
},
[OP_COMMIT] = {
.op_func = (nfsd4op_func)nfsd4_commit,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_COMMIT",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_commit_rsize,
},
[OP_CREATE] = {
.op_func = (nfsd4op_func)nfsd4_create,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME | OP_CLEAR_STATEID,
.op_name = "OP_CREATE",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_create_rsize,
},
[OP_DELEGRETURN] = {
.op_func = (nfsd4op_func)nfsd4_delegreturn,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_DELEGRETURN",
.op_rsize_bop = nfsd4_only_status_rsize,
.op_get_currentstateid = (stateid_getter)nfsd4_get_delegreturnstateid,
},
[OP_GETATTR] = {
.op_func = (nfsd4op_func)nfsd4_getattr,
.op_flags = ALLOWED_ON_ABSENT_FS,
.op_rsize_bop = nfsd4_getattr_rsize,
.op_name = "OP_GETATTR",
},
[OP_GETFH] = {
.op_func = (nfsd4op_func)nfsd4_getfh,
.op_name = "OP_GETFH",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_getfh_rsize,
},
[OP_LINK] = {
.op_func = (nfsd4op_func)nfsd4_link,
.op_flags = ALLOWED_ON_ABSENT_FS | OP_MODIFIES_SOMETHING
| OP_CACHEME,
.op_name = "OP_LINK",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_link_rsize,
},
[OP_LOCK] = {
.op_func = (nfsd4op_func)nfsd4_lock,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_LOCK",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_lock_rsize,
.op_set_currentstateid = (stateid_setter)nfsd4_set_lockstateid,
},
[OP_LOCKT] = {
.op_func = (nfsd4op_func)nfsd4_lockt,
.op_name = "OP_LOCKT",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_lock_rsize,
},
[OP_LOCKU] = {
.op_func = (nfsd4op_func)nfsd4_locku,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_LOCKU",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize,
.op_get_currentstateid = (stateid_getter)nfsd4_get_lockustateid,
},
[OP_LOOKUP] = {
.op_func = (nfsd4op_func)nfsd4_lookup,
.op_flags = OP_HANDLES_WRONGSEC | OP_CLEAR_STATEID,
.op_name = "OP_LOOKUP",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_LOOKUPP] = {
.op_func = (nfsd4op_func)nfsd4_lookupp,
.op_flags = OP_HANDLES_WRONGSEC | OP_CLEAR_STATEID,
.op_name = "OP_LOOKUPP",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_NVERIFY] = {
.op_func = (nfsd4op_func)nfsd4_nverify,
.op_name = "OP_NVERIFY",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_OPEN] = {
.op_func = (nfsd4op_func)nfsd4_open,
.op_flags = OP_HANDLES_WRONGSEC | OP_MODIFIES_SOMETHING,
.op_name = "OP_OPEN",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_open_rsize,
.op_set_currentstateid = (stateid_setter)nfsd4_set_openstateid,
},
[OP_OPEN_CONFIRM] = {
.op_func = (nfsd4op_func)nfsd4_open_confirm,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_OPEN_CONFIRM",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize,
},
[OP_OPEN_DOWNGRADE] = {
.op_func = (nfsd4op_func)nfsd4_open_downgrade,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_OPEN_DOWNGRADE",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize,
.op_get_currentstateid = (stateid_getter)nfsd4_get_opendowngradestateid,
.op_set_currentstateid = (stateid_setter)nfsd4_set_opendowngradestateid,
},
[OP_PUTFH] = {
.op_func = (nfsd4op_func)nfsd4_putfh,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_IS_PUTFH_LIKE | OP_CLEAR_STATEID,
.op_name = "OP_PUTFH",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_PUTPUBFH] = {
.op_func = (nfsd4op_func)nfsd4_putrootfh,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_IS_PUTFH_LIKE | OP_CLEAR_STATEID,
.op_name = "OP_PUTPUBFH",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_PUTROOTFH] = {
.op_func = (nfsd4op_func)nfsd4_putrootfh,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_IS_PUTFH_LIKE | OP_CLEAR_STATEID,
.op_name = "OP_PUTROOTFH",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_READ] = {
.op_func = (nfsd4op_func)nfsd4_read,
.op_name = "OP_READ",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_read_rsize,
.op_get_currentstateid = (stateid_getter)nfsd4_get_readstateid,
},
[OP_READDIR] = {
.op_func = (nfsd4op_func)nfsd4_readdir,
.op_name = "OP_READDIR",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_readdir_rsize,
},
[OP_READLINK] = {
.op_func = (nfsd4op_func)nfsd4_readlink,
.op_name = "OP_READLINK",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_readlink_rsize,
},
[OP_REMOVE] = {
.op_func = (nfsd4op_func)nfsd4_remove,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_REMOVE",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_remove_rsize,
},
[OP_RENAME] = {
.op_func = (nfsd4op_func)nfsd4_rename,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_RENAME",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_rename_rsize,
},
[OP_RENEW] = {
.op_func = (nfsd4op_func)nfsd4_renew,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_MODIFIES_SOMETHING,
.op_name = "OP_RENEW",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_RESTOREFH] = {
.op_func = (nfsd4op_func)nfsd4_restorefh,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_IS_PUTFH_LIKE | OP_MODIFIES_SOMETHING,
.op_name = "OP_RESTOREFH",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_SAVEFH] = {
.op_func = (nfsd4op_func)nfsd4_savefh,
.op_flags = OP_HANDLES_WRONGSEC | OP_MODIFIES_SOMETHING,
.op_name = "OP_SAVEFH",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_SECINFO] = {
.op_func = (nfsd4op_func)nfsd4_secinfo,
.op_flags = OP_HANDLES_WRONGSEC,
.op_name = "OP_SECINFO",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_secinfo_rsize,
},
[OP_SETATTR] = {
.op_func = (nfsd4op_func)nfsd4_setattr,
.op_name = "OP_SETATTR",
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_rsize_bop = (nfsd4op_rsize)nfsd4_setattr_rsize,
.op_get_currentstateid = (stateid_getter)nfsd4_get_setattrstateid,
},
[OP_SETCLIENTID] = {
.op_func = (nfsd4op_func)nfsd4_setclientid,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_SETCLIENTID",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_setclientid_rsize,
},
[OP_SETCLIENTID_CONFIRM] = {
.op_func = (nfsd4op_func)nfsd4_setclientid_confirm,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_SETCLIENTID_CONFIRM",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_VERIFY] = {
.op_func = (nfsd4op_func)nfsd4_verify,
.op_name = "OP_VERIFY",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_WRITE] = {
.op_func = (nfsd4op_func)nfsd4_write,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_WRITE",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_write_rsize,
.op_get_currentstateid = (stateid_getter)nfsd4_get_writestateid,
},
[OP_RELEASE_LOCKOWNER] = {
.op_func = (nfsd4op_func)nfsd4_release_lockowner,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
| OP_MODIFIES_SOMETHING,
.op_name = "OP_RELEASE_LOCKOWNER",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
/* NFSv4.1 operations */
[OP_EXCHANGE_ID] = {
.op_func = (nfsd4op_func)nfsd4_exchange_id,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
| OP_MODIFIES_SOMETHING,
.op_name = "OP_EXCHANGE_ID",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_exchange_id_rsize,
},
[OP_BACKCHANNEL_CTL] = {
.op_func = (nfsd4op_func)nfsd4_backchannel_ctl,
.op_flags = ALLOWED_WITHOUT_FH | OP_MODIFIES_SOMETHING,
.op_name = "OP_BACKCHANNEL_CTL",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_BIND_CONN_TO_SESSION] = {
.op_func = (nfsd4op_func)nfsd4_bind_conn_to_session,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
| OP_MODIFIES_SOMETHING,
.op_name = "OP_BIND_CONN_TO_SESSION",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_bind_conn_to_session_rsize,
},
[OP_CREATE_SESSION] = {
.op_func = (nfsd4op_func)nfsd4_create_session,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
| OP_MODIFIES_SOMETHING,
.op_name = "OP_CREATE_SESSION",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_create_session_rsize,
},
[OP_DESTROY_SESSION] = {
.op_func = (nfsd4op_func)nfsd4_destroy_session,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
| OP_MODIFIES_SOMETHING,
.op_name = "OP_DESTROY_SESSION",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_SEQUENCE] = {
.op_func = (nfsd4op_func)nfsd4_sequence,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
.op_name = "OP_SEQUENCE",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_sequence_rsize,
},
[OP_DESTROY_CLIENTID] = {
.op_func = (nfsd4op_func)nfsd4_destroy_clientid,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
| OP_MODIFIES_SOMETHING,
.op_name = "OP_DESTROY_CLIENTID",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_RECLAIM_COMPLETE] = {
.op_func = (nfsd4op_func)nfsd4_reclaim_complete,
.op_flags = ALLOWED_WITHOUT_FH | OP_MODIFIES_SOMETHING,
.op_name = "OP_RECLAIM_COMPLETE",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_SECINFO_NO_NAME] = {
.op_func = (nfsd4op_func)nfsd4_secinfo_no_name,
.op_flags = OP_HANDLES_WRONGSEC,
.op_name = "OP_SECINFO_NO_NAME",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_secinfo_rsize,
},
[OP_TEST_STATEID] = {
.op_func = (nfsd4op_func)nfsd4_test_stateid,
.op_flags = ALLOWED_WITHOUT_FH,
.op_name = "OP_TEST_STATEID",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_test_stateid_rsize,
},
[OP_FREE_STATEID] = {
.op_func = (nfsd4op_func)nfsd4_free_stateid,
.op_flags = ALLOWED_WITHOUT_FH | OP_MODIFIES_SOMETHING,
.op_name = "OP_FREE_STATEID",
.op_get_currentstateid = (stateid_getter)nfsd4_get_freestateid,
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
#ifdef CONFIG_NFSD_PNFS
[OP_GETDEVICEINFO] = {
.op_func = (nfsd4op_func)nfsd4_getdeviceinfo,
.op_flags = ALLOWED_WITHOUT_FH,
.op_name = "OP_GETDEVICEINFO",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_getdeviceinfo_rsize,
},
[OP_LAYOUTGET] = {
.op_func = (nfsd4op_func)nfsd4_layoutget,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_LAYOUTGET",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_layoutget_rsize,
},
[OP_LAYOUTCOMMIT] = {
.op_func = (nfsd4op_func)nfsd4_layoutcommit,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_LAYOUTCOMMIT",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_layoutcommit_rsize,
},
[OP_LAYOUTRETURN] = {
.op_func = (nfsd4op_func)nfsd4_layoutreturn,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_LAYOUTRETURN",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_layoutreturn_rsize,
},
#endif /* CONFIG_NFSD_PNFS */
/* NFSv4.2 operations */
[OP_ALLOCATE] = {
.op_func = (nfsd4op_func)nfsd4_allocate,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_ALLOCATE",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_DEALLOCATE] = {
.op_func = (nfsd4op_func)nfsd4_deallocate,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_DEALLOCATE",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_CLONE] = {
.op_func = (nfsd4op_func)nfsd4_clone,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_CLONE",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_COPY] = {
.op_func = (nfsd4op_func)nfsd4_copy,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_COPY",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_copy_rsize,
},
[OP_SEEK] = {
.op_func = (nfsd4op_func)nfsd4_seek,
.op_name = "OP_SEEK",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_seek_rsize,
},
};
/**
* nfsd4_spo_must_allow - Determine if the compound op contains an
* operation that is allowed to be sent with machine credentials
*
* @rqstp: a pointer to the struct svc_rqst
*
* Checks to see if the compound contains a spo_must_allow op
* and confirms that it was sent with the proper machine creds.
*/
bool nfsd4_spo_must_allow(struct svc_rqst *rqstp)
{
struct nfsd4_compoundres *resp = rqstp->rq_resp;
struct nfsd4_compoundargs *argp = rqstp->rq_argp;
struct nfsd4_op *this = &argp->ops[resp->opcnt - 1];
struct nfsd4_compound_state *cstate = &resp->cstate;
struct nfs4_op_map *allow = &cstate->clp->cl_spo_must_allow;
u32 opiter;
if (!cstate->minorversion)
return false;
if (cstate->spo_must_allowed == true)
return true;
opiter = resp->opcnt;
while (opiter < argp->opcnt) {
this = &argp->ops[opiter++];
if (test_bit(this->opnum, allow->u.longs) &&
cstate->clp->cl_mach_cred &&
nfsd4_mach_creds_match(cstate->clp, rqstp)) {
cstate->spo_must_allowed = true;
return true;
}
}
cstate->spo_must_allowed = false;
return false;
}
int nfsd4_max_reply(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
if (op->opnum == OP_ILLEGAL || op->status == nfserr_notsupp)
return op_encode_hdr_size * sizeof(__be32);
BUG_ON(OPDESC(op)->op_rsize_bop == NULL);
return OPDESC(op)->op_rsize_bop(rqstp, op);
}
void warn_on_nonidempotent_op(struct nfsd4_op *op)
{
if (OPDESC(op)->op_flags & OP_MODIFIES_SOMETHING) {
pr_err("unable to encode reply to nonidempotent op %d (%s)\n",
op->opnum, nfsd4_op_name(op->opnum));
WARN_ON_ONCE(1);
}
}
static const char *nfsd4_op_name(unsigned opnum)
{
if (opnum < ARRAY_SIZE(nfsd4_ops))
return nfsd4_ops[opnum].op_name;
return "unknown_operation";
}
#define nfsd4_voidres nfsd4_voidargs
struct nfsd4_voidargs { int dummy; };
static struct svc_procedure nfsd_procedures4[2] = {
[NFSPROC4_NULL] = {
.pc_func = (svc_procfunc) nfsd4_proc_null,
.pc_encode = (kxdrproc_t) nfs4svc_encode_voidres,
.pc_argsize = sizeof(struct nfsd4_voidargs),
.pc_ressize = sizeof(struct nfsd4_voidres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = 1,
},
[NFSPROC4_COMPOUND] = {
.pc_func = (svc_procfunc) nfsd4_proc_compound,
.pc_decode = (kxdrproc_t) nfs4svc_decode_compoundargs,
.pc_encode = (kxdrproc_t) nfs4svc_encode_compoundres,
.pc_argsize = sizeof(struct nfsd4_compoundargs),
.pc_ressize = sizeof(struct nfsd4_compoundres),
.pc_release = nfsd4_release_compoundargs,
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = NFSD_BUFSIZE/4,
},
};
struct svc_version nfsd_version4 = {
.vs_vers = 4,
.vs_nproc = 2,
.vs_proc = nfsd_procedures4,
.vs_dispatch = nfsd_dispatch,
.vs_xdrsize = NFS4_SVC_XDRSIZE,
.vs_rpcb_optnl = true,
.vs_need_cong_ctrl = true,
};
/*
* Local variables:
* c-basic-offset: 8
* End:
*/
| ./CrossVul/dataset_final_sorted/CWE-404/c/good_3351_4 |
crossvul-cpp_data_bad_3351_6 | /*
* Server-side XDR for NFSv4
*
* Copyright (c) 2002 The Regents of the University of Michigan.
* All rights reserved.
*
* Kendrick Smith <kmsmith@umich.edu>
* Andy Adamson <andros@umich.edu>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/fs_struct.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/statfs.h>
#include <linux/utsname.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/svcauth_gss.h>
#include "idmap.h"
#include "acl.h"
#include "xdr4.h"
#include "vfs.h"
#include "state.h"
#include "cache.h"
#include "netns.h"
#include "pnfs.h"
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
#include <linux/security.h>
#endif
#define NFSDDBG_FACILITY NFSDDBG_XDR
const u32 nfsd_suppattrs[3][3] = {
{NFSD4_SUPPORTED_ATTRS_WORD0,
NFSD4_SUPPORTED_ATTRS_WORD1,
NFSD4_SUPPORTED_ATTRS_WORD2},
{NFSD4_1_SUPPORTED_ATTRS_WORD0,
NFSD4_1_SUPPORTED_ATTRS_WORD1,
NFSD4_1_SUPPORTED_ATTRS_WORD2},
{NFSD4_1_SUPPORTED_ATTRS_WORD0,
NFSD4_1_SUPPORTED_ATTRS_WORD1,
NFSD4_2_SUPPORTED_ATTRS_WORD2},
};
/*
* As per the referral draft, the fsid for a referral MUST be different from
* the fsid of the containing directory, in order to indicate to the client
* that a filesystem boundary is present. We use a fixed fsid for a referral.
*/
#define NFS4_REFERRAL_FSID_MAJOR 0x8000000ULL
#define NFS4_REFERRAL_FSID_MINOR 0x8000000ULL
static __be32
check_filename(char *str, int len)
{
int i;
if (len == 0)
return nfserr_inval;
if (isdotent(str, len))
return nfserr_badname;
for (i = 0; i < len; i++)
if (str[i] == '/')
return nfserr_badname;
return 0;
}
#define DECODE_HEAD \
__be32 *p; \
__be32 status
#define DECODE_TAIL \
status = 0; \
out: \
return status; \
xdr_error: \
dprintk("NFSD: xdr error (%s:%d)\n", \
__FILE__, __LINE__); \
status = nfserr_bad_xdr; \
goto out
#define READMEM(x,nbytes) do { \
x = (char *)p; \
p += XDR_QUADLEN(nbytes); \
} while (0)
#define SAVEMEM(x,nbytes) do { \
if (!(x = (p==argp->tmp || p == argp->tmpp) ? \
savemem(argp, p, nbytes) : \
(char *)p)) { \
dprintk("NFSD: xdr error (%s:%d)\n", \
__FILE__, __LINE__); \
goto xdr_error; \
} \
p += XDR_QUADLEN(nbytes); \
} while (0)
#define COPYMEM(x,nbytes) do { \
memcpy((x), p, nbytes); \
p += XDR_QUADLEN(nbytes); \
} while (0)
/* READ_BUF, read_buf(): nbytes must be <= PAGE_SIZE */
#define READ_BUF(nbytes) do { \
if (nbytes <= (u32)((char *)argp->end - (char *)argp->p)) { \
p = argp->p; \
argp->p += XDR_QUADLEN(nbytes); \
} else if (!(p = read_buf(argp, nbytes))) { \
dprintk("NFSD: xdr error (%s:%d)\n", \
__FILE__, __LINE__); \
goto xdr_error; \
} \
} while (0)
static void next_decode_page(struct nfsd4_compoundargs *argp)
{
argp->p = page_address(argp->pagelist[0]);
argp->pagelist++;
if (argp->pagelen < PAGE_SIZE) {
argp->end = argp->p + (argp->pagelen>>2);
argp->pagelen = 0;
} else {
argp->end = argp->p + (PAGE_SIZE>>2);
argp->pagelen -= PAGE_SIZE;
}
}
static __be32 *read_buf(struct nfsd4_compoundargs *argp, u32 nbytes)
{
/* We want more bytes than seem to be available.
* Maybe we need a new page, maybe we have just run out
*/
unsigned int avail = (char *)argp->end - (char *)argp->p;
__be32 *p;
if (avail + argp->pagelen < nbytes)
return NULL;
if (avail + PAGE_SIZE < nbytes) /* need more than a page !! */
return NULL;
/* ok, we can do it with the current plus the next page */
if (nbytes <= sizeof(argp->tmp))
p = argp->tmp;
else {
kfree(argp->tmpp);
p = argp->tmpp = kmalloc(nbytes, GFP_KERNEL);
if (!p)
return NULL;
}
/*
* The following memcpy is safe because read_buf is always
* called with nbytes > avail, and the two cases above both
* guarantee p points to at least nbytes bytes.
*/
memcpy(p, argp->p, avail);
next_decode_page(argp);
memcpy(((char*)p)+avail, argp->p, (nbytes - avail));
argp->p += XDR_QUADLEN(nbytes - avail);
return p;
}
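/*
* Worked example (illustrative): with 8 bytes left before argp->end,
* READ_BUF(12) falls through to read_buf(), which copies the 8
* available bytes into argp->tmp, steps to the next page with
* next_decode_page(), copies the remaining 4 bytes, and leaves
* argp->p just past them.  Anything that cannot be satisfied from the
* rest of this page plus one more is refused.
*/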
static int zero_clientid(clientid_t *clid)
{
return (clid->cl_boot == 0) && (clid->cl_id == 0);
}
/**
* svcxdr_tmpalloc - allocate memory to be freed after compound processing
* @argp: NFSv4 compound argument structure
* @len: length of buffer to allocate
*
* Allocates a buffer of @len bytes that will be freed (with kfree())
* when processing of the compound operation described in @argp
* finishes.
*/
static void *
svcxdr_tmpalloc(struct nfsd4_compoundargs *argp, u32 len)
{
struct svcxdr_tmpbuf *tb;
tb = kmalloc(sizeof(*tb) + len, GFP_KERNEL);
if (!tb)
return NULL;
tb->next = argp->to_free;
argp->to_free = tb;
return tb->buf;
}
/*
* For xdr strings that need to be passed to other kernel api's
* as null-terminated strings.
*
* Note null-terminating in place usually isn't safe since the
* buffer might end on a page boundary.
*/
static char *
svcxdr_dupstr(struct nfsd4_compoundargs *argp, void *buf, u32 len)
{
char *p = svcxdr_tmpalloc(argp, len + 1);
if (!p)
return NULL;
memcpy(p, buf, len);
p[len] = '\0';
return p;
}
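/*
* Usage note: decoded opaque data headed for APIs expecting C strings
* is duplicated like this rather than NUL-terminated in place; see the
* cr_data handling in nfsd4_decode_create() below.  The copy is freed
* automatically with the compound.
*/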
/**
* savemem - duplicate a chunk of memory for later processing
* @argp: NFSv4 compound argument structure to be freed with
* @p: pointer to be duplicated
* @nbytes: length to be duplicated
*
* Returns a pointer to a copy of @nbytes bytes of memory at @p
* that are preserved until processing of the NFSv4 compound
* operation described by @argp finishes.
*/
static char *savemem(struct nfsd4_compoundargs *argp, __be32 *p, int nbytes)
{
void *ret;
ret = svcxdr_tmpalloc(argp, nbytes);
if (!ret)
return NULL;
memcpy(ret, p, nbytes);
return ret;
}
/*
* Decode an nfstime4: a 64-bit seconds count (truncated into tv_sec)
* followed by a 32-bit nseconds count, which must be a valid
* nanosecond value (less than one billion) or the time is rejected.
*/
static __be32
nfsd4_decode_time(struct nfsd4_compoundargs *argp, struct timespec *tv)
{
DECODE_HEAD;
u64 sec;
READ_BUF(12);
p = xdr_decode_hyper(p, &sec);
tv->tv_sec = sec;
tv->tv_nsec = be32_to_cpup(p++);
if (tv->tv_nsec >= (u32)1000000000)
return nfserr_inval;
DECODE_TAIL;
}
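/*
* nfstime4 wire layout (per RFC 7530): 12 bytes, a 64-bit seconds
* count followed by a 32-bit nseconds count, so e.g. 1.5s arrives as
* seconds 0x0000000000000001 and nseconds 0x1dcd6500 (500000000).
*/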
static __be32
nfsd4_decode_bitmap(struct nfsd4_compoundargs *argp, u32 *bmval)
{
u32 bmlen;
DECODE_HEAD;
bmval[0] = 0;
bmval[1] = 0;
bmval[2] = 0;
READ_BUF(4);
bmlen = be32_to_cpup(p++);
if (bmlen > 1000)
goto xdr_error;
READ_BUF(bmlen << 2);
if (bmlen > 0)
bmval[0] = be32_to_cpup(p++);
if (bmlen > 1)
bmval[1] = be32_to_cpup(p++);
if (bmlen > 2)
bmval[2] = be32_to_cpup(p++);
DECODE_TAIL;
}
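/*
* bitmap4 wire example (illustrative): a client interested only in
* FATTR4_WORD0_SIZE sends <1, 0x00000010>.  Words beyond the declared
* length stay zero, and words past the third are consumed from the
* stream but ignored.
*/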
static __be32
nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
struct iattr *iattr, struct nfs4_acl **acl,
struct xdr_netobj *label, int *umask)
{
int expected_len, len = 0;
u32 dummy32;
char *buf;
DECODE_HEAD;
iattr->ia_valid = 0;
if ((status = nfsd4_decode_bitmap(argp, bmval)))
return status;
if (bmval[0] & ~NFSD_WRITEABLE_ATTRS_WORD0
|| bmval[1] & ~NFSD_WRITEABLE_ATTRS_WORD1
|| bmval[2] & ~NFSD_WRITEABLE_ATTRS_WORD2) {
if (nfsd_attrs_supported(argp->minorversion, bmval))
return nfserr_inval;
return nfserr_attrnotsupp;
}
READ_BUF(4);
expected_len = be32_to_cpup(p++);
if (bmval[0] & FATTR4_WORD0_SIZE) {
READ_BUF(8);
len += 8;
p = xdr_decode_hyper(p, &iattr->ia_size);
iattr->ia_valid |= ATTR_SIZE;
}
if (bmval[0] & FATTR4_WORD0_ACL) {
u32 nace;
struct nfs4_ace *ace;
READ_BUF(4); len += 4;
nace = be32_to_cpup(p++);
if (nace > NFS4_ACL_MAX)
return nfserr_fbig;
*acl = svcxdr_tmpalloc(argp, nfs4_acl_bytes(nace));
if (*acl == NULL)
return nfserr_jukebox;
(*acl)->naces = nace;
for (ace = (*acl)->aces; ace < (*acl)->aces + nace; ace++) {
READ_BUF(16); len += 16;
ace->type = be32_to_cpup(p++);
ace->flag = be32_to_cpup(p++);
ace->access_mask = be32_to_cpup(p++);
dummy32 = be32_to_cpup(p++);
READ_BUF(dummy32);
len += XDR_QUADLEN(dummy32) << 2;
READMEM(buf, dummy32);
ace->whotype = nfs4_acl_get_whotype(buf, dummy32);
status = nfs_ok;
if (ace->whotype != NFS4_ACL_WHO_NAMED)
;
else if (ace->flag & NFS4_ACE_IDENTIFIER_GROUP)
status = nfsd_map_name_to_gid(argp->rqstp,
buf, dummy32, &ace->who_gid);
else
status = nfsd_map_name_to_uid(argp->rqstp,
buf, dummy32, &ace->who_uid);
if (status)
return status;
}
} else
*acl = NULL;
if (bmval[1] & FATTR4_WORD1_MODE) {
READ_BUF(4);
len += 4;
iattr->ia_mode = be32_to_cpup(p++);
iattr->ia_mode &= (S_IFMT | S_IALLUGO);
iattr->ia_valid |= ATTR_MODE;
}
if (bmval[1] & FATTR4_WORD1_OWNER) {
READ_BUF(4);
len += 4;
dummy32 = be32_to_cpup(p++);
READ_BUF(dummy32);
len += (XDR_QUADLEN(dummy32) << 2);
READMEM(buf, dummy32);
if ((status = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid)))
return status;
iattr->ia_valid |= ATTR_UID;
}
if (bmval[1] & FATTR4_WORD1_OWNER_GROUP) {
READ_BUF(4);
len += 4;
dummy32 = be32_to_cpup(p++);
READ_BUF(dummy32);
len += (XDR_QUADLEN(dummy32) << 2);
READMEM(buf, dummy32);
if ((status = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid)))
return status;
iattr->ia_valid |= ATTR_GID;
}
if (bmval[1] & FATTR4_WORD1_TIME_ACCESS_SET) {
READ_BUF(4);
len += 4;
dummy32 = be32_to_cpup(p++);
switch (dummy32) {
case NFS4_SET_TO_CLIENT_TIME:
len += 12;
status = nfsd4_decode_time(argp, &iattr->ia_atime);
if (status)
return status;
iattr->ia_valid |= (ATTR_ATIME | ATTR_ATIME_SET);
break;
case NFS4_SET_TO_SERVER_TIME:
iattr->ia_valid |= ATTR_ATIME;
break;
default:
goto xdr_error;
}
}
if (bmval[1] & FATTR4_WORD1_TIME_MODIFY_SET) {
READ_BUF(4);
len += 4;
dummy32 = be32_to_cpup(p++);
switch (dummy32) {
case NFS4_SET_TO_CLIENT_TIME:
len += 12;
status = nfsd4_decode_time(argp, &iattr->ia_mtime);
if (status)
return status;
iattr->ia_valid |= (ATTR_MTIME | ATTR_MTIME_SET);
break;
case NFS4_SET_TO_SERVER_TIME:
iattr->ia_valid |= ATTR_MTIME;
break;
default:
goto xdr_error;
}
}
label->len = 0;
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
if (bmval[2] & FATTR4_WORD2_SECURITY_LABEL) {
READ_BUF(4);
len += 4;
dummy32 = be32_to_cpup(p++); /* lfs: we don't use it */
READ_BUF(4);
len += 4;
dummy32 = be32_to_cpup(p++); /* pi: we don't use it either */
READ_BUF(4);
len += 4;
dummy32 = be32_to_cpup(p++);
READ_BUF(dummy32);
if (dummy32 > NFS4_MAXLABELLEN)
return nfserr_badlabel;
len += (XDR_QUADLEN(dummy32) << 2);
READMEM(buf, dummy32);
label->len = dummy32;
label->data = svcxdr_dupstr(argp, buf, dummy32);
if (!label->data)
return nfserr_jukebox;
}
#endif
if (bmval[2] & FATTR4_WORD2_MODE_UMASK) {
if (!umask)
goto xdr_error;
READ_BUF(8);
len += 8;
dummy32 = be32_to_cpup(p++);
iattr->ia_mode = dummy32 & (S_IFMT | S_IALLUGO);
dummy32 = be32_to_cpup(p++);
*umask = dummy32 & S_IRWXUGO;
iattr->ia_valid |= ATTR_MODE;
}
if (len != expected_len)
goto xdr_error;
DECODE_TAIL;
}
static __be32
nfsd4_decode_stateid(struct nfsd4_compoundargs *argp, stateid_t *sid)
{
DECODE_HEAD;
READ_BUF(sizeof(stateid_t));
sid->si_generation = be32_to_cpup(p++);
COPYMEM(&sid->si_opaque, sizeof(stateid_opaque_t));
DECODE_TAIL;
}
static __be32
nfsd4_decode_access(struct nfsd4_compoundargs *argp, struct nfsd4_access *access)
{
DECODE_HEAD;
READ_BUF(4);
access->ac_req_access = be32_to_cpup(p++);
DECODE_TAIL;
}
static __be32 nfsd4_decode_cb_sec(struct nfsd4_compoundargs *argp, struct nfsd4_cb_sec *cbs)
{
DECODE_HEAD;
u32 dummy, uid, gid;
char *machine_name;
int i;
int nr_secflavs;
/* callback_sec_params4 */
READ_BUF(4);
nr_secflavs = be32_to_cpup(p++);
if (nr_secflavs)
cbs->flavor = (u32)(-1);
else
/* Is this legal? Be generous, take it to mean AUTH_NONE: */
cbs->flavor = 0;
for (i = 0; i < nr_secflavs; ++i) {
READ_BUF(4);
dummy = be32_to_cpup(p++);
switch (dummy) {
case RPC_AUTH_NULL:
/* Nothing to read */
if (cbs->flavor == (u32)(-1))
cbs->flavor = RPC_AUTH_NULL;
break;
case RPC_AUTH_UNIX:
READ_BUF(8);
/* stamp */
dummy = be32_to_cpup(p++);
/* machine name */
dummy = be32_to_cpup(p++);
READ_BUF(dummy);
SAVEMEM(machine_name, dummy);
/* uid, gid */
READ_BUF(8);
uid = be32_to_cpup(p++);
gid = be32_to_cpup(p++);
/* more gids */
READ_BUF(4);
dummy = be32_to_cpup(p++);
READ_BUF(dummy * 4);
if (cbs->flavor == (u32)(-1)) {
kuid_t kuid = make_kuid(&init_user_ns, uid);
kgid_t kgid = make_kgid(&init_user_ns, gid);
if (uid_valid(kuid) && gid_valid(kgid)) {
cbs->uid = kuid;
cbs->gid = kgid;
cbs->flavor = RPC_AUTH_UNIX;
} else {
dprintk("RPC_AUTH_UNIX with invalid"
"uid or gid ignoring!\n");
}
}
break;
case RPC_AUTH_GSS:
dprintk("RPC_AUTH_GSS callback secflavor "
"not supported!\n");
READ_BUF(8);
/* gcbp_service */
dummy = be32_to_cpup(p++);
/* gcbp_handle_from_server */
dummy = be32_to_cpup(p++);
READ_BUF(dummy);
p += XDR_QUADLEN(dummy);
/* gcbp_handle_from_client */
READ_BUF(4);
dummy = be32_to_cpup(p++);
READ_BUF(dummy);
break;
default:
dprintk("Illegal callback secflavor\n");
return nfserr_inval;
}
}
DECODE_TAIL;
}
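/*
* Editorial note: only the first usable entry wins here; cbs->flavor
* is set once, from the first AUTH_NULL or valid AUTH_UNIX flavor,
* while AUTH_GSS entries are parsed purely to keep the stream aligned.
*/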
static __be32 nfsd4_decode_backchannel_ctl(struct nfsd4_compoundargs *argp, struct nfsd4_backchannel_ctl *bc)
{
DECODE_HEAD;
READ_BUF(4);
bc->bc_cb_program = be32_to_cpup(p++);
nfsd4_decode_cb_sec(argp, &bc->bc_cb_sec);
DECODE_TAIL;
}
static __be32 nfsd4_decode_bind_conn_to_session(struct nfsd4_compoundargs *argp, struct nfsd4_bind_conn_to_session *bcts)
{
DECODE_HEAD;
READ_BUF(NFS4_MAX_SESSIONID_LEN + 8);
COPYMEM(bcts->sessionid.data, NFS4_MAX_SESSIONID_LEN);
bcts->dir = be32_to_cpup(p++);
/* XXX: skipping ctsa_use_conn_in_rdma_mode. Perhaps Tom Tucker
* could help us figure out whether we should be using it. */
DECODE_TAIL;
}
static __be32
nfsd4_decode_close(struct nfsd4_compoundargs *argp, struct nfsd4_close *close)
{
DECODE_HEAD;
READ_BUF(4);
close->cl_seqid = be32_to_cpup(p++);
return nfsd4_decode_stateid(argp, &close->cl_stateid);
DECODE_TAIL;
}
static __be32
nfsd4_decode_commit(struct nfsd4_compoundargs *argp, struct nfsd4_commit *commit)
{
DECODE_HEAD;
READ_BUF(12);
p = xdr_decode_hyper(p, &commit->co_offset);
commit->co_count = be32_to_cpup(p++);
DECODE_TAIL;
}
static __be32
nfsd4_decode_create(struct nfsd4_compoundargs *argp, struct nfsd4_create *create)
{
DECODE_HEAD;
READ_BUF(4);
create->cr_type = be32_to_cpup(p++);
switch (create->cr_type) {
case NF4LNK:
READ_BUF(4);
create->cr_datalen = be32_to_cpup(p++);
READ_BUF(create->cr_datalen);
create->cr_data = svcxdr_dupstr(argp, p, create->cr_datalen);
if (!create->cr_data)
return nfserr_jukebox;
break;
case NF4BLK:
case NF4CHR:
READ_BUF(8);
create->cr_specdata1 = be32_to_cpup(p++);
create->cr_specdata2 = be32_to_cpup(p++);
break;
case NF4SOCK:
case NF4FIFO:
case NF4DIR:
default:
break;
}
READ_BUF(4);
create->cr_namelen = be32_to_cpup(p++);
READ_BUF(create->cr_namelen);
SAVEMEM(create->cr_name, create->cr_namelen);
if ((status = check_filename(create->cr_name, create->cr_namelen)))
return status;
status = nfsd4_decode_fattr(argp, create->cr_bmval, &create->cr_iattr,
&create->cr_acl, &create->cr_label,
&current->fs->umask);
if (status)
goto out;
DECODE_TAIL;
}
static inline __be32
nfsd4_decode_delegreturn(struct nfsd4_compoundargs *argp, struct nfsd4_delegreturn *dr)
{
return nfsd4_decode_stateid(argp, &dr->dr_stateid);
}
static inline __be32
nfsd4_decode_getattr(struct nfsd4_compoundargs *argp, struct nfsd4_getattr *getattr)
{
return nfsd4_decode_bitmap(argp, getattr->ga_bmval);
}
static __be32
nfsd4_decode_link(struct nfsd4_compoundargs *argp, struct nfsd4_link *link)
{
DECODE_HEAD;
READ_BUF(4);
link->li_namelen = be32_to_cpup(p++);
READ_BUF(link->li_namelen);
SAVEMEM(link->li_name, link->li_namelen);
if ((status = check_filename(link->li_name, link->li_namelen)))
return status;
DECODE_TAIL;
}
static __be32
nfsd4_decode_lock(struct nfsd4_compoundargs *argp, struct nfsd4_lock *lock)
{
DECODE_HEAD;
/*
* type, reclaim(boolean), offset, length, new_lock_owner(boolean)
*/
READ_BUF(28);
lock->lk_type = be32_to_cpup(p++);
if ((lock->lk_type < NFS4_READ_LT) || (lock->lk_type > NFS4_WRITEW_LT))
goto xdr_error;
lock->lk_reclaim = be32_to_cpup(p++);
p = xdr_decode_hyper(p, &lock->lk_offset);
p = xdr_decode_hyper(p, &lock->lk_length);
lock->lk_is_new = be32_to_cpup(p++);
if (lock->lk_is_new) {
READ_BUF(4);
lock->lk_new_open_seqid = be32_to_cpup(p++);
status = nfsd4_decode_stateid(argp, &lock->lk_new_open_stateid);
if (status)
return status;
READ_BUF(8 + sizeof(clientid_t));
lock->lk_new_lock_seqid = be32_to_cpup(p++);
COPYMEM(&lock->lk_new_clientid, sizeof(clientid_t));
lock->lk_new_owner.len = be32_to_cpup(p++);
READ_BUF(lock->lk_new_owner.len);
READMEM(lock->lk_new_owner.data, lock->lk_new_owner.len);
} else {
status = nfsd4_decode_stateid(argp, &lock->lk_old_lock_stateid);
if (status)
return status;
READ_BUF(4);
lock->lk_old_lock_seqid = be32_to_cpup(p++);
}
DECODE_TAIL;
}
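/*
* LOCK wire layout sketch: type(4) reclaim(4) offset(8) length(8)
* new_lock_owner(4), followed either by open_seqid(4) open_stateid(16)
* lock_seqid(4) clientid(8) owner<> for a new lockowner, or by
* lock_stateid(16) lock_seqid(4) for an existing one.
*/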
static __be32
nfsd4_decode_lockt(struct nfsd4_compoundargs *argp, struct nfsd4_lockt *lockt)
{
DECODE_HEAD;
READ_BUF(32);
lockt->lt_type = be32_to_cpup(p++);
if ((lockt->lt_type < NFS4_READ_LT) || (lockt->lt_type > NFS4_WRITEW_LT))
goto xdr_error;
p = xdr_decode_hyper(p, &lockt->lt_offset);
p = xdr_decode_hyper(p, &lockt->lt_length);
COPYMEM(&lockt->lt_clientid, 8);
lockt->lt_owner.len = be32_to_cpup(p++);
READ_BUF(lockt->lt_owner.len);
READMEM(lockt->lt_owner.data, lockt->lt_owner.len);
DECODE_TAIL;
}
static __be32
nfsd4_decode_locku(struct nfsd4_compoundargs *argp, struct nfsd4_locku *locku)
{
DECODE_HEAD;
READ_BUF(8);
locku->lu_type = be32_to_cpup(p++);
if ((locku->lu_type < NFS4_READ_LT) || (locku->lu_type > NFS4_WRITEW_LT))
goto xdr_error;
locku->lu_seqid = be32_to_cpup(p++);
status = nfsd4_decode_stateid(argp, &locku->lu_stateid);
if (status)
return status;
READ_BUF(16);
p = xdr_decode_hyper(p, &locku->lu_offset);
p = xdr_decode_hyper(p, &locku->lu_length);
DECODE_TAIL;
}
static __be32
nfsd4_decode_lookup(struct nfsd4_compoundargs *argp, struct nfsd4_lookup *lookup)
{
DECODE_HEAD;
READ_BUF(4);
lookup->lo_len = be32_to_cpup(p++);
READ_BUF(lookup->lo_len);
SAVEMEM(lookup->lo_name, lookup->lo_len);
if ((status = check_filename(lookup->lo_name, lookup->lo_len)))
return status;
DECODE_TAIL;
}
static __be32 nfsd4_decode_share_access(struct nfsd4_compoundargs *argp, u32 *share_access, u32 *deleg_want, u32 *deleg_when)
{
__be32 *p;
u32 w;
READ_BUF(4);
w = be32_to_cpup(p++);
*share_access = w & NFS4_SHARE_ACCESS_MASK;
*deleg_want = w & NFS4_SHARE_WANT_MASK;
if (deleg_when)
*deleg_when = w & NFS4_SHARE_WHEN_MASK;
switch (w & NFS4_SHARE_ACCESS_MASK) {
case NFS4_SHARE_ACCESS_READ:
case NFS4_SHARE_ACCESS_WRITE:
case NFS4_SHARE_ACCESS_BOTH:
break;
default:
return nfserr_bad_xdr;
}
w &= ~NFS4_SHARE_ACCESS_MASK;
if (!w)
return nfs_ok;
if (!argp->minorversion)
return nfserr_bad_xdr;
switch (w & NFS4_SHARE_WANT_MASK) {
case NFS4_SHARE_WANT_NO_PREFERENCE:
case NFS4_SHARE_WANT_READ_DELEG:
case NFS4_SHARE_WANT_WRITE_DELEG:
case NFS4_SHARE_WANT_ANY_DELEG:
case NFS4_SHARE_WANT_NO_DELEG:
case NFS4_SHARE_WANT_CANCEL:
break;
default:
return nfserr_bad_xdr;
}
w &= ~NFS4_SHARE_WANT_MASK;
if (!w)
return nfs_ok;
if (!deleg_when) /* open_downgrade */
return nfserr_inval;
switch (w) {
case NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL:
case NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED:
case (NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL |
NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED):
return nfs_ok;
}
xdr_error:
return nfserr_bad_xdr;
}
static __be32 nfsd4_decode_share_deny(struct nfsd4_compoundargs *argp, u32 *x)
{
__be32 *p;
READ_BUF(4);
*x = be32_to_cpup(p++);
/* Note: unlike access bits, deny bits may be zero. */
if (*x & ~NFS4_SHARE_DENY_BOTH)
return nfserr_bad_xdr;
return nfs_ok;
xdr_error:
return nfserr_bad_xdr;
}
static __be32 nfsd4_decode_opaque(struct nfsd4_compoundargs *argp, struct xdr_netobj *o)
{
__be32 *p;
READ_BUF(4);
o->len = be32_to_cpup(p++);
if (o->len == 0 || o->len > NFS4_OPAQUE_LIMIT)
return nfserr_bad_xdr;
READ_BUF(o->len);
SAVEMEM(o->data, o->len);
return nfs_ok;
xdr_error:
return nfserr_bad_xdr;
}
static __be32
nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
{
DECODE_HEAD;
u32 dummy;
memset(open->op_bmval, 0, sizeof(open->op_bmval));
open->op_iattr.ia_valid = 0;
open->op_openowner = NULL;
open->op_xdr_error = 0;
/* seqid, share_access, share_deny, clientid, ownerlen */
READ_BUF(4);
open->op_seqid = be32_to_cpup(p++);
/* decode, yet ignore deleg_when until supported */
status = nfsd4_decode_share_access(argp, &open->op_share_access,
&open->op_deleg_want, &dummy);
if (status)
goto xdr_error;
status = nfsd4_decode_share_deny(argp, &open->op_share_deny);
if (status)
goto xdr_error;
READ_BUF(sizeof(clientid_t));
COPYMEM(&open->op_clientid, sizeof(clientid_t));
status = nfsd4_decode_opaque(argp, &open->op_owner);
if (status)
goto xdr_error;
READ_BUF(4);
open->op_create = be32_to_cpup(p++);
switch (open->op_create) {
case NFS4_OPEN_NOCREATE:
break;
case NFS4_OPEN_CREATE:
current->fs->umask = 0;
READ_BUF(4);
open->op_createmode = be32_to_cpup(p++);
switch (open->op_createmode) {
case NFS4_CREATE_UNCHECKED:
case NFS4_CREATE_GUARDED:
status = nfsd4_decode_fattr(argp, open->op_bmval,
&open->op_iattr, &open->op_acl, &open->op_label,
&current->fs->umask);
if (status)
goto out;
break;
case NFS4_CREATE_EXCLUSIVE:
READ_BUF(NFS4_VERIFIER_SIZE);
COPYMEM(open->op_verf.data, NFS4_VERIFIER_SIZE);
break;
case NFS4_CREATE_EXCLUSIVE4_1:
if (argp->minorversion < 1)
goto xdr_error;
READ_BUF(NFS4_VERIFIER_SIZE);
COPYMEM(open->op_verf.data, NFS4_VERIFIER_SIZE);
status = nfsd4_decode_fattr(argp, open->op_bmval,
&open->op_iattr, &open->op_acl, &open->op_label,
&current->fs->umask);
if (status)
goto out;
break;
default:
goto xdr_error;
}
break;
default:
goto xdr_error;
}
/* open_claim */
READ_BUF(4);
open->op_claim_type = be32_to_cpup(p++);
switch (open->op_claim_type) {
case NFS4_OPEN_CLAIM_NULL:
case NFS4_OPEN_CLAIM_DELEGATE_PREV:
READ_BUF(4);
open->op_fname.len = be32_to_cpup(p++);
READ_BUF(open->op_fname.len);
SAVEMEM(open->op_fname.data, open->op_fname.len);
if ((status = check_filename(open->op_fname.data, open->op_fname.len)))
return status;
break;
case NFS4_OPEN_CLAIM_PREVIOUS:
READ_BUF(4);
open->op_delegate_type = be32_to_cpup(p++);
break;
case NFS4_OPEN_CLAIM_DELEGATE_CUR:
status = nfsd4_decode_stateid(argp, &open->op_delegate_stateid);
if (status)
return status;
READ_BUF(4);
open->op_fname.len = be32_to_cpup(p++);
READ_BUF(open->op_fname.len);
SAVEMEM(open->op_fname.data, open->op_fname.len);
if ((status = check_filename(open->op_fname.data, open->op_fname.len)))
return status;
break;
case NFS4_OPEN_CLAIM_FH:
case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
if (argp->minorversion < 1)
goto xdr_error;
/* void */
break;
case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
if (argp->minorversion < 1)
goto xdr_error;
status = nfsd4_decode_stateid(argp, &open->op_delegate_stateid);
if (status)
return status;
break;
default:
goto xdr_error;
}
DECODE_TAIL;
}
static __be32
nfsd4_decode_open_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_open_confirm *open_conf)
{
DECODE_HEAD;
if (argp->minorversion >= 1)
return nfserr_notsupp;
status = nfsd4_decode_stateid(argp, &open_conf->oc_req_stateid);
if (status)
return status;
READ_BUF(4);
open_conf->oc_seqid = be32_to_cpup(p++);
DECODE_TAIL;
}
static __be32
nfsd4_decode_open_downgrade(struct nfsd4_compoundargs *argp, struct nfsd4_open_downgrade *open_down)
{
DECODE_HEAD;
status = nfsd4_decode_stateid(argp, &open_down->od_stateid);
if (status)
return status;
READ_BUF(4);
open_down->od_seqid = be32_to_cpup(p++);
status = nfsd4_decode_share_access(argp, &open_down->od_share_access,
&open_down->od_deleg_want, NULL);
if (status)
return status;
status = nfsd4_decode_share_deny(argp, &open_down->od_share_deny);
if (status)
return status;
DECODE_TAIL;
}
static __be32
nfsd4_decode_putfh(struct nfsd4_compoundargs *argp, struct nfsd4_putfh *putfh)
{
DECODE_HEAD;
READ_BUF(4);
putfh->pf_fhlen = be32_to_cpup(p++);
if (putfh->pf_fhlen > NFS4_FHSIZE)
goto xdr_error;
READ_BUF(putfh->pf_fhlen);
SAVEMEM(putfh->pf_fhval, putfh->pf_fhlen);
DECODE_TAIL;
}
static __be32
nfsd4_decode_putpubfh(struct nfsd4_compoundargs *argp, void *p)
{
if (argp->minorversion == 0)
return nfs_ok;
return nfserr_notsupp;
}
static __be32
nfsd4_decode_read(struct nfsd4_compoundargs *argp, struct nfsd4_read *read)
{
DECODE_HEAD;
status = nfsd4_decode_stateid(argp, &read->rd_stateid);
if (status)
return status;
READ_BUF(12);
p = xdr_decode_hyper(p, &read->rd_offset);
read->rd_length = be32_to_cpup(p++);
DECODE_TAIL;
}
static __be32
nfsd4_decode_readdir(struct nfsd4_compoundargs *argp, struct nfsd4_readdir *readdir)
{
DECODE_HEAD;
READ_BUF(24);
p = xdr_decode_hyper(p, &readdir->rd_cookie);
COPYMEM(readdir->rd_verf.data, sizeof(readdir->rd_verf.data));
readdir->rd_dircount = be32_to_cpup(p++);
readdir->rd_maxcount = be32_to_cpup(p++);
if ((status = nfsd4_decode_bitmap(argp, readdir->rd_bmval)))
goto out;
DECODE_TAIL;
}
static __be32
nfsd4_decode_remove(struct nfsd4_compoundargs *argp, struct nfsd4_remove *remove)
{
DECODE_HEAD;
READ_BUF(4);
remove->rm_namelen = be32_to_cpup(p++);
READ_BUF(remove->rm_namelen);
SAVEMEM(remove->rm_name, remove->rm_namelen);
if ((status = check_filename(remove->rm_name, remove->rm_namelen)))
return status;
DECODE_TAIL;
}
static __be32
nfsd4_decode_rename(struct nfsd4_compoundargs *argp, struct nfsd4_rename *rename)
{
DECODE_HEAD;
READ_BUF(4);
rename->rn_snamelen = be32_to_cpup(p++);
READ_BUF(rename->rn_snamelen);
SAVEMEM(rename->rn_sname, rename->rn_snamelen);
READ_BUF(4);
rename->rn_tnamelen = be32_to_cpup(p++);
READ_BUF(rename->rn_tnamelen);
SAVEMEM(rename->rn_tname, rename->rn_tnamelen);
if ((status = check_filename(rename->rn_sname, rename->rn_snamelen)))
return status;
if ((status = check_filename(rename->rn_tname, rename->rn_tnamelen)))
return status;
DECODE_TAIL;
}
static __be32
nfsd4_decode_renew(struct nfsd4_compoundargs *argp, clientid_t *clientid)
{
DECODE_HEAD;
if (argp->minorversion >= 1)
return nfserr_notsupp;
READ_BUF(sizeof(clientid_t));
COPYMEM(clientid, sizeof(clientid_t));
DECODE_TAIL;
}
static __be32
nfsd4_decode_secinfo(struct nfsd4_compoundargs *argp,
struct nfsd4_secinfo *secinfo)
{
DECODE_HEAD;
READ_BUF(4);
secinfo->si_namelen = be32_to_cpup(p++);
READ_BUF(secinfo->si_namelen);
SAVEMEM(secinfo->si_name, secinfo->si_namelen);
status = check_filename(secinfo->si_name, secinfo->si_namelen);
if (status)
return status;
DECODE_TAIL;
}
static __be32
nfsd4_decode_secinfo_no_name(struct nfsd4_compoundargs *argp,
struct nfsd4_secinfo_no_name *sin)
{
DECODE_HEAD;
READ_BUF(4);
sin->sin_style = be32_to_cpup(p++);
DECODE_TAIL;
}
static __be32
nfsd4_decode_setattr(struct nfsd4_compoundargs *argp, struct nfsd4_setattr *setattr)
{
__be32 status;
status = nfsd4_decode_stateid(argp, &setattr->sa_stateid);
if (status)
return status;
return nfsd4_decode_fattr(argp, setattr->sa_bmval, &setattr->sa_iattr,
&setattr->sa_acl, &setattr->sa_label, NULL);
}
static __be32
nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclientid *setclientid)
{
DECODE_HEAD;
if (argp->minorversion >= 1)
return nfserr_notsupp;
READ_BUF(NFS4_VERIFIER_SIZE);
COPYMEM(setclientid->se_verf.data, NFS4_VERIFIER_SIZE);
status = nfsd4_decode_opaque(argp, &setclientid->se_name);
if (status)
return nfserr_bad_xdr;
READ_BUF(8);
setclientid->se_callback_prog = be32_to_cpup(p++);
setclientid->se_callback_netid_len = be32_to_cpup(p++);
READ_BUF(setclientid->se_callback_netid_len);
SAVEMEM(setclientid->se_callback_netid_val, setclientid->se_callback_netid_len);
READ_BUF(4);
setclientid->se_callback_addr_len = be32_to_cpup(p++);
READ_BUF(setclientid->se_callback_addr_len);
SAVEMEM(setclientid->se_callback_addr_val, setclientid->se_callback_addr_len);
READ_BUF(4);
setclientid->se_callback_ident = be32_to_cpup(p++);
DECODE_TAIL;
}
static __be32
nfsd4_decode_setclientid_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_setclientid_confirm *scd_c)
{
DECODE_HEAD;
if (argp->minorversion >= 1)
return nfserr_notsupp;
READ_BUF(8 + NFS4_VERIFIER_SIZE);
COPYMEM(&scd_c->sc_clientid, 8);
COPYMEM(&scd_c->sc_confirm, NFS4_VERIFIER_SIZE);
DECODE_TAIL;
}
/* Also used for NVERIFY */
static __be32
nfsd4_decode_verify(struct nfsd4_compoundargs *argp, struct nfsd4_verify *verify)
{
DECODE_HEAD;
if ((status = nfsd4_decode_bitmap(argp, verify->ve_bmval)))
goto out;
/* For convenience's sake, we compare raw xdr'd attributes in
* nfsd4_proc_verify */
READ_BUF(4);
verify->ve_attrlen = be32_to_cpup(p++);
READ_BUF(verify->ve_attrlen);
SAVEMEM(verify->ve_attrval, verify->ve_attrlen);
DECODE_TAIL;
}
static __be32
nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
{
int avail;
int len;
DECODE_HEAD;
status = nfsd4_decode_stateid(argp, &write->wr_stateid);
if (status)
return status;
READ_BUF(16);
p = xdr_decode_hyper(p, &write->wr_offset);
write->wr_stable_how = be32_to_cpup(p++);
if (write->wr_stable_how > NFS_FILE_SYNC)
goto xdr_error;
write->wr_buflen = be32_to_cpup(p++);
/* Sorry, no magic macros for this: a plain
*	READ_BUF(write->wr_buflen);
*	SAVEMEM(write->wr_buf, write->wr_buflen);
* cannot handle a payload spanning many pages, so the iovec and
* page list are set up by hand below.
*/
avail = (char*)argp->end - (char*)argp->p;
if (avail + argp->pagelen < write->wr_buflen) {
dprintk("NFSD: xdr error (%s:%d)\n",
__FILE__, __LINE__);
goto xdr_error;
}
write->wr_head.iov_base = p;
write->wr_head.iov_len = avail;
write->wr_pagelist = argp->pagelist;
len = XDR_QUADLEN(write->wr_buflen) << 2;
if (len >= avail) {
int pages;
len -= avail;
pages = len >> PAGE_SHIFT;
argp->pagelist += pages;
argp->pagelen -= pages * PAGE_SIZE;
len -= pages * PAGE_SIZE;
argp->p = (__be32 *)page_address(argp->pagelist[0]);
argp->pagelist++;
argp->end = argp->p + XDR_QUADLEN(PAGE_SIZE);
}
argp->p += XDR_QUADLEN(len);
DECODE_TAIL;
}
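/*
* Editorial note: the write payload is deliberately not copied;
* wr_head covers the bytes still in the current buffer, wr_pagelist
* names the pages holding the rest, and argp->p/argp->end are advanced
* past the XDR-padded length so the next op decodes from the right
* place.
*/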
static __be32
nfsd4_decode_release_lockowner(struct nfsd4_compoundargs *argp, struct nfsd4_release_lockowner *rlockowner)
{
DECODE_HEAD;
if (argp->minorversion >= 1)
return nfserr_notsupp;
READ_BUF(12);
COPYMEM(&rlockowner->rl_clientid, sizeof(clientid_t));
rlockowner->rl_owner.len = be32_to_cpup(p++);
READ_BUF(rlockowner->rl_owner.len);
READMEM(rlockowner->rl_owner.data, rlockowner->rl_owner.len);
if (argp->minorversion && !zero_clientid(&rlockowner->rl_clientid))
return nfserr_inval;
DECODE_TAIL;
}
static __be32
nfsd4_decode_exchange_id(struct nfsd4_compoundargs *argp,
struct nfsd4_exchange_id *exid)
{
int dummy, tmp;
DECODE_HEAD;
READ_BUF(NFS4_VERIFIER_SIZE);
COPYMEM(exid->verifier.data, NFS4_VERIFIER_SIZE);
status = nfsd4_decode_opaque(argp, &exid->clname);
if (status)
return nfserr_bad_xdr;
READ_BUF(4);
exid->flags = be32_to_cpup(p++);
/* Ignore state_protect4_a */
READ_BUF(4);
exid->spa_how = be32_to_cpup(p++);
switch (exid->spa_how) {
case SP4_NONE:
break;
case SP4_MACH_CRED:
/* spo_must_enforce */
status = nfsd4_decode_bitmap(argp,
exid->spo_must_enforce);
if (status)
goto out;
/* spo_must_allow */
status = nfsd4_decode_bitmap(argp, exid->spo_must_allow);
if (status)
goto out;
break;
case SP4_SSV:
/* ssp_ops */
READ_BUF(4);
dummy = be32_to_cpup(p++);
READ_BUF(dummy * 4);
p += dummy;
READ_BUF(4);
dummy = be32_to_cpup(p++);
READ_BUF(dummy * 4);
p += dummy;
/* ssp_hash_algs<> */
READ_BUF(4);
tmp = be32_to_cpup(p++);
while (tmp--) {
READ_BUF(4);
dummy = be32_to_cpup(p++);
READ_BUF(dummy);
p += XDR_QUADLEN(dummy);
}
/* ssp_encr_algs<> */
READ_BUF(4);
tmp = be32_to_cpup(p++);
while (tmp--) {
READ_BUF(4);
dummy = be32_to_cpup(p++);
READ_BUF(dummy);
p += XDR_QUADLEN(dummy);
}
/* ssp_window and ssp_num_gss_handles */
READ_BUF(8);
dummy = be32_to_cpup(p++);
dummy = be32_to_cpup(p++);
break;
default:
goto xdr_error;
}
/* Ignore Implementation ID */
READ_BUF(4); /* nfs_impl_id4 array length */
dummy = be32_to_cpup(p++);
if (dummy > 1)
goto xdr_error;
if (dummy == 1) {
/* nii_domain */
READ_BUF(4);
dummy = be32_to_cpup(p++);
READ_BUF(dummy);
p += XDR_QUADLEN(dummy);
/* nii_name */
READ_BUF(4);
dummy = be32_to_cpup(p++);
READ_BUF(dummy);
p += XDR_QUADLEN(dummy);
/* nii_date */
READ_BUF(12);
p += 3;
}
DECODE_TAIL;
}
static __be32
nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
struct nfsd4_create_session *sess)
{
DECODE_HEAD;
u32 dummy;
READ_BUF(16);
COPYMEM(&sess->clientid, 8);
sess->seqid = be32_to_cpup(p++);
sess->flags = be32_to_cpup(p++);
/* Fore channel attrs */
READ_BUF(28);
dummy = be32_to_cpup(p++); /* headerpadsz is always 0 */
sess->fore_channel.maxreq_sz = be32_to_cpup(p++);
sess->fore_channel.maxresp_sz = be32_to_cpup(p++);
sess->fore_channel.maxresp_cached = be32_to_cpup(p++);
sess->fore_channel.maxops = be32_to_cpup(p++);
sess->fore_channel.maxreqs = be32_to_cpup(p++);
sess->fore_channel.nr_rdma_attrs = be32_to_cpup(p++);
if (sess->fore_channel.nr_rdma_attrs == 1) {
READ_BUF(4);
sess->fore_channel.rdma_attrs = be32_to_cpup(p++);
} else if (sess->fore_channel.nr_rdma_attrs > 1) {
dprintk("Too many fore channel attr bitmaps!\n");
goto xdr_error;
}
/* Back channel attrs */
READ_BUF(28);
dummy = be32_to_cpup(p++); /* headerpadsz is always 0 */
sess->back_channel.maxreq_sz = be32_to_cpup(p++);
sess->back_channel.maxresp_sz = be32_to_cpup(p++);
sess->back_channel.maxresp_cached = be32_to_cpup(p++);
sess->back_channel.maxops = be32_to_cpup(p++);
sess->back_channel.maxreqs = be32_to_cpup(p++);
sess->back_channel.nr_rdma_attrs = be32_to_cpup(p++);
if (sess->back_channel.nr_rdma_attrs == 1) {
READ_BUF(4);
sess->back_channel.rdma_attrs = be32_to_cpup(p++);
} else if (sess->back_channel.nr_rdma_attrs > 1) {
dprintk("Too many back channel attr bitmaps!\n");
goto xdr_error;
}
READ_BUF(4);
sess->callback_prog = be32_to_cpup(p++);
nfsd4_decode_cb_sec(argp, &sess->cb_sec);
DECODE_TAIL;
}
static __be32
nfsd4_decode_destroy_session(struct nfsd4_compoundargs *argp,
struct nfsd4_destroy_session *destroy_session)
{
DECODE_HEAD;
READ_BUF(NFS4_MAX_SESSIONID_LEN);
COPYMEM(destroy_session->sessionid.data, NFS4_MAX_SESSIONID_LEN);
DECODE_TAIL;
}
static __be32
nfsd4_decode_free_stateid(struct nfsd4_compoundargs *argp,
struct nfsd4_free_stateid *free_stateid)
{
DECODE_HEAD;
READ_BUF(sizeof(stateid_t));
free_stateid->fr_stateid.si_generation = be32_to_cpup(p++);
COPYMEM(&free_stateid->fr_stateid.si_opaque, sizeof(stateid_opaque_t));
DECODE_TAIL;
}
static __be32
nfsd4_decode_sequence(struct nfsd4_compoundargs *argp,
struct nfsd4_sequence *seq)
{
DECODE_HEAD;
READ_BUF(NFS4_MAX_SESSIONID_LEN + 16);
COPYMEM(seq->sessionid.data, NFS4_MAX_SESSIONID_LEN);
seq->seqid = be32_to_cpup(p++);
seq->slotid = be32_to_cpup(p++);
seq->maxslots = be32_to_cpup(p++);
seq->cachethis = be32_to_cpup(p++);
DECODE_TAIL;
}
static __be32
nfsd4_decode_test_stateid(struct nfsd4_compoundargs *argp, struct nfsd4_test_stateid *test_stateid)
{
int i;
__be32 *p, status;
struct nfsd4_test_stateid_id *stateid;
READ_BUF(4);
test_stateid->ts_num_ids = be32_to_cpup(p++);
INIT_LIST_HEAD(&test_stateid->ts_stateid_list);
for (i = 0; i < test_stateid->ts_num_ids; i++) {
stateid = svcxdr_tmpalloc(argp, sizeof(*stateid));
if (!stateid) {
status = nfserrno(-ENOMEM);
goto out;
}
INIT_LIST_HEAD(&stateid->ts_id_list);
list_add_tail(&stateid->ts_id_list, &test_stateid->ts_stateid_list);
status = nfsd4_decode_stateid(argp, &stateid->ts_id_stateid);
if (status)
goto out;
}
status = 0;
out:
return status;
xdr_error:
dprintk("NFSD: xdr error (%s:%d)\n", __FILE__, __LINE__);
status = nfserr_bad_xdr;
goto out;
}
static __be32 nfsd4_decode_destroy_clientid(struct nfsd4_compoundargs *argp, struct nfsd4_destroy_clientid *dc)
{
DECODE_HEAD;
READ_BUF(8);
COPYMEM(&dc->clientid, 8);
DECODE_TAIL;
}
static __be32 nfsd4_decode_reclaim_complete(struct nfsd4_compoundargs *argp, struct nfsd4_reclaim_complete *rc)
{
DECODE_HEAD;
READ_BUF(4);
rc->rca_one_fs = be32_to_cpup(p++);
DECODE_TAIL;
}
#ifdef CONFIG_NFSD_PNFS
static __be32
nfsd4_decode_getdeviceinfo(struct nfsd4_compoundargs *argp,
struct nfsd4_getdeviceinfo *gdev)
{
DECODE_HEAD;
u32 num, i;
READ_BUF(sizeof(struct nfsd4_deviceid) + 3 * 4);
COPYMEM(&gdev->gd_devid, sizeof(struct nfsd4_deviceid));
gdev->gd_layout_type = be32_to_cpup(p++);
gdev->gd_maxcount = be32_to_cpup(p++);
num = be32_to_cpup(p++);
if (num) {
READ_BUF(4 * num);
gdev->gd_notify_types = be32_to_cpup(p++);
for (i = 1; i < num; i++) {
if (be32_to_cpup(p++)) {
status = nfserr_inval;
goto out;
}
}
}
DECODE_TAIL;
}
static __be32
nfsd4_decode_layoutget(struct nfsd4_compoundargs *argp,
struct nfsd4_layoutget *lgp)
{
DECODE_HEAD;
READ_BUF(36);
lgp->lg_signal = be32_to_cpup(p++);
lgp->lg_layout_type = be32_to_cpup(p++);
lgp->lg_seg.iomode = be32_to_cpup(p++);
p = xdr_decode_hyper(p, &lgp->lg_seg.offset);
p = xdr_decode_hyper(p, &lgp->lg_seg.length);
p = xdr_decode_hyper(p, &lgp->lg_minlength);
status = nfsd4_decode_stateid(argp, &lgp->lg_sid);
if (status)
return status;
READ_BUF(4);
lgp->lg_maxcount = be32_to_cpup(p++);
DECODE_TAIL;
}
static __be32
nfsd4_decode_layoutcommit(struct nfsd4_compoundargs *argp,
struct nfsd4_layoutcommit *lcp)
{
DECODE_HEAD;
u32 timechange;
READ_BUF(20);
p = xdr_decode_hyper(p, &lcp->lc_seg.offset);
p = xdr_decode_hyper(p, &lcp->lc_seg.length);
lcp->lc_reclaim = be32_to_cpup(p++);
status = nfsd4_decode_stateid(argp, &lcp->lc_sid);
if (status)
return status;
READ_BUF(4);
lcp->lc_newoffset = be32_to_cpup(p++);
if (lcp->lc_newoffset) {
READ_BUF(8);
p = xdr_decode_hyper(p, &lcp->lc_last_wr);
} else
lcp->lc_last_wr = 0;
READ_BUF(4);
timechange = be32_to_cpup(p++);
if (timechange) {
status = nfsd4_decode_time(argp, &lcp->lc_mtime);
if (status)
return status;
} else {
lcp->lc_mtime.tv_nsec = UTIME_NOW;
}
READ_BUF(8);
lcp->lc_layout_type = be32_to_cpup(p++);
/*
* Save the layout update in XDR format and let the layout driver deal
* with it later.
*/
lcp->lc_up_len = be32_to_cpup(p++);
if (lcp->lc_up_len > 0) {
READ_BUF(lcp->lc_up_len);
READMEM(lcp->lc_up_layout, lcp->lc_up_len);
}
DECODE_TAIL;
}
static __be32
nfsd4_decode_layoutreturn(struct nfsd4_compoundargs *argp,
struct nfsd4_layoutreturn *lrp)
{
DECODE_HEAD;
READ_BUF(16);
lrp->lr_reclaim = be32_to_cpup(p++);
lrp->lr_layout_type = be32_to_cpup(p++);
lrp->lr_seg.iomode = be32_to_cpup(p++);
lrp->lr_return_type = be32_to_cpup(p++);
if (lrp->lr_return_type == RETURN_FILE) {
READ_BUF(16);
p = xdr_decode_hyper(p, &lrp->lr_seg.offset);
p = xdr_decode_hyper(p, &lrp->lr_seg.length);
status = nfsd4_decode_stateid(argp, &lrp->lr_sid);
if (status)
return status;
READ_BUF(4);
lrp->lrf_body_len = be32_to_cpup(p++);
if (lrp->lrf_body_len > 0) {
READ_BUF(lrp->lrf_body_len);
READMEM(lrp->lrf_body, lrp->lrf_body_len);
}
} else {
lrp->lr_seg.offset = 0;
lrp->lr_seg.length = NFS4_MAX_UINT64;
}
DECODE_TAIL;
}
#endif /* CONFIG_NFSD_PNFS */
static __be32
nfsd4_decode_fallocate(struct nfsd4_compoundargs *argp,
struct nfsd4_fallocate *fallocate)
{
DECODE_HEAD;
status = nfsd4_decode_stateid(argp, &fallocate->falloc_stateid);
if (status)
return status;
READ_BUF(16);
p = xdr_decode_hyper(p, &fallocate->falloc_offset);
xdr_decode_hyper(p, &fallocate->falloc_length);
DECODE_TAIL;
}
static __be32
nfsd4_decode_clone(struct nfsd4_compoundargs *argp, struct nfsd4_clone *clone)
{
DECODE_HEAD;
status = nfsd4_decode_stateid(argp, &clone->cl_src_stateid);
if (status)
return status;
status = nfsd4_decode_stateid(argp, &clone->cl_dst_stateid);
if (status)
return status;
READ_BUF(8 + 8 + 8);
p = xdr_decode_hyper(p, &clone->cl_src_pos);
p = xdr_decode_hyper(p, &clone->cl_dst_pos);
p = xdr_decode_hyper(p, &clone->cl_count);
DECODE_TAIL;
}
static __be32
nfsd4_decode_copy(struct nfsd4_compoundargs *argp, struct nfsd4_copy *copy)
{
DECODE_HEAD;
unsigned int tmp;
status = nfsd4_decode_stateid(argp, &copy->cp_src_stateid);
if (status)
return status;
status = nfsd4_decode_stateid(argp, &copy->cp_dst_stateid);
if (status)
return status;
READ_BUF(8 + 8 + 8 + 4 + 4 + 4);
p = xdr_decode_hyper(p, &copy->cp_src_pos);
p = xdr_decode_hyper(p, &copy->cp_dst_pos);
p = xdr_decode_hyper(p, &copy->cp_count);
copy->cp_consecutive = be32_to_cpup(p++);
copy->cp_synchronous = be32_to_cpup(p++);
tmp = be32_to_cpup(p); /* Source server list not supported */
DECODE_TAIL;
}
static __be32
nfsd4_decode_seek(struct nfsd4_compoundargs *argp, struct nfsd4_seek *seek)
{
DECODE_HEAD;
status = nfsd4_decode_stateid(argp, &seek->seek_stateid);
if (status)
return status;
READ_BUF(8 + 4);
p = xdr_decode_hyper(p, &seek->seek_offset);
seek->seek_whence = be32_to_cpup(p);
DECODE_TAIL;
}
static __be32
nfsd4_decode_noop(struct nfsd4_compoundargs *argp, void *p)
{
return nfs_ok;
}
static __be32
nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
{
return nfserr_notsupp;
}
typedef __be32 (*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
static nfsd4_dec nfsd4_dec_ops[] = {
[OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
[OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
[OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
[OP_CREATE] = (nfsd4_dec)nfsd4_decode_create,
[OP_DELEGPURGE] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_DELEGRETURN] = (nfsd4_dec)nfsd4_decode_delegreturn,
[OP_GETATTR] = (nfsd4_dec)nfsd4_decode_getattr,
[OP_GETFH] = (nfsd4_dec)nfsd4_decode_noop,
[OP_LINK] = (nfsd4_dec)nfsd4_decode_link,
[OP_LOCK] = (nfsd4_dec)nfsd4_decode_lock,
[OP_LOCKT] = (nfsd4_dec)nfsd4_decode_lockt,
[OP_LOCKU] = (nfsd4_dec)nfsd4_decode_locku,
[OP_LOOKUP] = (nfsd4_dec)nfsd4_decode_lookup,
[OP_LOOKUPP] = (nfsd4_dec)nfsd4_decode_noop,
[OP_NVERIFY] = (nfsd4_dec)nfsd4_decode_verify,
[OP_OPEN] = (nfsd4_dec)nfsd4_decode_open,
[OP_OPENATTR] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_OPEN_CONFIRM] = (nfsd4_dec)nfsd4_decode_open_confirm,
[OP_OPEN_DOWNGRADE] = (nfsd4_dec)nfsd4_decode_open_downgrade,
[OP_PUTFH] = (nfsd4_dec)nfsd4_decode_putfh,
[OP_PUTPUBFH] = (nfsd4_dec)nfsd4_decode_putpubfh,
[OP_PUTROOTFH] = (nfsd4_dec)nfsd4_decode_noop,
[OP_READ] = (nfsd4_dec)nfsd4_decode_read,
[OP_READDIR] = (nfsd4_dec)nfsd4_decode_readdir,
[OP_READLINK] = (nfsd4_dec)nfsd4_decode_noop,
[OP_REMOVE] = (nfsd4_dec)nfsd4_decode_remove,
[OP_RENAME] = (nfsd4_dec)nfsd4_decode_rename,
[OP_RENEW] = (nfsd4_dec)nfsd4_decode_renew,
[OP_RESTOREFH] = (nfsd4_dec)nfsd4_decode_noop,
[OP_SAVEFH] = (nfsd4_dec)nfsd4_decode_noop,
[OP_SECINFO] = (nfsd4_dec)nfsd4_decode_secinfo,
[OP_SETATTR] = (nfsd4_dec)nfsd4_decode_setattr,
[OP_SETCLIENTID] = (nfsd4_dec)nfsd4_decode_setclientid,
[OP_SETCLIENTID_CONFIRM] = (nfsd4_dec)nfsd4_decode_setclientid_confirm,
[OP_VERIFY] = (nfsd4_dec)nfsd4_decode_verify,
[OP_WRITE] = (nfsd4_dec)nfsd4_decode_write,
[OP_RELEASE_LOCKOWNER] = (nfsd4_dec)nfsd4_decode_release_lockowner,
/* new operations for NFSv4.1 */
[OP_BACKCHANNEL_CTL] = (nfsd4_dec)nfsd4_decode_backchannel_ctl,
[OP_BIND_CONN_TO_SESSION]= (nfsd4_dec)nfsd4_decode_bind_conn_to_session,
[OP_EXCHANGE_ID] = (nfsd4_dec)nfsd4_decode_exchange_id,
[OP_CREATE_SESSION] = (nfsd4_dec)nfsd4_decode_create_session,
[OP_DESTROY_SESSION] = (nfsd4_dec)nfsd4_decode_destroy_session,
[OP_FREE_STATEID] = (nfsd4_dec)nfsd4_decode_free_stateid,
[OP_GET_DIR_DELEGATION] = (nfsd4_dec)nfsd4_decode_notsupp,
#ifdef CONFIG_NFSD_PNFS
[OP_GETDEVICEINFO] = (nfsd4_dec)nfsd4_decode_getdeviceinfo,
[OP_GETDEVICELIST] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_LAYOUTCOMMIT] = (nfsd4_dec)nfsd4_decode_layoutcommit,
[OP_LAYOUTGET] = (nfsd4_dec)nfsd4_decode_layoutget,
[OP_LAYOUTRETURN] = (nfsd4_dec)nfsd4_decode_layoutreturn,
#else
[OP_GETDEVICEINFO] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_GETDEVICELIST] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_LAYOUTCOMMIT] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_LAYOUTGET] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_LAYOUTRETURN] = (nfsd4_dec)nfsd4_decode_notsupp,
#endif
[OP_SECINFO_NO_NAME] = (nfsd4_dec)nfsd4_decode_secinfo_no_name,
[OP_SEQUENCE] = (nfsd4_dec)nfsd4_decode_sequence,
[OP_SET_SSV] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_TEST_STATEID] = (nfsd4_dec)nfsd4_decode_test_stateid,
[OP_WANT_DELEGATION] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_DESTROY_CLIENTID] = (nfsd4_dec)nfsd4_decode_destroy_clientid,
[OP_RECLAIM_COMPLETE] = (nfsd4_dec)nfsd4_decode_reclaim_complete,
/* new operations for NFSv4.2 */
[OP_ALLOCATE] = (nfsd4_dec)nfsd4_decode_fallocate,
[OP_COPY] = (nfsd4_dec)nfsd4_decode_copy,
[OP_COPY_NOTIFY] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_DEALLOCATE] = (nfsd4_dec)nfsd4_decode_fallocate,
[OP_IO_ADVISE] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_LAYOUTERROR] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_LAYOUTSTATS] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_OFFLOAD_CANCEL] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_OFFLOAD_STATUS] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_READ_PLUS] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_SEEK] = (nfsd4_dec)nfsd4_decode_seek,
[OP_WRITE_SAME] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_CLONE] = (nfsd4_dec)nfsd4_decode_clone,
};
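/*
* Dispatch note: opcodes without an entry here are NULL, so
* nfsd4_decode_compound() must range-check op->opnum (via
* nfsd4_opnum_in_range() below) before indexing this table.
*/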
static inline bool
nfsd4_opnum_in_range(struct nfsd4_compoundargs *argp, struct nfsd4_op *op)
{
if (op->opnum < FIRST_NFS4_OP)
return false;
else if (argp->minorversion == 0 && op->opnum > LAST_NFS40_OP)
return false;
else if (argp->minorversion == 1 && op->opnum > LAST_NFS41_OP)
return false;
else if (argp->minorversion == 2 && op->opnum > LAST_NFS42_OP)
return false;
return true;
}
static __be32
nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
{
DECODE_HEAD;
struct nfsd4_op *op;
bool cachethis = false;
int auth_slack = argp->rqstp->rq_auth_slack;
int max_reply = auth_slack + 8; /* opcnt, status */
int readcount = 0;
int readbytes = 0;
int i;
READ_BUF(4);
argp->taglen = be32_to_cpup(p++);
READ_BUF(argp->taglen);
SAVEMEM(argp->tag, argp->taglen);
READ_BUF(8);
argp->minorversion = be32_to_cpup(p++);
argp->opcnt = be32_to_cpup(p++);
max_reply += 4 + (XDR_QUADLEN(argp->taglen) << 2);
if (argp->taglen > NFSD4_MAX_TAGLEN)
goto xdr_error;
if (argp->opcnt > 100)
goto xdr_error;
if (argp->opcnt > ARRAY_SIZE(argp->iops)) {
argp->ops = kzalloc(argp->opcnt * sizeof(*argp->ops), GFP_KERNEL);
if (!argp->ops) {
argp->ops = argp->iops;
dprintk("nfsd: couldn't allocate room for COMPOUND\n");
goto xdr_error;
}
}
if (argp->minorversion > NFSD_SUPPORTED_MINOR_VERSION)
argp->opcnt = 0;
for (i = 0; i < argp->opcnt; i++) {
op = &argp->ops[i];
op->replay = NULL;
READ_BUF(4);
op->opnum = be32_to_cpup(p++);
if (nfsd4_opnum_in_range(argp, op))
op->status = nfsd4_dec_ops[op->opnum](argp, &op->u);
else {
op->opnum = OP_ILLEGAL;
op->status = nfserr_op_illegal;
}
/*
* We'll try to cache the result in the DRC if any one
* op in the compound wants to be cached:
*/
cachethis |= nfsd4_cache_this_op(op);
if (op->opnum == OP_READ) {
readcount++;
readbytes += nfsd4_max_reply(argp->rqstp, op);
} else
max_reply += nfsd4_max_reply(argp->rqstp, op);
/*
* OP_LOCK and OP_LOCKT may return a conflicting lock.
* (Special case because it will just skip encoding this
* if it runs out of xdr buffer space, and it is the only
* operation that behaves this way.)
*/
if (op->opnum == OP_LOCK || op->opnum == OP_LOCKT)
max_reply += NFS4_OPAQUE_LIMIT;
if (op->status) {
argp->opcnt = i+1;
break;
}
}
/* Sessions make the DRC unnecessary: */
if (argp->minorversion)
cachethis = false;
svc_reserve(argp->rqstp, max_reply + readbytes);
argp->rqstp->rq_cachetype = cachethis ? RC_REPLBUFF : RC_NOCACHE;
if (readcount > 1 || max_reply > PAGE_SIZE - auth_slack)
clear_bit(RQ_SPLICE_OK, &argp->rqstp->rq_flags);
DECODE_TAIL;
}
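/*
* COMPOUND wire layout sketch: tag<> minorversion(4) numops(4), then
* numops operations, each an opcode(4) followed by its op-specific
* arguments.  A decode failure stops the loop above and truncates
* opcnt so only the successfully parsed ops get processed.
*/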
static __be32 *encode_change(__be32 *p, struct kstat *stat, struct inode *inode,
struct svc_export *exp)
{
if (exp->ex_flags & NFSEXP_V4ROOT) {
*p++ = cpu_to_be32(convert_to_wallclock(exp->cd->flush_time));
*p++ = 0;
} else if (IS_I_VERSION(inode)) {
p = xdr_encode_hyper(p, inode->i_version);
} else {
*p++ = cpu_to_be32(stat->ctime.tv_sec);
*p++ = cpu_to_be32(stat->ctime.tv_nsec);
}
return p;
}
static __be32 *encode_cinfo(__be32 *p, struct nfsd4_change_info *c)
{
*p++ = cpu_to_be32(c->atomic);
if (c->change_supported) {
p = xdr_encode_hyper(p, c->before_change);
p = xdr_encode_hyper(p, c->after_change);
} else {
*p++ = cpu_to_be32(c->before_ctime_sec);
*p++ = cpu_to_be32(c->before_ctime_nsec);
*p++ = cpu_to_be32(c->after_ctime_sec);
*p++ = cpu_to_be32(c->after_ctime_nsec);
}
return p;
}
/* Encode the given string as an array of strings, with components
* separated by @sep and escaped with @esc_enter and @esc_exit.
*/
static __be32 nfsd4_encode_components_esc(struct xdr_stream *xdr, char sep,
char *components, char esc_enter,
char esc_exit)
{
__be32 *p;
__be32 pathlen;
int pathlen_offset;
int strlen, count = 0;
char *str, *end, *next;
dprintk("nfsd4_encode_components(%s)\n", components);
pathlen_offset = xdr->buf->len;
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
p++; /* We will fill this in with @count later */
end = str = components;
while (*end) {
bool found_esc = false;
/* try to parse as esc_start, ..., esc_end, sep */
if (*str == esc_enter) {
for (; *end && (*end != esc_exit); end++)
/* find esc_exit or end of string */;
next = end + 1;
if (*end && (!*next || *next == sep)) {
str++;
found_esc = true;
}
}
if (!found_esc)
for (; *end && (*end != sep); end++)
/* find sep or end of string */;
strlen = end - str;
if (strlen) {
p = xdr_reserve_space(xdr, strlen + 4);
if (!p)
return nfserr_resource;
p = xdr_encode_opaque(p, str, strlen);
count++;
}
else
end++;
if (found_esc)
end = next;
str = end;
}
pathlen = htonl(count);
write_bytes_to_xdr_buf(xdr->buf, pathlen_offset, &pathlen, 4);
return 0;
}
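/*
* Example (illustrative): with sep ':' and escapes '[' ']', the string
* "[::1]:server2" encodes as the two components "::1" and "server2",
* whereas without escaping it would split at every ':'.
*/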
/* Encode the given string as an array of strings, with components
* separated by @sep.
*/
static __be32 nfsd4_encode_components(struct xdr_stream *xdr, char sep,
char *components)
{
return nfsd4_encode_components_esc(xdr, sep, components, 0, 0);
}
/*
* encode a location element of a fs_locations structure
*/
static __be32 nfsd4_encode_fs_location4(struct xdr_stream *xdr,
struct nfsd4_fs_location *location)
{
__be32 status;
status = nfsd4_encode_components_esc(xdr, ':', location->hosts,
'[', ']');
if (status)
return status;
status = nfsd4_encode_components(xdr, '/', location->path);
if (status)
return status;
return 0;
}
/*
* Encode a path in RFC3530 'pathname4' format
*/
static __be32 nfsd4_encode_path(struct xdr_stream *xdr,
const struct path *root,
const struct path *path)
{
struct path cur = *path;
__be32 *p;
struct dentry **components = NULL;
unsigned int ncomponents = 0;
__be32 err = nfserr_jukebox;
dprintk("nfsd4_encode_components(");
path_get(&cur);
/* First walk the path up to the nfsd root, and store the
* dentries/path components in an array.
*/
for (;;) {
if (path_equal(&cur, root))
break;
if (cur.dentry == cur.mnt->mnt_root) {
if (follow_up(&cur))
continue;
goto out_free;
}
if ((ncomponents & 15) == 0) {
struct dentry **new;
new = krealloc(components,
sizeof(*new) * (ncomponents + 16),
GFP_KERNEL);
if (!new)
goto out_free;
components = new;
}
components[ncomponents++] = cur.dentry;
cur.dentry = dget_parent(cur.dentry);
}
err = nfserr_resource;
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_free;
*p++ = cpu_to_be32(ncomponents);
while (ncomponents) {
struct dentry *dentry = components[ncomponents - 1];
unsigned int len;
spin_lock(&dentry->d_lock);
len = dentry->d_name.len;
p = xdr_reserve_space(xdr, len + 4);
if (!p) {
spin_unlock(&dentry->d_lock);
goto out_free;
}
p = xdr_encode_opaque(p, dentry->d_name.name, len);
dprintk("/%pd", dentry);
spin_unlock(&dentry->d_lock);
dput(dentry);
ncomponents--;
}
err = 0;
out_free:
dprintk(")\n");
while (ncomponents)
dput(components[--ncomponents]);
kfree(components);
path_put(&cur);
return err;
}
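/*
* Example (illustrative): for an export rooted at /srv, encoding
* /srv/a/b collects the dentries b then a on the walk up, and the
* drain loop above emits <2, "a", "b"> in root-to-leaf order.
*/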
static __be32 nfsd4_encode_fsloc_fsroot(struct xdr_stream *xdr,
struct svc_rqst *rqstp, const struct path *path)
{
struct svc_export *exp_ps;
__be32 res;
exp_ps = rqst_find_fsidzero_export(rqstp);
if (IS_ERR(exp_ps))
return nfserrno(PTR_ERR(exp_ps));
res = nfsd4_encode_path(xdr, &exp_ps->ex_path, path);
exp_put(exp_ps);
return res;
}
/*
* encode a fs_locations structure
*/
static __be32 nfsd4_encode_fs_locations(struct xdr_stream *xdr,
struct svc_rqst *rqstp, struct svc_export *exp)
{
__be32 status;
int i;
__be32 *p;
struct nfsd4_fs_locations *fslocs = &exp->ex_fslocs;
status = nfsd4_encode_fsloc_fsroot(xdr, rqstp, &exp->ex_path);
if (status)
return status;
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(fslocs->locations_count);
for (i = 0; i < fslocs->locations_count; i++) {
status = nfsd4_encode_fs_location4(xdr, &fslocs->locations[i]);
if (status)
return status;
}
return 0;
}
static u32 nfs4_file_type(umode_t mode)
{
switch (mode & S_IFMT) {
case S_IFIFO: return NF4FIFO;
case S_IFCHR: return NF4CHR;
case S_IFDIR: return NF4DIR;
case S_IFBLK: return NF4BLK;
case S_IFLNK: return NF4LNK;
case S_IFREG: return NF4REG;
case S_IFSOCK: return NF4SOCK;
default: return NF4BAD;
}
}
static inline __be32
nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp,
struct nfs4_ace *ace)
{
if (ace->whotype != NFS4_ACL_WHO_NAMED)
return nfs4_acl_write_who(xdr, ace->whotype);
else if (ace->flag & NFS4_ACE_IDENTIFIER_GROUP)
return nfsd4_encode_group(xdr, rqstp, ace->who_gid);
else
return nfsd4_encode_user(xdr, rqstp, ace->who_uid);
}
static inline __be32
nfsd4_encode_layout_types(struct xdr_stream *xdr, u32 layout_types)
{
__be32 *p;
unsigned long i = hweight_long(layout_types);
p = xdr_reserve_space(xdr, 4 + 4 * i);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(i);
for (i = LAYOUT_NFSV4_1_FILES; i < LAYOUT_TYPE_MAX; ++i)
if (layout_types & (1 << i))
*p++ = cpu_to_be32(i);
return 0;
}
#define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \
FATTR4_WORD0_RDATTR_ERROR)
#define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID
#define WORD2_ABSENT_FS_ATTRS 0
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
static inline __be32
nfsd4_encode_security_label(struct xdr_stream *xdr, struct svc_rqst *rqstp,
void *context, int len)
{
__be32 *p;
p = xdr_reserve_space(xdr, len + 4 + 4 + 4);
if (!p)
return nfserr_resource;
/*
* For now we use a 0 here to indicate the null translation; in
* the future we may place a call to translation code here.
*/
*p++ = cpu_to_be32(0); /* lfs */
*p++ = cpu_to_be32(0); /* pi */
p = xdr_encode_opaque(p, context, len);
return 0;
}
#else
static inline __be32
nfsd4_encode_security_label(struct xdr_stream *xdr, struct svc_rqst *rqstp,
void *context, int len)
{ return 0; }
#endif
static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *bmval2, u32 *rdattr_err)
{
/* As per referral draft: */
if (*bmval0 & ~WORD0_ABSENT_FS_ATTRS ||
*bmval1 & ~WORD1_ABSENT_FS_ATTRS) {
if (*bmval0 & FATTR4_WORD0_RDATTR_ERROR ||
*bmval0 & FATTR4_WORD0_FS_LOCATIONS)
*rdattr_err = NFSERR_MOVED;
else
return nfserr_moved;
}
*bmval0 &= WORD0_ABSENT_FS_ATTRS;
*bmval1 &= WORD1_ABSENT_FS_ATTRS;
*bmval2 &= WORD2_ABSENT_FS_ATTRS;
return 0;
}
static int get_parent_attributes(struct svc_export *exp, struct kstat *stat)
{
struct path path = exp->ex_path;
int err;
path_get(&path);
while (follow_up(&path)) {
if (path.dentry != path.mnt->mnt_root)
break;
}
err = vfs_getattr(&path, stat, STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT);
path_put(&path);
return err;
}
static __be32
nfsd4_encode_bitmap(struct xdr_stream *xdr, u32 bmval0, u32 bmval1, u32 bmval2)
{
__be32 *p;
if (bmval2) {
p = xdr_reserve_space(xdr, 16);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(3);
*p++ = cpu_to_be32(bmval0);
*p++ = cpu_to_be32(bmval1);
*p++ = cpu_to_be32(bmval2);
} else if (bmval1) {
p = xdr_reserve_space(xdr, 12);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(2);
*p++ = cpu_to_be32(bmval0);
*p++ = cpu_to_be32(bmval1);
} else {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(1);
*p++ = cpu_to_be32(bmval0);
}
return 0;
out_resource:
return nfserr_resource;
}
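/*
* Example (illustrative): bmval0=0x00100010 with the other words zero
* encodes as <1, 0x00100010>; any nonzero bmval2 forces the three-word
* form even when bmval1 is zero.
*/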
/*
* Note: @fhp can be NULL; in this case, we might have to compose the filehandle
* ourselves.
*/
static __be32
nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
struct svc_export *exp,
struct dentry *dentry, u32 *bmval,
struct svc_rqst *rqstp, int ignore_crossmnt)
{
u32 bmval0 = bmval[0];
u32 bmval1 = bmval[1];
u32 bmval2 = bmval[2];
struct kstat stat;
struct svc_fh *tempfh = NULL;
struct kstatfs statfs;
__be32 *p;
int starting_len = xdr->buf->len;
int attrlen_offset;
__be32 attrlen;
u32 dummy;
u64 dummy64;
u32 rdattr_err = 0;
__be32 status;
int err;
struct nfs4_acl *acl = NULL;
void *context = NULL;
int contextlen;
bool contextsupport = false;
struct nfsd4_compoundres *resp = rqstp->rq_resp;
u32 minorversion = resp->cstate.minorversion;
struct path path = {
.mnt = exp->ex_path.mnt,
.dentry = dentry,
};
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
BUG_ON(!nfsd_attrs_supported(minorversion, bmval));
if (exp->ex_fslocs.migrated) {
status = fattr_handle_absent_fs(&bmval0, &bmval1, &bmval2, &rdattr_err);
if (status)
goto out;
}
err = vfs_getattr(&path, &stat, STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT);
if (err)
goto out_nfserr;
if ((bmval0 & (FATTR4_WORD0_FILES_AVAIL | FATTR4_WORD0_FILES_FREE |
FATTR4_WORD0_FILES_TOTAL | FATTR4_WORD0_MAXNAME)) ||
(bmval1 & (FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE |
FATTR4_WORD1_SPACE_TOTAL))) {
err = vfs_statfs(&path, &statfs);
if (err)
goto out_nfserr;
}
if ((bmval0 & (FATTR4_WORD0_FILEHANDLE | FATTR4_WORD0_FSID)) && !fhp) {
tempfh = kmalloc(sizeof(struct svc_fh), GFP_KERNEL);
status = nfserr_jukebox;
if (!tempfh)
goto out;
fh_init(tempfh, NFS4_FHSIZE);
status = fh_compose(tempfh, exp, dentry, NULL);
if (status)
goto out;
fhp = tempfh;
}
if (bmval0 & FATTR4_WORD0_ACL) {
err = nfsd4_get_nfs4_acl(rqstp, dentry, &acl);
if (err == -EOPNOTSUPP)
bmval0 &= ~FATTR4_WORD0_ACL;
else if (err == -EINVAL) {
status = nfserr_attrnotsupp;
goto out;
} else if (err != 0)
goto out_nfserr;
}
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
if ((bmval2 & FATTR4_WORD2_SECURITY_LABEL) ||
bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
if (exp->ex_flags & NFSEXP_SECURITY_LABEL)
err = security_inode_getsecctx(d_inode(dentry),
&context, &contextlen);
else
err = -EOPNOTSUPP;
contextsupport = (err == 0);
if (bmval2 & FATTR4_WORD2_SECURITY_LABEL) {
if (err == -EOPNOTSUPP)
bmval2 &= ~FATTR4_WORD2_SECURITY_LABEL;
else if (err)
goto out_nfserr;
}
}
#endif /* CONFIG_NFSD_V4_SECURITY_LABEL */
status = nfsd4_encode_bitmap(xdr, bmval0, bmval1, bmval2);
if (status)
goto out;
attrlen_offset = xdr->buf->len;
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
p++; /* to be backfilled later */
if (bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
u32 supp[3];
memcpy(supp, nfsd_suppattrs[minorversion], sizeof(supp));
if (!IS_POSIXACL(dentry->d_inode))
supp[0] &= ~FATTR4_WORD0_ACL;
if (!contextsupport)
supp[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
if (!supp[2]) {
p = xdr_reserve_space(xdr, 12);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(2);
*p++ = cpu_to_be32(supp[0]);
*p++ = cpu_to_be32(supp[1]);
} else {
p = xdr_reserve_space(xdr, 16);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(3);
*p++ = cpu_to_be32(supp[0]);
*p++ = cpu_to_be32(supp[1]);
*p++ = cpu_to_be32(supp[2]);
}
}
if (bmval0 & FATTR4_WORD0_TYPE) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
dummy = nfs4_file_type(stat.mode);
if (dummy == NF4BAD) {
status = nfserr_serverfault;
goto out;
}
*p++ = cpu_to_be32(dummy);
}
if (bmval0 & FATTR4_WORD0_FH_EXPIRE_TYPE) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
if (exp->ex_flags & NFSEXP_NOSUBTREECHECK)
*p++ = cpu_to_be32(NFS4_FH_PERSISTENT);
else
*p++ = cpu_to_be32(NFS4_FH_PERSISTENT|
NFS4_FH_VOL_RENAME);
}
if (bmval0 & FATTR4_WORD0_CHANGE) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
p = encode_change(p, &stat, d_inode(dentry), exp);
}
if (bmval0 & FATTR4_WORD0_SIZE) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
p = xdr_encode_hyper(p, stat.size);
}
if (bmval0 & FATTR4_WORD0_LINK_SUPPORT) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(1);
}
if (bmval0 & FATTR4_WORD0_SYMLINK_SUPPORT) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(1);
}
if (bmval0 & FATTR4_WORD0_NAMED_ATTR) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(0);
}
if (bmval0 & FATTR4_WORD0_FSID) {
p = xdr_reserve_space(xdr, 16);
if (!p)
goto out_resource;
if (exp->ex_fslocs.migrated) {
p = xdr_encode_hyper(p, NFS4_REFERRAL_FSID_MAJOR);
p = xdr_encode_hyper(p, NFS4_REFERRAL_FSID_MINOR);
} else switch(fsid_source(fhp)) {
case FSIDSOURCE_FSID:
p = xdr_encode_hyper(p, (u64)exp->ex_fsid);
p = xdr_encode_hyper(p, (u64)0);
break;
case FSIDSOURCE_DEV:
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(MAJOR(stat.dev));
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(MINOR(stat.dev));
break;
case FSIDSOURCE_UUID:
p = xdr_encode_opaque_fixed(p, exp->ex_uuid,
EX_UUID_LEN);
break;
}
}
if (bmval0 & FATTR4_WORD0_UNIQUE_HANDLES) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(0);
}
if (bmval0 & FATTR4_WORD0_LEASE_TIME) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(nn->nfsd4_lease);
}
if (bmval0 & FATTR4_WORD0_RDATTR_ERROR) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(rdattr_err);
}
if (bmval0 & FATTR4_WORD0_ACL) {
struct nfs4_ace *ace;
if (acl == NULL) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(0);
goto out_acl;
}
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(acl->naces);
for (ace = acl->aces; ace < acl->aces + acl->naces; ace++) {
p = xdr_reserve_space(xdr, 4*3);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(ace->type);
*p++ = cpu_to_be32(ace->flag);
*p++ = cpu_to_be32(ace->access_mask &
NFS4_ACE_MASK_ALL);
status = nfsd4_encode_aclname(xdr, rqstp, ace);
if (status)
goto out;
}
}
out_acl:
if (bmval0 & FATTR4_WORD0_ACLSUPPORT) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(IS_POSIXACL(dentry->d_inode) ?
ACL4_SUPPORT_ALLOW_ACL|ACL4_SUPPORT_DENY_ACL : 0);
}
if (bmval0 & FATTR4_WORD0_CANSETTIME) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(1);
}
if (bmval0 & FATTR4_WORD0_CASE_INSENSITIVE) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(0);
}
if (bmval0 & FATTR4_WORD0_CASE_PRESERVING) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(1);
}
if (bmval0 & FATTR4_WORD0_CHOWN_RESTRICTED) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(1);
}
if (bmval0 & FATTR4_WORD0_FILEHANDLE) {
p = xdr_reserve_space(xdr, fhp->fh_handle.fh_size + 4);
if (!p)
goto out_resource;
p = xdr_encode_opaque(p, &fhp->fh_handle.fh_base,
fhp->fh_handle.fh_size);
}
if (bmval0 & FATTR4_WORD0_FILEID) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
p = xdr_encode_hyper(p, stat.ino);
}
if (bmval0 & FATTR4_WORD0_FILES_AVAIL) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
p = xdr_encode_hyper(p, (u64) statfs.f_ffree);
}
if (bmval0 & FATTR4_WORD0_FILES_FREE) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
p = xdr_encode_hyper(p, (u64) statfs.f_ffree);
}
if (bmval0 & FATTR4_WORD0_FILES_TOTAL) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
p = xdr_encode_hyper(p, (u64) statfs.f_files);
}
if (bmval0 & FATTR4_WORD0_FS_LOCATIONS) {
status = nfsd4_encode_fs_locations(xdr, rqstp, exp);
if (status)
goto out;
}
if (bmval0 & FATTR4_WORD0_HOMOGENEOUS) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(1);
}
if (bmval0 & FATTR4_WORD0_MAXFILESIZE) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
p = xdr_encode_hyper(p, exp->ex_path.mnt->mnt_sb->s_maxbytes);
}
if (bmval0 & FATTR4_WORD0_MAXLINK) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(255);
}
if (bmval0 & FATTR4_WORD0_MAXNAME) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(statfs.f_namelen);
}
if (bmval0 & FATTR4_WORD0_MAXREAD) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
p = xdr_encode_hyper(p, (u64) svc_max_payload(rqstp));
}
if (bmval0 & FATTR4_WORD0_MAXWRITE) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
p = xdr_encode_hyper(p, (u64) svc_max_payload(rqstp));
}
if (bmval1 & FATTR4_WORD1_MODE) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(stat.mode & S_IALLUGO);
}
if (bmval1 & FATTR4_WORD1_NO_TRUNC) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(1);
}
if (bmval1 & FATTR4_WORD1_NUMLINKS) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(stat.nlink);
}
if (bmval1 & FATTR4_WORD1_OWNER) {
status = nfsd4_encode_user(xdr, rqstp, stat.uid);
if (status)
goto out;
}
if (bmval1 & FATTR4_WORD1_OWNER_GROUP) {
status = nfsd4_encode_group(xdr, rqstp, stat.gid);
if (status)
goto out;
}
if (bmval1 & FATTR4_WORD1_RAWDEV) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
*p++ = cpu_to_be32((u32) MAJOR(stat.rdev));
*p++ = cpu_to_be32((u32) MINOR(stat.rdev));
}
if (bmval1 & FATTR4_WORD1_SPACE_AVAIL) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
dummy64 = (u64)statfs.f_bavail * (u64)statfs.f_bsize;
p = xdr_encode_hyper(p, dummy64);
}
if (bmval1 & FATTR4_WORD1_SPACE_FREE) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
dummy64 = (u64)statfs.f_bfree * (u64)statfs.f_bsize;
p = xdr_encode_hyper(p, dummy64);
}
if (bmval1 & FATTR4_WORD1_SPACE_TOTAL) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
dummy64 = (u64)statfs.f_blocks * (u64)statfs.f_bsize;
p = xdr_encode_hyper(p, dummy64);
}
if (bmval1 & FATTR4_WORD1_SPACE_USED) {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
dummy64 = (u64)stat.blocks << 9;
p = xdr_encode_hyper(p, dummy64);
}
if (bmval1 & FATTR4_WORD1_TIME_ACCESS) {
p = xdr_reserve_space(xdr, 12);
if (!p)
goto out_resource;
p = xdr_encode_hyper(p, (s64)stat.atime.tv_sec);
*p++ = cpu_to_be32(stat.atime.tv_nsec);
}
if (bmval1 & FATTR4_WORD1_TIME_DELTA) {
p = xdr_reserve_space(xdr, 12);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(1);
*p++ = cpu_to_be32(0);
}
if (bmval1 & FATTR4_WORD1_TIME_METADATA) {
p = xdr_reserve_space(xdr, 12);
if (!p)
goto out_resource;
p = xdr_encode_hyper(p, (s64)stat.ctime.tv_sec);
*p++ = cpu_to_be32(stat.ctime.tv_nsec);
}
if (bmval1 & FATTR4_WORD1_TIME_MODIFY) {
p = xdr_reserve_space(xdr, 12);
if (!p)
goto out_resource;
p = xdr_encode_hyper(p, (s64)stat.mtime.tv_sec);
*p++ = cpu_to_be32(stat.mtime.tv_nsec);
}
if (bmval1 & FATTR4_WORD1_MOUNTED_ON_FILEID) {
struct kstat parent_stat;
u64 ino = stat.ino;
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
/*
* Get parent's attributes if not ignoring crossmount
* and this is the root of a cross-mounted filesystem.
*/
if (ignore_crossmnt == 0 &&
dentry == exp->ex_path.mnt->mnt_root) {
err = get_parent_attributes(exp, &parent_stat);
if (err)
goto out_nfserr;
ino = parent_stat.ino;
}
p = xdr_encode_hyper(p, ino);
}
#ifdef CONFIG_NFSD_PNFS
if (bmval1 & FATTR4_WORD1_FS_LAYOUT_TYPES) {
status = nfsd4_encode_layout_types(xdr, exp->ex_layout_types);
if (status)
goto out;
}
if (bmval2 & FATTR4_WORD2_LAYOUT_TYPES) {
status = nfsd4_encode_layout_types(xdr, exp->ex_layout_types);
if (status)
goto out;
}
if (bmval2 & FATTR4_WORD2_LAYOUT_BLKSIZE) {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(stat.blksize);
}
#endif /* CONFIG_NFSD_PNFS */
if (bmval2 & FATTR4_WORD2_SUPPATTR_EXCLCREAT) {
status = nfsd4_encode_bitmap(xdr, NFSD_SUPPATTR_EXCLCREAT_WORD0,
NFSD_SUPPATTR_EXCLCREAT_WORD1,
NFSD_SUPPATTR_EXCLCREAT_WORD2);
if (status)
goto out;
}
if (bmval2 & FATTR4_WORD2_SECURITY_LABEL) {
status = nfsd4_encode_security_label(xdr, rqstp, context,
contextlen);
if (status)
goto out;
}
attrlen = htonl(xdr->buf->len - attrlen_offset - 4);
write_bytes_to_xdr_buf(xdr->buf, attrlen_offset, &attrlen, 4);
status = nfs_ok;
out:
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
if (context)
security_release_secctx(context, contextlen);
#endif /* CONFIG_NFSD_V4_SECURITY_LABEL */
kfree(acl);
if (tempfh) {
fh_put(tempfh);
kfree(tempfh);
}
if (status)
xdr_truncate_encode(xdr, starting_len);
return status;
out_nfserr:
status = nfserrno(err);
goto out;
out_resource:
status = nfserr_resource;
goto out;
}
static void svcxdr_init_encode_from_buffer(struct xdr_stream *xdr,
struct xdr_buf *buf, __be32 *p, int bytes)
{
xdr->scratch.iov_len = 0;
memset(buf, 0, sizeof(struct xdr_buf));
buf->head[0].iov_base = p;
buf->head[0].iov_len = 0;
buf->len = 0;
xdr->buf = buf;
xdr->iov = buf->head;
xdr->p = p;
xdr->end = (void *)p + bytes;
buf->buflen = bytes;
}
__be32 nfsd4_encode_fattr_to_buf(__be32 **p, int words,
struct svc_fh *fhp, struct svc_export *exp,
struct dentry *dentry, u32 *bmval,
struct svc_rqst *rqstp, int ignore_crossmnt)
{
struct xdr_buf dummy;
struct xdr_stream xdr;
__be32 ret;
svcxdr_init_encode_from_buffer(&xdr, &dummy, *p, words << 2);
ret = nfsd4_encode_fattr(&xdr, fhp, exp, dentry, bmval, rqstp,
ignore_crossmnt);
*p = xdr.p;
return ret;
}
static inline int attributes_need_mount(u32 *bmval)
{
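/*
 * Only RDATTR_ERROR and LEASE_TIME (word 0) and MOUNTED_ON_FILEID
 * (word 1) can be answered from the mountpoint itself; any other
 * requested attribute requires crossing into the mounted filesystem.
 */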
if (bmval[0] & ~(FATTR4_WORD0_RDATTR_ERROR | FATTR4_WORD0_LEASE_TIME))
return 1;
if (bmval[1] & ~FATTR4_WORD1_MOUNTED_ON_FILEID)
return 1;
return 0;
}
static __be32
nfsd4_encode_dirent_fattr(struct xdr_stream *xdr, struct nfsd4_readdir *cd,
const char *name, int namlen)
{
struct svc_export *exp = cd->rd_fhp->fh_export;
struct dentry *dentry;
__be32 nfserr;
int ignore_crossmnt = 0;
dentry = lookup_one_len_unlocked(name, cd->rd_fhp->fh_dentry, namlen);
if (IS_ERR(dentry))
return nfserrno(PTR_ERR(dentry));
if (d_really_is_negative(dentry)) {
/*
* we're not holding the i_mutex here, so there's
* a window where this directory entry could have gone
* away.
*/
dput(dentry);
return nfserr_noent;
}
exp_get(exp);
/*
* In the case of a mountpoint, the client may be asking for
* attributes that are only properties of the underlying filesystem
* as opposed to the cross-mounted file system. In such a case,
* we will not follow the cross mount and will fill the attributes
* directly from the mountpoint dentry.
*/
if (nfsd_mountpoint(dentry, exp)) {
int err;
if (!(exp->ex_flags & NFSEXP_V4ROOT)
&& !attributes_need_mount(cd->rd_bmval)) {
ignore_crossmnt = 1;
goto out_encode;
}
/*
* Why the heck aren't we just using nfsd_lookup??
* Different "."/".." handling? Something else?
* At least, add a comment here to explain....
*/
err = nfsd_cross_mnt(cd->rd_rqstp, &dentry, &exp);
if (err) {
nfserr = nfserrno(err);
goto out_put;
}
nfserr = check_nfsd_access(exp, cd->rd_rqstp);
if (nfserr)
goto out_put;
}
out_encode:
nfserr = nfsd4_encode_fattr(xdr, NULL, exp, dentry, cd->rd_bmval,
cd->rd_rqstp, ignore_crossmnt);
out_put:
dput(dentry);
exp_put(exp);
return nfserr;
}
static __be32 *
nfsd4_encode_rdattr_error(struct xdr_stream *xdr, __be32 nfserr)
{
__be32 *p;
p = xdr_reserve_space(xdr, 20);
if (!p)
return NULL;
*p++ = htonl(2);
*p++ = htonl(FATTR4_WORD0_RDATTR_ERROR); /* bmval0 */
*p++ = htonl(0); /* bmval1 */
*p++ = htonl(4); /* attribute length */
*p++ = nfserr; /* no htonl */
return p;
}
static int
nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
loff_t offset, u64 ino, unsigned int d_type)
{
struct readdir_cd *ccd = ccdv;
struct nfsd4_readdir *cd = container_of(ccd, struct nfsd4_readdir, common);
struct xdr_stream *xdr = cd->xdr;
int start_offset = xdr->buf->len;
int cookie_offset;
u32 name_and_cookie;
int entry_bytes;
__be32 nfserr = nfserr_toosmall;
__be64 wire_offset;
__be32 *p;
/* In nfsv4, "." and ".." never make it onto the wire. */
if (name && isdotent(name, namlen)) {
cd->common.err = nfs_ok;
return 0;
}
if (cd->cookie_offset) {
wire_offset = cpu_to_be64(offset);
write_bytes_to_xdr_buf(xdr->buf, cd->cookie_offset,
&wire_offset, 8);
}
p = xdr_reserve_space(xdr, 4);
if (!p)
goto fail;
*p++ = xdr_one; /* mark entry present */
cookie_offset = xdr->buf->len;
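/*
 * This entry's cookie (the offset of the next entry) isn't known yet:
 * encode NFS_OFFSET_MAX as a placeholder and remember where it lives,
 * so that the next call, or nfsd4_encode_readdir() for the final
 * entry, can backfill it.
 */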
p = xdr_reserve_space(xdr, 3*4 + namlen);
if (!p)
goto fail;
p = xdr_encode_hyper(p, NFS_OFFSET_MAX); /* offset of next entry */
p = xdr_encode_array(p, name, namlen); /* name length & name */
nfserr = nfsd4_encode_dirent_fattr(xdr, cd, name, namlen);
switch (nfserr) {
case nfs_ok:
break;
case nfserr_resource:
nfserr = nfserr_toosmall;
goto fail;
case nfserr_noent:
xdr_truncate_encode(xdr, start_offset);
goto skip_entry;
default:
/*
* If the client requested the RDATTR_ERROR attribute,
* we stuff the error code into this attribute
* and continue. If this attribute was not requested,
* then in accordance with the spec, we fail the
* entire READDIR operation(!)
*/
if (!(cd->rd_bmval[0] & FATTR4_WORD0_RDATTR_ERROR))
goto fail;
p = nfsd4_encode_rdattr_error(xdr, nfserr);
if (p == NULL) {
nfserr = nfserr_toosmall;
goto fail;
}
}
nfserr = nfserr_toosmall;
entry_bytes = xdr->buf->len - start_offset;
if (entry_bytes > cd->rd_maxcount)
goto fail;
cd->rd_maxcount -= entry_bytes;
/*
* RFC 3530 14.2.24 describes rd_dircount as only a "hint", so
* let's always let through the first entry, at least:
*/
if (!cd->rd_dircount)
goto fail;
name_and_cookie = 4 + 4 * XDR_QUADLEN(namlen) + 8;
if (name_and_cookie > cd->rd_dircount && cd->cookie_offset)
goto fail;
cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie);
cd->cookie_offset = cookie_offset;
skip_entry:
cd->common.err = nfs_ok;
return 0;
fail:
xdr_truncate_encode(xdr, start_offset);
cd->common.err = nfserr;
return -EINVAL;
}
static __be32
nfsd4_encode_stateid(struct xdr_stream *xdr, stateid_t *sid)
{
__be32 *p;
p = xdr_reserve_space(xdr, sizeof(stateid_t));
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(sid->si_generation);
p = xdr_encode_opaque_fixed(p, &sid->si_opaque,
sizeof(stateid_opaque_t));
return 0;
}
static __be32
nfsd4_encode_access(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_access *access)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
if (!nfserr) {
p = xdr_reserve_space(xdr, 8);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(access->ac_supported);
*p++ = cpu_to_be32(access->ac_resp_access);
}
return nfserr;
}
static __be32 nfsd4_encode_bind_conn_to_session(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_bind_conn_to_session *bcts)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
if (!nfserr) {
p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN + 8);
if (!p)
return nfserr_resource;
p = xdr_encode_opaque_fixed(p, bcts->sessionid.data,
NFS4_MAX_SESSIONID_LEN);
*p++ = cpu_to_be32(bcts->dir);
/* Upshifting from TCP to RDMA is not supported */
*p++ = cpu_to_be32(0);
}
return nfserr;
}
static __be32
nfsd4_encode_close(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_close *close)
{
struct xdr_stream *xdr = &resp->xdr;
if (!nfserr)
nfserr = nfsd4_encode_stateid(xdr, &close->cl_stateid);
return nfserr;
}
static __be32
nfsd4_encode_commit(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_commit *commit)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
if (!nfserr) {
p = xdr_reserve_space(xdr, NFS4_VERIFIER_SIZE);
if (!p)
return nfserr_resource;
p = xdr_encode_opaque_fixed(p, commit->co_verf.data,
NFS4_VERIFIER_SIZE);
}
return nfserr;
}
static __be32
nfsd4_encode_create(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_create *create)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
if (!nfserr) {
p = xdr_reserve_space(xdr, 20);
if (!p)
return nfserr_resource;
encode_cinfo(p, &create->cr_cinfo);
nfserr = nfsd4_encode_bitmap(xdr, create->cr_bmval[0],
create->cr_bmval[1], create->cr_bmval[2]);
}
return nfserr;
}
static __be32
nfsd4_encode_getattr(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_getattr *getattr)
{
struct svc_fh *fhp = getattr->ga_fhp;
struct xdr_stream *xdr = &resp->xdr;
if (nfserr)
return nfserr;
nfserr = nfsd4_encode_fattr(xdr, fhp, fhp->fh_export, fhp->fh_dentry,
getattr->ga_bmval,
resp->rqstp, 0);
return nfserr;
}
static __be32
nfsd4_encode_getfh(struct nfsd4_compoundres *resp, __be32 nfserr, struct svc_fh **fhpp)
{
struct xdr_stream *xdr = &resp->xdr;
struct svc_fh *fhp = *fhpp;
unsigned int len;
__be32 *p;
if (!nfserr) {
len = fhp->fh_handle.fh_size;
p = xdr_reserve_space(xdr, len + 4);
if (!p)
return nfserr_resource;
p = xdr_encode_opaque(p, &fhp->fh_handle.fh_base, len);
}
return nfserr;
}
/*
* Including all fields other than the name, a LOCK4denied structure requires
* 8(clientid) + 4(namelen) + 8(offset) + 8(length) + 4(type) = 32 bytes.
*/
static __be32
nfsd4_encode_lock_denied(struct xdr_stream *xdr, struct nfsd4_lock_denied *ld)
{
struct xdr_netobj *conf = &ld->ld_owner;
__be32 *p;
again:
p = xdr_reserve_space(xdr, 32 + XDR_LEN(conf->len));
if (!p) {
/*
* Don't fail to return the result just because we can't
* return the conflicting open:
*/
if (conf->len) {
kfree(conf->data);
conf->len = 0;
conf->data = NULL;
goto again;
}
return nfserr_resource;
}
p = xdr_encode_hyper(p, ld->ld_start);
p = xdr_encode_hyper(p, ld->ld_length);
*p++ = cpu_to_be32(ld->ld_type);
if (conf->len) {
p = xdr_encode_opaque_fixed(p, &ld->ld_clientid, 8);
p = xdr_encode_opaque(p, conf->data, conf->len);
kfree(conf->data);
} else { /* non-nfsv4 lock in conflict, no clientid nor owner */
p = xdr_encode_hyper(p, (u64)0); /* clientid */
*p++ = cpu_to_be32(0); /* length of owner name */
}
return nfserr_denied;
}
static __be32
nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lock *lock)
{
struct xdr_stream *xdr = &resp->xdr;
if (!nfserr)
nfserr = nfsd4_encode_stateid(xdr, &lock->lk_resp_stateid);
else if (nfserr == nfserr_denied)
nfserr = nfsd4_encode_lock_denied(xdr, &lock->lk_denied);
return nfserr;
}
static __be32
nfsd4_encode_lockt(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lockt *lockt)
{
struct xdr_stream *xdr = &resp->xdr;
if (nfserr == nfserr_denied)
nfsd4_encode_lock_denied(xdr, &lockt->lt_denied);
return nfserr;
}
static __be32
nfsd4_encode_locku(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_locku *locku)
{
struct xdr_stream *xdr = &resp->xdr;
if (!nfserr)
nfserr = nfsd4_encode_stateid(xdr, &locku->lu_stateid);
return nfserr;
}
static __be32
nfsd4_encode_link(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_link *link)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
if (!nfserr) {
p = xdr_reserve_space(xdr, 20);
if (!p)
return nfserr_resource;
p = encode_cinfo(p, &link->li_cinfo);
}
return nfserr;
}
static __be32
nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open *open)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
if (nfserr)
goto out;
nfserr = nfsd4_encode_stateid(xdr, &open->op_stateid);
if (nfserr)
goto out;
p = xdr_reserve_space(xdr, 24);
if (!p)
return nfserr_resource;
p = encode_cinfo(p, &open->op_cinfo);
*p++ = cpu_to_be32(open->op_rflags);
nfserr = nfsd4_encode_bitmap(xdr, open->op_bmval[0], open->op_bmval[1],
open->op_bmval[2]);
if (nfserr)
goto out;
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(open->op_delegate_type);
switch (open->op_delegate_type) {
case NFS4_OPEN_DELEGATE_NONE:
break;
case NFS4_OPEN_DELEGATE_READ:
nfserr = nfsd4_encode_stateid(xdr, &open->op_delegate_stateid);
if (nfserr)
return nfserr;
p = xdr_reserve_space(xdr, 20);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(open->op_recall);
/*
* TODO: ACE's in delegations
*/
*p++ = cpu_to_be32(NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE);
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(0); /* XXX: is NULL principal ok? */
break;
case NFS4_OPEN_DELEGATE_WRITE:
nfserr = nfsd4_encode_stateid(xdr, &open->op_delegate_stateid);
if (nfserr)
return nfserr;
p = xdr_reserve_space(xdr, 32);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(0);
/*
* TODO: space_limit's in delegations
*/
*p++ = cpu_to_be32(NFS4_LIMIT_SIZE);
*p++ = cpu_to_be32(~(u32)0);
*p++ = cpu_to_be32(~(u32)0);
/*
* TODO: ACE's in delegations
*/
*p++ = cpu_to_be32(NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE);
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(0); /* XXX: is NULL principal ok? */
break;
case NFS4_OPEN_DELEGATE_NONE_EXT: /* 4.1 */
switch (open->op_why_no_deleg) {
case WND4_CONTENTION:
case WND4_RESOURCE:
p = xdr_reserve_space(xdr, 8);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(open->op_why_no_deleg);
/* deleg signaling not supported yet: */
*p++ = cpu_to_be32(0);
break;
default:
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(open->op_why_no_deleg);
}
break;
default:
BUG();
}
/* XXX save filehandle here */
out:
return nfserr;
}
static __be32
nfsd4_encode_open_confirm(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open_confirm *oc)
{
struct xdr_stream *xdr = &resp->xdr;
if (!nfserr)
nfserr = nfsd4_encode_stateid(xdr, &oc->oc_resp_stateid);
return nfserr;
}
static __be32
nfsd4_encode_open_downgrade(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open_downgrade *od)
{
struct xdr_stream *xdr = &resp->xdr;
if (!nfserr)
nfserr = nfsd4_encode_stateid(xdr, &od->od_stateid);
return nfserr;
}
static __be32 nfsd4_encode_splice_read(
struct nfsd4_compoundres *resp,
struct nfsd4_read *read,
struct file *file, unsigned long maxcount)
{
struct xdr_stream *xdr = &resp->xdr;
struct xdr_buf *buf = xdr->buf;
u32 eof;
long len;
int space_left;
__be32 nfserr;
__be32 *p = xdr->p - 2;
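/*
 * xdr->p - 2 points back at the eof and count words that
 * nfsd4_encode_read() reserved; they are filled in below once the
 * splice has completed and the actual byte count is known.
 */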
/* Make sure there will be room for padding if needed */
if (xdr->end - xdr->p < 1)
return nfserr_resource;
len = maxcount;
nfserr = nfsd_splice_read(read->rd_rqstp, file,
read->rd_offset, &maxcount);
if (nfserr) {
/*
* nfsd_splice_actor may have already messed with the
* page length; reset it so as not to confuse
* xdr_truncate_encode:
*/
buf->page_len = 0;
return nfserr;
}
eof = nfsd_eof_on_read(len, maxcount, read->rd_offset,
d_inode(read->rd_fhp->fh_dentry)->i_size);
*(p++) = htonl(eof);
*(p++) = htonl(maxcount);
buf->page_len = maxcount;
buf->len += maxcount;
xdr->page_ptr += (buf->page_base + maxcount + PAGE_SIZE - 1)
/ PAGE_SIZE;
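/* Advance page_ptr past every page now occupied by the spliced data. */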
/* Use rest of head for padding and remaining ops: */
buf->tail[0].iov_base = xdr->p;
buf->tail[0].iov_len = 0;
xdr->iov = buf->tail;
if (maxcount&3) {
int pad = 4 - (maxcount&3);
*(xdr->p++) = 0;
buf->tail[0].iov_base += maxcount&3;
buf->tail[0].iov_len = pad;
buf->len += pad;
}
space_left = min_t(int, (void *)xdr->end - (void *)xdr->p,
buf->buflen - buf->len);
buf->buflen = buf->len + space_left;
xdr->end = (__be32 *)((void *)xdr->end + space_left);
return 0;
}
static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp,
struct nfsd4_read *read,
struct file *file, unsigned long maxcount)
{
struct xdr_stream *xdr = &resp->xdr;
u32 eof;
int v;
int starting_len = xdr->buf->len - 8;
long len;
int thislen;
__be32 nfserr;
__be32 tmp;
__be32 *p;
u32 zzz = 0;
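/* Source of zero bytes for padding the data up to a 4-byte boundary. */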
int pad;
len = maxcount;
v = 0;
thislen = min_t(long, len, ((void *)xdr->end - (void *)xdr->p));
p = xdr_reserve_space(xdr, (thislen+3)&~3);
WARN_ON_ONCE(!p);
resp->rqstp->rq_vec[v].iov_base = p;
resp->rqstp->rq_vec[v].iov_len = thislen;
v++;
len -= thislen;
while (len) {
thislen = min_t(long, len, PAGE_SIZE);
p = xdr_reserve_space(xdr, (thislen+3)&~3);
WARN_ON_ONCE(!p);
resp->rqstp->rq_vec[v].iov_base = p;
resp->rqstp->rq_vec[v].iov_len = thislen;
v++;
len -= thislen;
}
read->rd_vlen = v;
len = maxcount;
nfserr = nfsd_readv(file, read->rd_offset, resp->rqstp->rq_vec,
read->rd_vlen, &maxcount);
if (nfserr)
return nfserr;
xdr_truncate_encode(xdr, starting_len + 8 + ((maxcount+3)&~3));
eof = nfsd_eof_on_read(len, maxcount, read->rd_offset,
d_inode(read->rd_fhp->fh_dentry)->i_size);
tmp = htonl(eof);
write_bytes_to_xdr_buf(xdr->buf, starting_len, &tmp, 4);
tmp = htonl(maxcount);
write_bytes_to_xdr_buf(xdr->buf, starting_len + 4, &tmp, 4);
pad = (maxcount&3) ? 4 - (maxcount&3) : 0;
write_bytes_to_xdr_buf(xdr->buf, starting_len + 8 + maxcount,
&zzz, pad);
return 0;
}
static __be32
nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_read *read)
{
unsigned long maxcount;
struct xdr_stream *xdr = &resp->xdr;
struct file *file = read->rd_filp;
int starting_len = xdr->buf->len;
struct raparms *ra = NULL;
__be32 *p;
if (nfserr)
goto out;
p = xdr_reserve_space(xdr, 8); /* eof flag and byte count */
if (!p) {
WARN_ON_ONCE(test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags));
nfserr = nfserr_resource;
goto out;
}
if (resp->xdr.buf->page_len &&
test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags)) {
WARN_ON_ONCE(1);
nfserr = nfserr_resource;
goto out;
}
xdr_commit_encode(xdr);
maxcount = svc_max_payload(resp->rqstp);
maxcount = min_t(unsigned long, maxcount,
(xdr->buf->buflen - xdr->buf->len));
maxcount = min_t(unsigned long, maxcount, read->rd_length);
if (read->rd_tmp_file)
ra = nfsd_init_raparms(file);
if (file->f_op->splice_read &&
test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags))
nfserr = nfsd4_encode_splice_read(resp, read, file, maxcount);
else
nfserr = nfsd4_encode_readv(resp, read, file, maxcount);
if (ra)
nfsd_put_raparams(file, ra);
if (nfserr)
xdr_truncate_encode(xdr, starting_len);
out:
if (file)
fput(file);
return nfserr;
}
static __be32
nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_readlink *readlink)
{
int maxcount;
__be32 wire_count;
int zero = 0;
struct xdr_stream *xdr = &resp->xdr;
int length_offset = xdr->buf->len;
__be32 *p;
if (nfserr)
return nfserr;
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
maxcount = PAGE_SIZE;
p = xdr_reserve_space(xdr, maxcount);
if (!p)
return nfserr_resource;
/*
* XXX: By default, vfs_readlink() will truncate symlinks if they
* would overflow the buffer. Is this kosher in NFSv4? If not, one
* easy fix is: if vfs_readlink() precisely fills the buffer, assume
* that truncation occurred, and return NFS4ERR_RESOURCE.
*/
nfserr = nfsd_readlink(readlink->rl_rqstp, readlink->rl_fhp,
(char *)p, &maxcount);
if (nfserr == nfserr_isdir)
nfserr = nfserr_inval;
if (nfserr) {
xdr_truncate_encode(xdr, length_offset);
return nfserr;
}
wire_count = htonl(maxcount);
write_bytes_to_xdr_buf(xdr->buf, length_offset, &wire_count, 4);
xdr_truncate_encode(xdr, length_offset + 4 + ALIGN(maxcount, 4));
if (maxcount & 3)
write_bytes_to_xdr_buf(xdr->buf, length_offset + 4 + maxcount,
&zero, 4 - (maxcount&3));
return 0;
}
static __be32
nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_readdir *readdir)
{
int maxcount;
int bytes_left;
loff_t offset;
__be64 wire_offset;
struct xdr_stream *xdr = &resp->xdr;
int starting_len = xdr->buf->len;
__be32 *p;
if (nfserr)
return nfserr;
p = xdr_reserve_space(xdr, NFS4_VERIFIER_SIZE);
if (!p)
return nfserr_resource;
/* XXX: Following NFSv3, we ignore the READDIR verifier for now. */
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(0);
resp->xdr.buf->head[0].iov_len = ((char *)resp->xdr.p)
- (char *)resp->xdr.buf->head[0].iov_base;
/*
* Number of bytes left for directory entries allowing for the
* final 8 bytes of the readdir and a following failed op:
*/
bytes_left = xdr->buf->buflen - xdr->buf->len
- COMPOUND_ERR_SLACK_SPACE - 8;
if (bytes_left < 0) {
nfserr = nfserr_resource;
goto err_no_verf;
}
maxcount = min_t(u32, readdir->rd_maxcount, INT_MAX);
/*
* Note the RFC defines rd_maxcount as the size of the
* READDIR4resok structure, which includes the verifier above
* and the 8 bytes encoded at the end of this function:
*/
if (maxcount < 16) {
nfserr = nfserr_toosmall;
goto err_no_verf;
}
maxcount = min_t(int, maxcount-16, bytes_left);
/* RFC 3530 14.2.24 allows us to ignore dircount when it's 0: */
if (!readdir->rd_dircount)
readdir->rd_dircount = INT_MAX;
readdir->xdr = xdr;
readdir->rd_maxcount = maxcount;
readdir->common.err = 0;
readdir->cookie_offset = 0;
offset = readdir->rd_cookie;
nfserr = nfsd_readdir(readdir->rd_rqstp, readdir->rd_fhp,
&offset,
&readdir->common, nfsd4_encode_dirent);
if (nfserr == nfs_ok &&
readdir->common.err == nfserr_toosmall &&
xdr->buf->len == starting_len + 8) {
/* nothing encoded; which limit did we hit?: */
if (maxcount - 16 < bytes_left)
/* It was the fault of rd_maxcount: */
nfserr = nfserr_toosmall;
else
/* We ran out of buffer space: */
nfserr = nfserr_resource;
}
if (nfserr)
goto err_no_verf;
if (readdir->cookie_offset) {
wire_offset = cpu_to_be64(offset);
write_bytes_to_xdr_buf(xdr->buf, readdir->cookie_offset,
&wire_offset, 8);
}
p = xdr_reserve_space(xdr, 8);
if (!p) {
WARN_ON_ONCE(1);
goto err_no_verf;
}
*p++ = 0; /* no more entries */
*p++ = htonl(readdir->common.err == nfserr_eof);
return 0;
err_no_verf:
xdr_truncate_encode(xdr, starting_len);
return nfserr;
}
static __be32
nfsd4_encode_remove(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_remove *remove)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
if (!nfserr) {
p = xdr_reserve_space(xdr, 20);
if (!p)
return nfserr_resource;
p = encode_cinfo(p, &remove->rm_cinfo);
}
return nfserr;
}
static __be32
nfsd4_encode_rename(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_rename *rename)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
if (!nfserr) {
p = xdr_reserve_space(xdr, 40);
if (!p)
return nfserr_resource;
p = encode_cinfo(p, &rename->rn_sinfo);
p = encode_cinfo(p, &rename->rn_tinfo);
}
return nfserr;
}
static __be32
nfsd4_do_encode_secinfo(struct xdr_stream *xdr,
__be32 nfserr, struct svc_export *exp)
{
u32 i, nflavs, supported;
struct exp_flavor_info *flavs;
struct exp_flavor_info def_flavs[2];
__be32 *p, *flavorsp;
static bool report = true;
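/* Warn about an unsupported flavor only until the first reply that had to omit one. */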
if (nfserr)
goto out;
nfserr = nfserr_resource;
if (exp->ex_nflavors) {
flavs = exp->ex_flavors;
nflavs = exp->ex_nflavors;
} else { /* Handling of some defaults in absence of real secinfo: */
flavs = def_flavs;
if (exp->ex_client->flavour->flavour == RPC_AUTH_UNIX) {
nflavs = 2;
flavs[0].pseudoflavor = RPC_AUTH_UNIX;
flavs[1].pseudoflavor = RPC_AUTH_NULL;
} else if (exp->ex_client->flavour->flavour == RPC_AUTH_GSS) {
nflavs = 1;
flavs[0].pseudoflavor
= svcauth_gss_flavor(exp->ex_client);
} else {
nflavs = 1;
flavs[0].pseudoflavor
= exp->ex_client->flavour->flavour;
}
}
supported = 0;
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out;
flavorsp = p++; /* to be backfilled later */
for (i = 0; i < nflavs; i++) {
rpc_authflavor_t pf = flavs[i].pseudoflavor;
struct rpcsec_gss_info info;
if (rpcauth_get_gssinfo(pf, &info) == 0) {
supported++;
p = xdr_reserve_space(xdr, 4 + 4 +
XDR_LEN(info.oid.len) + 4 + 4);
if (!p)
goto out;
*p++ = cpu_to_be32(RPC_AUTH_GSS);
p = xdr_encode_opaque(p, info.oid.data, info.oid.len);
*p++ = cpu_to_be32(info.qop);
*p++ = cpu_to_be32(info.service);
} else if (pf < RPC_AUTH_MAXFLAVOR) {
supported++;
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out;
*p++ = cpu_to_be32(pf);
} else {
if (report)
pr_warn("NFS: SECINFO: security flavor %u "
"is not supported\n", pf);
}
}
if (nflavs != supported)
report = false;
*flavorsp = htonl(supported);
nfserr = 0;
out:
if (exp)
exp_put(exp);
return nfserr;
}
static __be32
nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_secinfo *secinfo)
{
struct xdr_stream *xdr = &resp->xdr;
return nfsd4_do_encode_secinfo(xdr, nfserr, secinfo->si_exp);
}
static __be32
nfsd4_encode_secinfo_no_name(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_secinfo_no_name *secinfo)
{
struct xdr_stream *xdr = &resp->xdr;
return nfsd4_do_encode_secinfo(xdr, nfserr, secinfo->sin_exp);
}
/*
* The SETATTR encode routine is special -- it always encodes a bitmap,
* regardless of the error status.
*/
static __be32
nfsd4_encode_setattr(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_setattr *setattr)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
p = xdr_reserve_space(xdr, 16);
if (!p)
return nfserr_resource;
if (nfserr) {
*p++ = cpu_to_be32(3);
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(0);
}
else {
*p++ = cpu_to_be32(3);
*p++ = cpu_to_be32(setattr->sa_bmval[0]);
*p++ = cpu_to_be32(setattr->sa_bmval[1]);
*p++ = cpu_to_be32(setattr->sa_bmval[2]);
}
return nfserr;
}
static __be32
nfsd4_encode_setclientid(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_setclientid *scd)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
if (!nfserr) {
p = xdr_reserve_space(xdr, 8 + NFS4_VERIFIER_SIZE);
if (!p)
return nfserr_resource;
p = xdr_encode_opaque_fixed(p, &scd->se_clientid, 8);
p = xdr_encode_opaque_fixed(p, &scd->se_confirm,
NFS4_VERIFIER_SIZE);
}
else if (nfserr == nfserr_clid_inuse) {
p = xdr_reserve_space(xdr, 8);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(0);
}
return nfserr;
}
static __be32
nfsd4_encode_write(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_write *write)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
if (!nfserr) {
p = xdr_reserve_space(xdr, 16);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(write->wr_bytes_written);
*p++ = cpu_to_be32(write->wr_how_written);
p = xdr_encode_opaque_fixed(p, write->wr_verifier.data,
NFS4_VERIFIER_SIZE);
}
return nfserr;
}
static __be32
nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_exchange_id *exid)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
char *major_id;
char *server_scope;
int major_id_sz;
int server_scope_sz;
int status = 0;
uint64_t minor_id = 0;
if (nfserr)
return nfserr;
major_id = utsname()->nodename;
major_id_sz = strlen(major_id);
server_scope = utsname()->nodename;
server_scope_sz = strlen(server_scope);
p = xdr_reserve_space(xdr,
8 /* eir_clientid */ +
4 /* eir_sequenceid */ +
4 /* eir_flags */ +
4 /* spr_how */);
if (!p)
return nfserr_resource;
p = xdr_encode_opaque_fixed(p, &exid->clientid, 8);
*p++ = cpu_to_be32(exid->seqid);
*p++ = cpu_to_be32(exid->flags);
*p++ = cpu_to_be32(exid->spa_how);
switch (exid->spa_how) {
case SP4_NONE:
break;
case SP4_MACH_CRED:
/* spo_must_enforce bitmap: */
status = nfsd4_encode_bitmap(xdr,
exid->spo_must_enforce[0],
exid->spo_must_enforce[1],
exid->spo_must_enforce[2]);
if (status)
goto out;
/* spo_must_allow bitmap: */
status = nfsd4_encode_bitmap(xdr,
exid->spo_must_allow[0],
exid->spo_must_allow[1],
exid->spo_must_allow[2]);
if (status)
goto out;
break;
default:
WARN_ON_ONCE(1);
}
p = xdr_reserve_space(xdr,
8 /* so_minor_id */ +
4 /* so_major_id.len */ +
(XDR_QUADLEN(major_id_sz) * 4) +
4 /* eir_server_scope.len */ +
(XDR_QUADLEN(server_scope_sz) * 4) +
4 /* eir_server_impl_id.count (0) */);
if (!p)
return nfserr_resource;
/* The server_owner struct */
p = xdr_encode_hyper(p, minor_id); /* Minor id */
/* major id */
p = xdr_encode_opaque(p, major_id, major_id_sz);
/* Server scope */
p = xdr_encode_opaque(p, server_scope, server_scope_sz);
/* Implementation id */
*p++ = cpu_to_be32(0); /* zero length nfs_impl_id4 array */
return 0;
out:
return status;
}
static __be32
nfsd4_encode_create_session(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_create_session *sess)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
if (nfserr)
return nfserr;
p = xdr_reserve_space(xdr, 24);
if (!p)
return nfserr_resource;
p = xdr_encode_opaque_fixed(p, sess->sessionid.data,
NFS4_MAX_SESSIONID_LEN);
*p++ = cpu_to_be32(sess->seqid);
*p++ = cpu_to_be32(sess->flags);
p = xdr_reserve_space(xdr, 28);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(0); /* headerpadsz */
*p++ = cpu_to_be32(sess->fore_channel.maxreq_sz);
*p++ = cpu_to_be32(sess->fore_channel.maxresp_sz);
*p++ = cpu_to_be32(sess->fore_channel.maxresp_cached);
*p++ = cpu_to_be32(sess->fore_channel.maxops);
*p++ = cpu_to_be32(sess->fore_channel.maxreqs);
*p++ = cpu_to_be32(sess->fore_channel.nr_rdma_attrs);
if (sess->fore_channel.nr_rdma_attrs) {
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(sess->fore_channel.rdma_attrs);
}
p = xdr_reserve_space(xdr, 28);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(0); /* headerpadsz */
*p++ = cpu_to_be32(sess->back_channel.maxreq_sz);
*p++ = cpu_to_be32(sess->back_channel.maxresp_sz);
*p++ = cpu_to_be32(sess->back_channel.maxresp_cached);
*p++ = cpu_to_be32(sess->back_channel.maxops);
*p++ = cpu_to_be32(sess->back_channel.maxreqs);
*p++ = cpu_to_be32(sess->back_channel.nr_rdma_attrs);
if (sess->back_channel.nr_rdma_attrs) {
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(sess->back_channel.rdma_attrs);
}
return 0;
}
static __be32
nfsd4_encode_sequence(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_sequence *seq)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
if (nfserr)
return nfserr;
p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN + 20);
if (!p)
return nfserr_resource;
p = xdr_encode_opaque_fixed(p, seq->sessionid.data,
NFS4_MAX_SESSIONID_LEN);
*p++ = cpu_to_be32(seq->seqid);
*p++ = cpu_to_be32(seq->slotid);
/* Note slotid's are numbered from zero: */
*p++ = cpu_to_be32(seq->maxslots - 1); /* sr_highest_slotid */
*p++ = cpu_to_be32(seq->maxslots - 1); /* sr_target_highest_slotid */
*p++ = cpu_to_be32(seq->status_flags);
resp->cstate.data_offset = xdr->buf->len; /* DRC cache data pointer */
return 0;
}
static __be32
nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_test_stateid *test_stateid)
{
struct xdr_stream *xdr = &resp->xdr;
struct nfsd4_test_stateid_id *stateid, *next;
__be32 *p;
if (nfserr)
return nfserr;
p = xdr_reserve_space(xdr, 4 + (4 * test_stateid->ts_num_ids));
if (!p)
return nfserr_resource;
*p++ = htonl(test_stateid->ts_num_ids);
list_for_each_entry_safe(stateid, next, &test_stateid->ts_stateid_list, ts_id_list) {
*p++ = stateid->ts_id_status;
}
return nfserr;
}
#ifdef CONFIG_NFSD_PNFS
static __be32
nfsd4_encode_getdeviceinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_getdeviceinfo *gdev)
{
struct xdr_stream *xdr = &resp->xdr;
const struct nfsd4_layout_ops *ops =
nfsd4_layout_ops[gdev->gd_layout_type];
u32 starting_len = xdr->buf->len, needed_len;
__be32 *p;
dprintk("%s: err %d\n", __func__, be32_to_cpu(nfserr));
if (nfserr)
goto out;
nfserr = nfserr_resource;
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out;
*p++ = cpu_to_be32(gdev->gd_layout_type);
/* If maxcount is 0 then just update notifications */
if (gdev->gd_maxcount != 0) {
nfserr = ops->encode_getdeviceinfo(xdr, gdev);
if (nfserr) {
/*
* We don't bother to burden the layout drivers with
* enforcing gd_maxcount, just tell the client to
* come back with a bigger buffer if it's not enough.
*/
if (xdr->buf->len + 4 > gdev->gd_maxcount)
goto toosmall;
goto out;
}
}
nfserr = nfserr_resource;
if (gdev->gd_notify_types) {
p = xdr_reserve_space(xdr, 4 + 4);
if (!p)
goto out;
*p++ = cpu_to_be32(1); /* bitmap length */
*p++ = cpu_to_be32(gdev->gd_notify_types);
} else {
p = xdr_reserve_space(xdr, 4);
if (!p)
goto out;
*p++ = 0;
}
nfserr = 0;
out:
kfree(gdev->gd_device);
dprintk("%s: done: %d\n", __func__, be32_to_cpu(nfserr));
return nfserr;
toosmall:
dprintk("%s: maxcount too small\n", __func__);
needed_len = xdr->buf->len + 4 /* notifications */;
xdr_truncate_encode(xdr, starting_len);
p = xdr_reserve_space(xdr, 4);
if (!p) {
nfserr = nfserr_resource;
} else {
*p++ = cpu_to_be32(needed_len);
nfserr = nfserr_toosmall;
}
goto out;
}
static __be32
nfsd4_encode_layoutget(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_layoutget *lgp)
{
struct xdr_stream *xdr = &resp->xdr;
const struct nfsd4_layout_ops *ops =
nfsd4_layout_ops[lgp->lg_layout_type];
__be32 *p;
dprintk("%s: err %d\n", __func__, nfserr);
if (nfserr)
goto out;
nfserr = nfserr_resource;
p = xdr_reserve_space(xdr, 36 + sizeof(stateid_opaque_t));
if (!p)
goto out;
*p++ = cpu_to_be32(1); /* we always set return-on-close */
*p++ = cpu_to_be32(lgp->lg_sid.si_generation);
p = xdr_encode_opaque_fixed(p, &lgp->lg_sid.si_opaque,
sizeof(stateid_opaque_t));
*p++ = cpu_to_be32(1); /* we always return a single layout */
p = xdr_encode_hyper(p, lgp->lg_seg.offset);
p = xdr_encode_hyper(p, lgp->lg_seg.length);
*p++ = cpu_to_be32(lgp->lg_seg.iomode);
*p++ = cpu_to_be32(lgp->lg_layout_type);
nfserr = ops->encode_layoutget(xdr, lgp);
out:
kfree(lgp->lg_content);
return nfserr;
}
static __be32
nfsd4_encode_layoutcommit(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_layoutcommit *lcp)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
if (nfserr)
return nfserr;
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(lcp->lc_size_chg);
if (lcp->lc_size_chg) {
p = xdr_reserve_space(xdr, 8);
if (!p)
return nfserr_resource;
p = xdr_encode_hyper(p, lcp->lc_newsize);
}
return nfs_ok;
}
static __be32
nfsd4_encode_layoutreturn(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_layoutreturn *lrp)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
if (nfserr)
return nfserr;
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(lrp->lrs_present);
if (lrp->lrs_present)
return nfsd4_encode_stateid(xdr, &lrp->lr_sid);
return nfs_ok;
}
#endif /* CONFIG_NFSD_PNFS */
static __be32
nfsd42_encode_write_res(struct nfsd4_compoundres *resp, struct nfsd42_write_res *write)
{
__be32 *p;
p = xdr_reserve_space(&resp->xdr, 4 + 8 + 4 + NFS4_VERIFIER_SIZE);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(0);
p = xdr_encode_hyper(p, write->wr_bytes_written);
*p++ = cpu_to_be32(write->wr_stable_how);
p = xdr_encode_opaque_fixed(p, write->wr_verifier.data,
NFS4_VERIFIER_SIZE);
return nfs_ok;
}
static __be32
nfsd4_encode_copy(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_copy *copy)
{
__be32 *p;
if (!nfserr) {
nfserr = nfsd42_encode_write_res(resp, &copy->cp_res);
if (nfserr)
return nfserr;
p = xdr_reserve_space(&resp->xdr, 4 + 4);
*p++ = cpu_to_be32(copy->cp_consecutive);
*p++ = cpu_to_be32(copy->cp_synchronous);
}
return nfserr;
}
static __be32
nfsd4_encode_seek(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_seek *seek)
{
__be32 *p;
if (nfserr)
return nfserr;
p = xdr_reserve_space(&resp->xdr, 4 + 8);
*p++ = cpu_to_be32(seek->seek_eof);
p = xdr_encode_hyper(p, seek->seek_pos);
return nfserr;
}
static __be32
nfsd4_encode_noop(struct nfsd4_compoundres *resp, __be32 nfserr, void *p)
{
return nfserr;
}
typedef __be32(* nfsd4_enc)(struct nfsd4_compoundres *, __be32, void *);
/*
* Note: nfsd4_enc_ops vector is shared for v4.0 and v4.1
* since we don't need to filter out obsolete ops as this is
* done in the decoding phase.
*/
static nfsd4_enc nfsd4_enc_ops[] = {
[OP_ACCESS] = (nfsd4_enc)nfsd4_encode_access,
[OP_CLOSE] = (nfsd4_enc)nfsd4_encode_close,
[OP_COMMIT] = (nfsd4_enc)nfsd4_encode_commit,
[OP_CREATE] = (nfsd4_enc)nfsd4_encode_create,
[OP_DELEGPURGE] = (nfsd4_enc)nfsd4_encode_noop,
[OP_DELEGRETURN] = (nfsd4_enc)nfsd4_encode_noop,
[OP_GETATTR] = (nfsd4_enc)nfsd4_encode_getattr,
[OP_GETFH] = (nfsd4_enc)nfsd4_encode_getfh,
[OP_LINK] = (nfsd4_enc)nfsd4_encode_link,
[OP_LOCK] = (nfsd4_enc)nfsd4_encode_lock,
[OP_LOCKT] = (nfsd4_enc)nfsd4_encode_lockt,
[OP_LOCKU] = (nfsd4_enc)nfsd4_encode_locku,
[OP_LOOKUP] = (nfsd4_enc)nfsd4_encode_noop,
[OP_LOOKUPP] = (nfsd4_enc)nfsd4_encode_noop,
[OP_NVERIFY] = (nfsd4_enc)nfsd4_encode_noop,
[OP_OPEN] = (nfsd4_enc)nfsd4_encode_open,
[OP_OPENATTR] = (nfsd4_enc)nfsd4_encode_noop,
[OP_OPEN_CONFIRM] = (nfsd4_enc)nfsd4_encode_open_confirm,
[OP_OPEN_DOWNGRADE] = (nfsd4_enc)nfsd4_encode_open_downgrade,
[OP_PUTFH] = (nfsd4_enc)nfsd4_encode_noop,
[OP_PUTPUBFH] = (nfsd4_enc)nfsd4_encode_noop,
[OP_PUTROOTFH] = (nfsd4_enc)nfsd4_encode_noop,
[OP_READ] = (nfsd4_enc)nfsd4_encode_read,
[OP_READDIR] = (nfsd4_enc)nfsd4_encode_readdir,
[OP_READLINK] = (nfsd4_enc)nfsd4_encode_readlink,
[OP_REMOVE] = (nfsd4_enc)nfsd4_encode_remove,
[OP_RENAME] = (nfsd4_enc)nfsd4_encode_rename,
[OP_RENEW] = (nfsd4_enc)nfsd4_encode_noop,
[OP_RESTOREFH] = (nfsd4_enc)nfsd4_encode_noop,
[OP_SAVEFH] = (nfsd4_enc)nfsd4_encode_noop,
[OP_SECINFO] = (nfsd4_enc)nfsd4_encode_secinfo,
[OP_SETATTR] = (nfsd4_enc)nfsd4_encode_setattr,
[OP_SETCLIENTID] = (nfsd4_enc)nfsd4_encode_setclientid,
[OP_SETCLIENTID_CONFIRM] = (nfsd4_enc)nfsd4_encode_noop,
[OP_VERIFY] = (nfsd4_enc)nfsd4_encode_noop,
[OP_WRITE] = (nfsd4_enc)nfsd4_encode_write,
[OP_RELEASE_LOCKOWNER] = (nfsd4_enc)nfsd4_encode_noop,
/* NFSv4.1 operations */
[OP_BACKCHANNEL_CTL] = (nfsd4_enc)nfsd4_encode_noop,
[OP_BIND_CONN_TO_SESSION] = (nfsd4_enc)nfsd4_encode_bind_conn_to_session,
[OP_EXCHANGE_ID] = (nfsd4_enc)nfsd4_encode_exchange_id,
[OP_CREATE_SESSION] = (nfsd4_enc)nfsd4_encode_create_session,
[OP_DESTROY_SESSION] = (nfsd4_enc)nfsd4_encode_noop,
[OP_FREE_STATEID] = (nfsd4_enc)nfsd4_encode_noop,
[OP_GET_DIR_DELEGATION] = (nfsd4_enc)nfsd4_encode_noop,
#ifdef CONFIG_NFSD_PNFS
[OP_GETDEVICEINFO] = (nfsd4_enc)nfsd4_encode_getdeviceinfo,
[OP_GETDEVICELIST] = (nfsd4_enc)nfsd4_encode_noop,
[OP_LAYOUTCOMMIT] = (nfsd4_enc)nfsd4_encode_layoutcommit,
[OP_LAYOUTGET] = (nfsd4_enc)nfsd4_encode_layoutget,
[OP_LAYOUTRETURN] = (nfsd4_enc)nfsd4_encode_layoutreturn,
#else
[OP_GETDEVICEINFO] = (nfsd4_enc)nfsd4_encode_noop,
[OP_GETDEVICELIST] = (nfsd4_enc)nfsd4_encode_noop,
[OP_LAYOUTCOMMIT] = (nfsd4_enc)nfsd4_encode_noop,
[OP_LAYOUTGET] = (nfsd4_enc)nfsd4_encode_noop,
[OP_LAYOUTRETURN] = (nfsd4_enc)nfsd4_encode_noop,
#endif
[OP_SECINFO_NO_NAME] = (nfsd4_enc)nfsd4_encode_secinfo_no_name,
[OP_SEQUENCE] = (nfsd4_enc)nfsd4_encode_sequence,
[OP_SET_SSV] = (nfsd4_enc)nfsd4_encode_noop,
[OP_TEST_STATEID] = (nfsd4_enc)nfsd4_encode_test_stateid,
[OP_WANT_DELEGATION] = (nfsd4_enc)nfsd4_encode_noop,
[OP_DESTROY_CLIENTID] = (nfsd4_enc)nfsd4_encode_noop,
[OP_RECLAIM_COMPLETE] = (nfsd4_enc)nfsd4_encode_noop,
/* NFSv4.2 operations */
[OP_ALLOCATE] = (nfsd4_enc)nfsd4_encode_noop,
[OP_COPY] = (nfsd4_enc)nfsd4_encode_copy,
[OP_COPY_NOTIFY] = (nfsd4_enc)nfsd4_encode_noop,
[OP_DEALLOCATE] = (nfsd4_enc)nfsd4_encode_noop,
[OP_IO_ADVISE] = (nfsd4_enc)nfsd4_encode_noop,
[OP_LAYOUTERROR] = (nfsd4_enc)nfsd4_encode_noop,
[OP_LAYOUTSTATS] = (nfsd4_enc)nfsd4_encode_noop,
[OP_OFFLOAD_CANCEL] = (nfsd4_enc)nfsd4_encode_noop,
[OP_OFFLOAD_STATUS] = (nfsd4_enc)nfsd4_encode_noop,
[OP_READ_PLUS] = (nfsd4_enc)nfsd4_encode_noop,
[OP_SEEK] = (nfsd4_enc)nfsd4_encode_seek,
[OP_WRITE_SAME] = (nfsd4_enc)nfsd4_encode_noop,
[OP_CLONE] = (nfsd4_enc)nfsd4_encode_noop,
};
/*
* Calculate whether we still have space to encode respsize bytes.
* There are two considerations:
* - For NFS versions >=4.1, the size of the reply must stay within
* session limits
* - For all NFS versions, we must stay within limited preallocated
* buffer space.
*
* This is called before the operation is processed, so can only provide
* an upper estimate. For some side-effect-free operations (such as
* getattr), it's not necessarily a problem if that estimate is wrong,
* as we can fail it after processing without significant side effects.
*/
__be32 nfsd4_check_resp_size(struct nfsd4_compoundres *resp, u32 respsize)
{
struct xdr_buf *buf = &resp->rqstp->rq_res;
struct nfsd4_slot *slot = resp->cstate.slot;
if (buf->len + respsize <= buf->buflen)
return nfs_ok;
if (!nfsd4_has_session(&resp->cstate))
return nfserr_resource;
if (slot->sl_flags & NFSD4_SLOT_CACHETHIS) {
WARN_ON_ONCE(1);
return nfserr_rep_too_big_to_cache;
}
return nfserr_rep_too_big;
}
void
nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
{
struct xdr_stream *xdr = &resp->xdr;
struct nfs4_stateowner *so = resp->cstate.replay_owner;
struct svc_rqst *rqstp = resp->rqstp;
int post_err_offset;
nfsd4_enc encoder;
__be32 *p;
p = xdr_reserve_space(xdr, 8);
if (!p) {
WARN_ON_ONCE(1);
return;
}
*p++ = cpu_to_be32(op->opnum);
post_err_offset = xdr->buf->len;
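/*
 * post_err_offset marks the point just past the (not yet written)
 * status word: it lets us truncate a partially encoded op and backfill
 * op->status at the "status:" label below.
 */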
if (op->opnum == OP_ILLEGAL)
goto status;
BUG_ON(op->opnum < 0 || op->opnum >= ARRAY_SIZE(nfsd4_enc_ops) ||
!nfsd4_enc_ops[op->opnum]);
encoder = nfsd4_enc_ops[op->opnum];
op->status = encoder(resp, op->status, &op->u);
xdr_commit_encode(xdr);
/* nfsd4_check_resp_size guarantees enough room for error status */
if (!op->status) {
int space_needed = 0;
if (!nfsd4_last_compound_op(rqstp))
space_needed = COMPOUND_ERR_SLACK_SPACE;
op->status = nfsd4_check_resp_size(resp, space_needed);
}
if (op->status == nfserr_resource && nfsd4_has_session(&resp->cstate)) {
struct nfsd4_slot *slot = resp->cstate.slot;
if (slot->sl_flags & NFSD4_SLOT_CACHETHIS)
op->status = nfserr_rep_too_big_to_cache;
else
op->status = nfserr_rep_too_big;
}
if (op->status == nfserr_resource ||
op->status == nfserr_rep_too_big ||
op->status == nfserr_rep_too_big_to_cache) {
/*
* The operation may have already been encoded or
* partially encoded. No op returns anything additional
* in the case of one of these three errors, so we can
* just truncate back to after the status. But it's a
* bug if we had to do this on a non-idempotent op:
*/
warn_on_nonidempotent_op(op);
xdr_truncate_encode(xdr, post_err_offset);
}
if (so) {
int len = xdr->buf->len - post_err_offset;
so->so_replay.rp_status = op->status;
so->so_replay.rp_buflen = len;
read_bytes_from_xdr_buf(xdr->buf, post_err_offset,
so->so_replay.rp_buf, len);
}
status:
/* Note that op->status is already in network byte order: */
write_bytes_to_xdr_buf(xdr->buf, post_err_offset - 4, &op->status, 4);
}
/*
* Encode the reply stored in the stateowner reply cache
*
* XDR note: do not encode rp->rp_buflen: the buffer contains the
* previously sent already encoded operation.
*/
void
nfsd4_encode_replay(struct xdr_stream *xdr, struct nfsd4_op *op)
{
__be32 *p;
struct nfs4_replay *rp = op->replay;
BUG_ON(!rp);
p = xdr_reserve_space(xdr, 8 + rp->rp_buflen);
if (!p) {
WARN_ON_ONCE(1);
return;
}
*p++ = cpu_to_be32(op->opnum);
*p++ = rp->rp_status; /* already xdr'ed */
p = xdr_encode_opaque_fixed(p, rp->rp_buf, rp->rp_buflen);
}
int
nfs4svc_encode_voidres(struct svc_rqst *rqstp, __be32 *p, void *dummy)
{
return xdr_ressize_check(rqstp, p);
}
int nfsd4_release_compoundargs(void *rq, __be32 *p, void *resp)
{
struct svc_rqst *rqstp = rq;
struct nfsd4_compoundargs *args = rqstp->rq_argp;
if (args->ops != args->iops) {
kfree(args->ops);
args->ops = args->iops;
}
kfree(args->tmpp);
args->tmpp = NULL;
while (args->to_free) {
struct svcxdr_tmpbuf *tb = args->to_free;
args->to_free = tb->next;
kfree(tb);
}
return 1;
}
int
nfs4svc_decode_compoundargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compoundargs *args)
{
if (rqstp->rq_arg.head[0].iov_len % 4) {
/* client is nuts */
dprintk("%s: compound not properly padded! (peeraddr=%pISc xid=0x%x)",
__func__, svc_addr(rqstp), be32_to_cpu(rqstp->rq_xid));
return 0;
}
args->p = p;
args->end = rqstp->rq_arg.head[0].iov_base + rqstp->rq_arg.head[0].iov_len;
args->pagelist = rqstp->rq_arg.pages;
args->pagelen = rqstp->rq_arg.page_len;
args->tmpp = NULL;
args->to_free = NULL;
args->ops = args->iops;
args->rqstp = rqstp;
return !nfsd4_decode_compound(args);
}
int
nfs4svc_encode_compoundres(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compoundres *resp)
{
/*
* All that remains is to write the tag and operation count...
*/
struct xdr_buf *buf = resp->xdr.buf;
WARN_ON_ONCE(buf->len != buf->head[0].iov_len + buf->page_len +
buf->tail[0].iov_len);
rqstp->rq_next_page = resp->xdr.page_ptr + 1;
p = resp->tagp;
*p++ = htonl(resp->taglen);
memcpy(p, resp->tag, resp->taglen);
p += XDR_QUADLEN(resp->taglen);
*p++ = htonl(resp->opcnt);
nfsd4_sequence_done(resp);
return 1;
}
/*
* Local variables:
* c-basic-offset: 8
* End:
*/
| ./CrossVul/dataset_final_sorted/CWE-404/c/bad_3351_6 |
crossvul-cpp_data_bad_3267_1 | /* Manage a process's keyrings
*
* Copyright (C) 2004-2005, 2008 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
#include <linux/keyctl.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/security.h>
#include <linux/user_namespace.h>
#include <linux/uaccess.h>
#include "internal.h"
/* Session keyring create vs join semaphore */
static DEFINE_MUTEX(key_session_mutex);
/* User keyring creation semaphore */
static DEFINE_MUTEX(key_user_keyring_mutex);
/* The root user's tracking struct */
struct key_user root_key_user = {
.usage = ATOMIC_INIT(3),
.cons_lock = __MUTEX_INITIALIZER(root_key_user.cons_lock),
.lock = __SPIN_LOCK_UNLOCKED(root_key_user.lock),
.nkeys = ATOMIC_INIT(2),
.nikeys = ATOMIC_INIT(2),
.uid = GLOBAL_ROOT_UID,
};
/*
* Install the user and user session keyrings for the current process's UID.
*/
int install_user_keyrings(void)
{
struct user_struct *user;
const struct cred *cred;
struct key *uid_keyring, *session_keyring;
key_perm_t user_keyring_perm;
char buf[20];
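/* Big enough for "_uid_ses." plus ten decimal digits of a u32 and a NUL. */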
int ret;
uid_t uid;
user_keyring_perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL;
cred = current_cred();
user = cred->user;
uid = from_kuid(cred->user_ns, user->uid);
kenter("%p{%u}", user, uid);
if (user->uid_keyring && user->session_keyring) {
kleave(" = 0 [exist]");
return 0;
}
mutex_lock(&key_user_keyring_mutex);
ret = 0;
if (!user->uid_keyring) {
/* get the UID-specific keyring
* - there may be one in existence already as it may have been
* pinned by a session, but the user_struct pointing to it
* may have been destroyed by setuid */
sprintf(buf, "_uid.%u", uid);
uid_keyring = find_keyring_by_name(buf, true);
if (IS_ERR(uid_keyring)) {
uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID,
cred, user_keyring_perm,
KEY_ALLOC_IN_QUOTA,
NULL, NULL);
if (IS_ERR(uid_keyring)) {
ret = PTR_ERR(uid_keyring);
goto error;
}
}
/* get a default session keyring (which might also exist
* already) */
sprintf(buf, "_uid_ses.%u", uid);
session_keyring = find_keyring_by_name(buf, true);
if (IS_ERR(session_keyring)) {
session_keyring =
keyring_alloc(buf, user->uid, INVALID_GID,
cred, user_keyring_perm,
KEY_ALLOC_IN_QUOTA,
NULL, NULL);
if (IS_ERR(session_keyring)) {
ret = PTR_ERR(session_keyring);
goto error_release;
}
/* we install a link from the user session keyring to
* the user keyring */
ret = key_link(session_keyring, uid_keyring);
if (ret < 0)
goto error_release_both;
}
/* install the keyrings */
user->uid_keyring = uid_keyring;
user->session_keyring = session_keyring;
}
mutex_unlock(&key_user_keyring_mutex);
kleave(" = 0");
return 0;
error_release_both:
key_put(session_keyring);
error_release:
key_put(uid_keyring);
error:
mutex_unlock(&key_user_keyring_mutex);
kleave(" = %d", ret);
return ret;
}
/*
* Install a fresh thread keyring directly to new credentials. This keyring is
* allowed to overrun the quota.
*/
int install_thread_keyring_to_cred(struct cred *new)
{
struct key *keyring;
keyring = keyring_alloc("_tid", new->uid, new->gid, new,
KEY_POS_ALL | KEY_USR_VIEW,
KEY_ALLOC_QUOTA_OVERRUN,
NULL, NULL);
if (IS_ERR(keyring))
return PTR_ERR(keyring);
new->thread_keyring = keyring;
return 0;
}
/*
* Install a fresh thread keyring, discarding the old one.
*/
static int install_thread_keyring(void)
{
struct cred *new;
int ret;
new = prepare_creds();
if (!new)
return -ENOMEM;
BUG_ON(new->thread_keyring);
ret = install_thread_keyring_to_cred(new);
if (ret < 0) {
abort_creds(new);
return ret;
}
return commit_creds(new);
}
/*
* Install a process keyring directly to a credentials struct.
*
* Returns -EEXIST if there was already a process keyring, 0 if one was
* installed, and some other negative value on any other error.
*/
int install_process_keyring_to_cred(struct cred *new)
{
struct key *keyring;
if (new->process_keyring)
return -EEXIST;
keyring = keyring_alloc("_pid", new->uid, new->gid, new,
KEY_POS_ALL | KEY_USR_VIEW,
KEY_ALLOC_QUOTA_OVERRUN,
NULL, NULL);
if (IS_ERR(keyring))
return PTR_ERR(keyring);
new->process_keyring = keyring;
return 0;
}
/*
* Make sure a process keyring is installed for the current process. The
* existing process keyring is not replaced.
*
* Returns 0 if there is a process keyring by the end of this function, some
* error otherwise.
*/
static int install_process_keyring(void)
{
struct cred *new;
int ret;
new = prepare_creds();
if (!new)
return -ENOMEM;
ret = install_process_keyring_to_cred(new);
if (ret < 0) {
abort_creds(new);
return ret != -EEXIST ? ret : 0;
}
return commit_creds(new);
}
/*
* Install a session keyring directly to a credentials struct.
*/
int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
{
unsigned long flags;
struct key *old;
might_sleep();
/* create an empty session keyring */
if (!keyring) {
flags = KEY_ALLOC_QUOTA_OVERRUN;
if (cred->session_keyring)
flags = KEY_ALLOC_IN_QUOTA;
keyring = keyring_alloc("_ses", cred->uid, cred->gid, cred,
KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
flags, NULL, NULL);
if (IS_ERR(keyring))
return PTR_ERR(keyring);
} else {
__key_get(keyring);
}
/* install the keyring */
old = cred->session_keyring;
rcu_assign_pointer(cred->session_keyring, keyring);
if (old)
key_put(old);
return 0;
}
/*
* Install a session keyring, discarding the old one. If a keyring is not
* supplied, an empty one is invented.
*/
static int install_session_keyring(struct key *keyring)
{
struct cred *new;
int ret;
new = prepare_creds();
if (!new)
return -ENOMEM;
ret = install_session_keyring_to_cred(new, keyring);
if (ret < 0) {
abort_creds(new);
return ret;
}
return commit_creds(new);
}
/*
* Handle the fsuid changing.
*/
void key_fsuid_changed(struct task_struct *tsk)
{
/* update the ownership of the thread keyring */
BUG_ON(!tsk->cred);
if (tsk->cred->thread_keyring) {
down_write(&tsk->cred->thread_keyring->sem);
tsk->cred->thread_keyring->uid = tsk->cred->fsuid;
up_write(&tsk->cred->thread_keyring->sem);
}
}
/*
* Handle the fsgid changing.
*/
void key_fsgid_changed(struct task_struct *tsk)
{
/* update the ownership of the thread keyring */
BUG_ON(!tsk->cred);
if (tsk->cred->thread_keyring) {
down_write(&tsk->cred->thread_keyring->sem);
tsk->cred->thread_keyring->gid = tsk->cred->fsgid;
up_write(&tsk->cred->thread_keyring->sem);
}
}
/*
* Search the process keyrings attached to the supplied cred for the first
* matching key.
*
* The search criteria are the type and the match function. The description is
* given to the match function as a parameter, but doesn't otherwise influence
* the search. Typically the match function will compare the description
* parameter to the key's description.
*
* This can only search keyrings that grant Search permission to the supplied
* credentials. Keyrings linked to searched keyrings will also be searched if
* they grant Search permission too. Keys can only be found if they grant
* Search permission to the credentials.
*
* Returns a pointer to the key with the key usage count incremented if
* successful, -EAGAIN if we didn't find any matching key or -ENOKEY if we only
* matched negative keys.
*
* In the case of a successful return, the possession attribute is set on the
* returned key reference.
*/
key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx)
{
key_ref_t key_ref, ret, err;
/* we want to return -EAGAIN or -ENOKEY if any of the keyrings were
* searchable, but we failed to find a key or we found a negative key;
* otherwise we want to return a sample error (probably -EACCES) if
* none of the keyrings were searchable
*
* in terms of priority: success > -ENOKEY > -EAGAIN > other error
*/
key_ref = NULL;
ret = NULL;
err = ERR_PTR(-EAGAIN);
/* search the thread keyring first */
if (ctx->cred->thread_keyring) {
key_ref = keyring_search_aux(
make_key_ref(ctx->cred->thread_keyring, 1), ctx);
if (!IS_ERR(key_ref))
goto found;
switch (PTR_ERR(key_ref)) {
case -EAGAIN: /* no key */
case -ENOKEY: /* negative key */
ret = key_ref;
break;
default:
err = key_ref;
break;
}
}
/* search the process keyring second */
if (ctx->cred->process_keyring) {
key_ref = keyring_search_aux(
make_key_ref(ctx->cred->process_keyring, 1), ctx);
if (!IS_ERR(key_ref))
goto found;
switch (PTR_ERR(key_ref)) {
case -EAGAIN: /* no key */
if (ret)
break;
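		/* fall through */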
case -ENOKEY: /* negative key */
ret = key_ref;
break;
default:
err = key_ref;
break;
}
}
/* search the session keyring */
if (ctx->cred->session_keyring) {
rcu_read_lock();
key_ref = keyring_search_aux(
make_key_ref(rcu_dereference(ctx->cred->session_keyring), 1),
ctx);
rcu_read_unlock();
if (!IS_ERR(key_ref))
goto found;
switch (PTR_ERR(key_ref)) {
case -EAGAIN: /* no key */
if (ret)
break;
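		/* fall through */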
case -ENOKEY: /* negative key */
ret = key_ref;
break;
default:
err = key_ref;
break;
}
}
/* or search the user-session keyring */
else if (ctx->cred->user->session_keyring) {
key_ref = keyring_search_aux(
make_key_ref(ctx->cred->user->session_keyring, 1),
ctx);
if (!IS_ERR(key_ref))
goto found;
switch (PTR_ERR(key_ref)) {
case -EAGAIN: /* no key */
if (ret)
break;
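		/* fall through */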
case -ENOKEY: /* negative key */
ret = key_ref;
break;
default:
err = key_ref;
break;
}
}
/* no key - decide on the error we're going to go for */
key_ref = ret ? ret : err;
found:
return key_ref;
}
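/*
 * Worked example of the priority rule above (illustrative): suppose the
 * thread keyring search fails with -EACCES, the process keyring search with
 * -EAGAIN and the session keyring search with -ENOKEY.  Then err records
 * -EACCES, ret is first set to the -EAGAIN result and then overwritten by
 * the -ENOKEY one, so the caller sees -ENOKEY, in line with
 * success > -ENOKEY > -EAGAIN > other error.
 */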
/*
* Search the process keyrings attached to the supplied cred for the first
* matching key in the manner of search_my_process_keyrings(), but also search
* the keys attached to the assumed authorisation key using its credentials if
* one is available.
*
 * Returns the same as search_my_process_keyrings().
*/
key_ref_t search_process_keyrings(struct keyring_search_context *ctx)
{
struct request_key_auth *rka;
key_ref_t key_ref, ret = ERR_PTR(-EACCES), err;
might_sleep();
key_ref = search_my_process_keyrings(ctx);
if (!IS_ERR(key_ref))
goto found;
err = key_ref;
/* if this process has an instantiation authorisation key, then we also
* search the keyrings of the process mentioned there
* - we don't permit access to request_key auth keys via this method
*/
if (ctx->cred->request_key_auth &&
ctx->cred == current_cred() &&
ctx->index_key.type != &key_type_request_key_auth
) {
const struct cred *cred = ctx->cred;
/* defend against the auth key being revoked */
down_read(&cred->request_key_auth->sem);
if (key_validate(ctx->cred->request_key_auth) == 0) {
rka = ctx->cred->request_key_auth->payload.data[0];
ctx->cred = rka->cred;
key_ref = search_process_keyrings(ctx);
ctx->cred = cred;
up_read(&cred->request_key_auth->sem);
if (!IS_ERR(key_ref))
goto found;
ret = key_ref;
} else {
up_read(&cred->request_key_auth->sem);
}
}
/* no key - decide on the error we're going to go for */
if (err == ERR_PTR(-ENOKEY) || ret == ERR_PTR(-ENOKEY))
key_ref = ERR_PTR(-ENOKEY);
else if (err == ERR_PTR(-EACCES))
key_ref = ret;
else
key_ref = err;
found:
return key_ref;
}
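/*
 * Note on the recursion above (an observation from the code, not a stated
 * invariant): the recursive call runs with ctx->cred switched to the
 * requester's credentials taken from the authorisation key, so the
 * ctx->cred == current_cred() guard normally fails at the next level and the
 * recursion is in practice bounded to one extra step.
 */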
/*
* See if the key we're looking at is the target key.
*/
bool lookup_user_key_possessed(const struct key *key,
const struct key_match_data *match_data)
{
return key == match_data->raw_data;
}
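/*
 * This is the match function wired into the keyring_search_context in
 * lookup_user_key() below; the search succeeds only on the exact key object
 * supplied in match_data.raw_data, which is how possession of a key given
 * only by ID is established.
 */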
/*
* Look up a key ID given us by userspace with a given permissions mask to get
* the key it refers to.
*
* Flags can be passed to request that special keyrings be created if referred
* to directly, to permit partially constructed keys to be found and to skip
* validity and permission checks on the found key.
*
* Returns a pointer to the key with an incremented usage count if successful;
* -EINVAL if the key ID is invalid; -ENOKEY if the key ID does not correspond
* to a key or the best found key was a negative key; -EKEYREVOKED or
 * -EKEYEXPIRED if the best found key was revoked or expired; -EACCES if the
 * found key doesn't grant the requested permission or the LSM denied access
 * to it;
* or -ENOMEM if a special keyring couldn't be created.
*
* In the case of a successful return, the possession attribute is set on the
* returned key reference.
*/
key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags,
key_perm_t perm)
{
struct keyring_search_context ctx = {
.match_data.cmp = lookup_user_key_possessed,
.match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
.flags = KEYRING_SEARCH_NO_STATE_CHECK,
};
struct request_key_auth *rka;
struct key *key;
key_ref_t key_ref, skey_ref;
int ret;
try_again:
ctx.cred = get_current_cred();
key_ref = ERR_PTR(-ENOKEY);
switch (id) {
case KEY_SPEC_THREAD_KEYRING:
if (!ctx.cred->thread_keyring) {
if (!(lflags & KEY_LOOKUP_CREATE))
goto error;
ret = install_thread_keyring();
if (ret < 0) {
key_ref = ERR_PTR(ret);
goto error;
}
goto reget_creds;
}
key = ctx.cred->thread_keyring;
__key_get(key);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_PROCESS_KEYRING:
if (!ctx.cred->process_keyring) {
if (!(lflags & KEY_LOOKUP_CREATE))
goto error;
ret = install_process_keyring();
if (ret < 0) {
key_ref = ERR_PTR(ret);
goto error;
}
goto reget_creds;
}
key = ctx.cred->process_keyring;
__key_get(key);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_SESSION_KEYRING:
if (!ctx.cred->session_keyring) {
/* always install a session keyring upon access if one
* doesn't exist yet */
ret = install_user_keyrings();
if (ret < 0)
goto error;
if (lflags & KEY_LOOKUP_CREATE)
ret = join_session_keyring(NULL);
else
ret = install_session_keyring(
ctx.cred->user->session_keyring);
if (ret < 0)
goto error;
goto reget_creds;
} else if (ctx.cred->session_keyring ==
ctx.cred->user->session_keyring &&
lflags & KEY_LOOKUP_CREATE) {
ret = join_session_keyring(NULL);
if (ret < 0)
goto error;
goto reget_creds;
}
rcu_read_lock();
key = rcu_dereference(ctx.cred->session_keyring);
__key_get(key);
rcu_read_unlock();
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_USER_KEYRING:
if (!ctx.cred->user->uid_keyring) {
ret = install_user_keyrings();
if (ret < 0)
goto error;
}
key = ctx.cred->user->uid_keyring;
__key_get(key);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_USER_SESSION_KEYRING:
if (!ctx.cred->user->session_keyring) {
ret = install_user_keyrings();
if (ret < 0)
goto error;
}
key = ctx.cred->user->session_keyring;
__key_get(key);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_GROUP_KEYRING:
/* group keyrings are not yet supported */
key_ref = ERR_PTR(-EINVAL);
goto error;
case KEY_SPEC_REQKEY_AUTH_KEY:
key = ctx.cred->request_key_auth;
if (!key)
goto error;
__key_get(key);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_REQUESTOR_KEYRING:
if (!ctx.cred->request_key_auth)
goto error;
down_read(&ctx.cred->request_key_auth->sem);
if (test_bit(KEY_FLAG_REVOKED,
&ctx.cred->request_key_auth->flags)) {
key_ref = ERR_PTR(-EKEYREVOKED);
key = NULL;
} else {
rka = ctx.cred->request_key_auth->payload.data[0];
key = rka->dest_keyring;
__key_get(key);
}
up_read(&ctx.cred->request_key_auth->sem);
if (!key)
goto error;
key_ref = make_key_ref(key, 1);
break;
default:
key_ref = ERR_PTR(-EINVAL);
if (id < 1)
goto error;
key = key_lookup(id);
if (IS_ERR(key)) {
key_ref = ERR_CAST(key);
goto error;
}
key_ref = make_key_ref(key, 0);
/* check to see if we possess the key */
ctx.index_key.type = key->type;
ctx.index_key.description = key->description;
ctx.index_key.desc_len = strlen(key->description);
ctx.match_data.raw_data = key;
kdebug("check possessed");
skey_ref = search_process_keyrings(&ctx);
kdebug("possessed=%p", skey_ref);
if (!IS_ERR(skey_ref)) {
key_put(key);
key_ref = skey_ref;
}
break;
}
/* unlink does not use the nominated key in any way, so can skip all
* the permission checks as it is only concerned with the keyring */
if (lflags & KEY_LOOKUP_FOR_UNLINK) {
ret = 0;
goto error;
}
if (!(lflags & KEY_LOOKUP_PARTIAL)) {
ret = wait_for_key_construction(key, true);
switch (ret) {
case -ERESTARTSYS:
goto invalid_key;
default:
if (perm)
goto invalid_key;
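			/* fall through */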
case 0:
break;
}
} else if (perm) {
ret = key_validate(key);
if (ret < 0)
goto invalid_key;
}
ret = -EIO;
if (!(lflags & KEY_LOOKUP_PARTIAL) &&
!test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
goto invalid_key;
/* check the permissions */
ret = key_task_permission(key_ref, ctx.cred, perm);
if (ret < 0)
goto invalid_key;
key->last_used_at = current_kernel_time().tv_sec;
error:
put_cred(ctx.cred);
return key_ref;
invalid_key:
key_ref_put(key_ref);
key_ref = ERR_PTR(ret);
goto error;
/* if we attempted to install a keyring, then it may have caused new
* creds to be installed */
reget_creds:
put_cred(ctx.cred);
goto try_again;
}
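/*
 * Illustrative sketch of a typical caller (hypothetical, modelled on the
 * keyctl service routines): resolve a userspace-supplied ID, creating the
 * special keyrings on demand and demanding read permission:
 *
 *	key_ref_t ref;
 *
 *	ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_NEED_READ);
 *	if (IS_ERR(ref))
 *		return PTR_ERR(ref);
 *	... operate on key_ref_to_ptr(ref) ...
 *	key_ref_put(ref);
 */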
/*
 * Join the named keyring as the session keyring if possible, else attempt to
 * create a new one of that name and join that.
*
* If the name is NULL, an empty anonymous keyring will be installed as the
* session keyring.
*
 * Named session keyrings are joined with key_session_mutex held to prevent
 * the keyrings from going away whilst the attempt is made to join them and
 * also to prevent a race in creating compatible session keyrings.
*/
long join_session_keyring(const char *name)
{
const struct cred *old;
struct cred *new;
struct key *keyring;
long ret, serial;
new = prepare_creds();
if (!new)
return -ENOMEM;
old = current_cred();
/* if no name is provided, install an anonymous keyring */
if (!name) {
ret = install_session_keyring_to_cred(new, NULL);
if (ret < 0)
goto error;
serial = new->session_keyring->serial;
ret = commit_creds(new);
if (ret == 0)
ret = serial;
goto okay;
}
/* allow the user to join or create a named keyring */
mutex_lock(&key_session_mutex);
/* look for an existing keyring of this name */
keyring = find_keyring_by_name(name, false);
if (PTR_ERR(keyring) == -ENOKEY) {
/* not found - try and create a new one */
keyring = keyring_alloc(
name, old->uid, old->gid, old,
KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ | KEY_USR_LINK,
KEY_ALLOC_IN_QUOTA, NULL, NULL);
if (IS_ERR(keyring)) {
ret = PTR_ERR(keyring);
goto error2;
}
} else if (IS_ERR(keyring)) {
ret = PTR_ERR(keyring);
goto error2;
} else if (keyring == new->session_keyring) {
key_put(keyring);
ret = 0;
goto error2;
}
/* we've got a keyring - now to install it */
ret = install_session_keyring_to_cred(new, keyring);
if (ret < 0)
goto error2;
commit_creds(new);
mutex_unlock(&key_session_mutex);
ret = keyring->serial;
key_put(keyring);
okay:
return ret;
error2:
mutex_unlock(&key_session_mutex);
error:
abort_creds(new);
return ret;
}
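/*
 * For orientation (illustrative): this is the service routine that typically
 * sits behind keyctl(KEYCTL_JOIN_SESSION_KEYRING, name), so a userspace
 * process would reach it along the lines of:
 *
 *	long serial = keyctl(KEYCTL_JOIN_SESSION_KEYRING, "my-session");
 *
 * where the return value on success is the serial number of the keyring that
 * was joined or created.
 */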
/*
* Replace a process's session keyring on behalf of one of its children when
* the target process is about to resume userspace execution.
*/
void key_change_session_keyring(struct callback_head *twork)
{
const struct cred *old = current_cred();
struct cred *new = container_of(twork, struct cred, rcu);
if (unlikely(current->flags & PF_EXITING)) {
put_cred(new);
return;
}
	new->uid   = old->uid;
	new->euid  = old->euid;
	new->suid  = old->suid;
	new->fsuid = old->fsuid;
	new->gid   = old->gid;
	new->egid  = old->egid;
	new->sgid  = old->sgid;
	new->fsgid = old->fsgid;
new->user = get_uid(old->user);
new->user_ns = get_user_ns(old->user_ns);
new->group_info = get_group_info(old->group_info);
new->securebits = old->securebits;
new->cap_inheritable = old->cap_inheritable;
new->cap_permitted = old->cap_permitted;
new->cap_effective = old->cap_effective;
new->cap_ambient = old->cap_ambient;
new->cap_bset = old->cap_bset;
new->jit_keyring = old->jit_keyring;
new->thread_keyring = key_get(old->thread_keyring);
new->process_keyring = key_get(old->process_keyring);
security_transfer_creds(new, old);
commit_creds(new);
}
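/*
 * Note (an observation): this runs as task_work on the parent process,
 * queued by the KEYCTL_SESSION_TO_PARENT handler via the new cred's
 * callback_head; bailing out when the target is exiting avoids committing
 * credentials to a dying task.
 */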
/*
* Make sure that root's user and user-session keyrings exist.
*/
static int __init init_root_keyring(void)
{
return install_user_keyrings();
}
late_initcall(init_root_keyring);
| ./CrossVul/dataset_final_sorted/CWE-404/c/bad_3267_1 |